
github.com/dotnet/runtime.git
author     Jan Vorlicek <jan.vorlicek@volny.cz>   2021-06-10 16:26:40 +0300
committer  GitHub <noreply@github.com>            2021-06-10 16:26:40 +0300
commit     d617e830ec621db80f6df9ed8ba75176f5ba6035 (patch)
tree       f2389aecbcca81cd24d0604263ea3bbc7ffab50c /src/coreclr/vm
parent     7b3564a3de6c5d50515b0be31aa14e52c257a44f (diff)
Add writeable holders for executable memory (#53934)
* Add writeable holders for executable memory

This change adds holders for writeable mappings for executable memory. It is the largest part of the W^X support. The ExecutableWriterHolder implementation is a dummy in this change, but it was fully tested with coreclr / libraries tests on Windows arm, arm64, x64 and x86 with the real double memory mapping.

There are a few concepts / conventions used:

* When the writeable pointer isn't known at the place where it is needed, and also not at the caller, an ExecutableWriterHolder instance is created.
* When a callee needs a writeable pointer to executable memory and the caller knows both the RW and RX addresses, the argument is doubled with RX and RW suffixes. For constructors and member methods where "this" is the RW one, we pass just an extra RX argument.
* Locals holding the RW pointer use the RW suffix.
* Locals holding the RX pointer usually have no suffix to minimize the number of changes, but in some cases they have an RX suffix where I felt it was better to make things clear.
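A minimal sketch of the holder pattern described above, as it recurs throughout this diff. ExecutableWriterHolder and GetRW() are the real names from the change; the surrounding PatchTarget function is hypothetical and exists only to illustrate the convention:

// Patching a stub target through a writeable mapping, given only the RX pointer.
void PatchTarget(StubPrecode* pPrecode /* RX */, TADDR newTarget)
{
    // Open a writeable view over the executable memory for the holder's lifetime.
    ExecutableWriterHolder<StubPrecode> precodeWriterHolder(pPrecode, sizeof(StubPrecode));

    // All stores go through the RW alias; the RX pointer is never written to.
    precodeWriterHolder.GetRW()->m_pTarget = newTarget;
}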
Diffstat (limited to 'src/coreclr/vm')
-rw-r--r--  src/coreclr/vm/amd64/cgenamd64.cpp  100
-rw-r--r--  src/coreclr/vm/amd64/cgencpu.h  18
-rw-r--r--  src/coreclr/vm/amd64/virtualcallstubcpu.hpp  27
-rw-r--r--  src/coreclr/vm/arm/cgencpu.h  37
-rw-r--r--  src/coreclr/vm/arm/stubs.cpp  97
-rw-r--r--  src/coreclr/vm/arm/virtualcallstubcpu.hpp  9
-rw-r--r--  src/coreclr/vm/arm64/cgencpu.h  37
-rw-r--r--  src/coreclr/vm/arm64/stubs.cpp  101
-rw-r--r--  src/coreclr/vm/arm64/virtualcallstubcpu.hpp  9
-rw-r--r--  src/coreclr/vm/array.cpp  7
-rw-r--r--  src/coreclr/vm/callcounting.cpp  8
-rw-r--r--  src/coreclr/vm/ceeload.cpp  11
-rw-r--r--  src/coreclr/vm/ceemain.cpp  10
-rw-r--r--  src/coreclr/vm/clrtocomcall.cpp  13
-rw-r--r--  src/coreclr/vm/codeman.cpp  77
-rw-r--r--  src/coreclr/vm/comcallablewrapper.cpp  150
-rw-r--r--  src/coreclr/vm/comcallablewrapper.h  11
-rw-r--r--  src/coreclr/vm/comdelegate.cpp  22
-rw-r--r--  src/coreclr/vm/comtoclrcall.cpp  24
-rw-r--r--  src/coreclr/vm/crossgencompile.cpp  2
-rw-r--r--  src/coreclr/vm/dataimage.cpp  4
-rw-r--r--  src/coreclr/vm/dllimportcallback.cpp  26
-rw-r--r--  src/coreclr/vm/dllimportcallback.h  12
-rw-r--r--  src/coreclr/vm/dynamicmethod.cpp  67
-rw-r--r--  src/coreclr/vm/dynamicmethod.h  2
-rw-r--r--  src/coreclr/vm/gccover.cpp  70
-rw-r--r--  src/coreclr/vm/i386/cgencpu.h  31
-rw-r--r--  src/coreclr/vm/i386/cgenx86.cpp  57
-rw-r--r--  src/coreclr/vm/i386/stublinkerx86.cpp  167
-rw-r--r--  src/coreclr/vm/i386/stublinkerx86.h  16
-rw-r--r--  src/coreclr/vm/i386/virtualcallstubcpu.hpp  24
-rw-r--r--  src/coreclr/vm/jitinterface.cpp  5
-rw-r--r--  src/coreclr/vm/jitinterface.h  4
-rw-r--r--  src/coreclr/vm/method.cpp  21
-rw-r--r--  src/coreclr/vm/methoddescbackpatchinfo.cpp  10
-rw-r--r--  src/coreclr/vm/precode.cpp  44
-rw-r--r--  src/coreclr/vm/precode.h  2
-rw-r--r--  src/coreclr/vm/prestub.cpp  14
-rw-r--r--  src/coreclr/vm/readytoruninfo.h  2
-rw-r--r--  src/coreclr/vm/stubcache.cpp  9
-rw-r--r--  src/coreclr/vm/stublink.cpp  87
-rw-r--r--  src/coreclr/vm/stublink.h  5
-rw-r--r--  src/coreclr/vm/threadsuspend.cpp  12
-rw-r--r--  src/coreclr/vm/virtualcallstub.cpp  20
44 files changed, 875 insertions, 606 deletions
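The commit message notes that the ExecutableWriterHolder implementation is still a dummy at this stage. A hypothetical sketch of what such a placeholder could look like (an assumption for illustration, not the committed source): until the real double memory mapping is wired up, the "writeable" view can simply alias the executable pointer.

// Hypothetical dummy holder: the RW view aliases the RX view for now.
template <typename T>
class ExecutableWriterHolder
{
    T* m_addressRW;

public:
    ExecutableWriterHolder(T* pAddressRX, size_t size)
        : m_addressRW(pAddressRX)   // dummy: no second mapping yet
    {
    }

    // Returns the writeable alias of the executable memory.
    T* GetRW() const { return m_addressRW; }
};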
diff --git a/src/coreclr/vm/amd64/cgenamd64.cpp b/src/coreclr/vm/amd64/cgenamd64.cpp
index 153993cb37c..d00f7b74df0 100644
--- a/src/coreclr/vm/amd64/cgenamd64.cpp
+++ b/src/coreclr/vm/amd64/cgenamd64.cpp
@@ -450,7 +450,7 @@ void EncodeLoadAndJumpThunk (LPBYTE pBuffer, LPVOID pv, LPVOID pTarget)
_ASSERTE(DbgIsExecutable(pBuffer, 22));
}
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
CONTRACT_VOID
{
@@ -460,7 +460,8 @@ void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
}
CONTRACT_END;
- BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
// We need the target to be in a 64-bit aligned memory location and the call instruction
// to immediately precede the ComCallMethodDesc. We'll generate an indirect call to avoid
@@ -471,21 +472,21 @@ void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
// nop 90
// call [$ - 10] ff 15 f0 ff ff ff
- *((UINT64 *)&pBuffer[COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET]) = (UINT64)target;
+ *((UINT64 *)&pBufferRW[COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET]) = (UINT64)target;
- pBuffer[-2] = 0x90;
- pBuffer[-1] = 0x90;
+ pBufferRW[-2] = 0x90;
+ pBufferRW[-1] = 0x90;
- pBuffer[0] = 0xFF;
- pBuffer[1] = 0x15;
- *((UINT32 UNALIGNED *)&pBuffer[2]) = (UINT32)(COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET - COMMETHOD_CALL_PRESTUB_SIZE);
+ pBufferRW[0] = 0xFF;
+ pBufferRW[1] = 0x15;
+ *((UINT32 UNALIGNED *)&pBufferRW[2]) = (UINT32)(COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET - COMMETHOD_CALL_PRESTUB_SIZE);
- _ASSERTE(DbgIsExecutable(pBuffer, COMMETHOD_CALL_PRESTUB_SIZE));
+ _ASSERTE(DbgIsExecutable(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE));
RETURN;
}
-void emitJump(LPBYTE pBuffer, LPVOID target)
+void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
CONTRACTL
{
@@ -493,25 +494,25 @@ void emitJump(LPBYTE pBuffer, LPVOID target)
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(CheckPointer(pBuffer));
+ PRECONDITION(CheckPointer(pBufferRX));
}
CONTRACTL_END;
// mov rax, 123456789abcdef0h 48 b8 xx xx xx xx xx xx xx xx
// jmp rax ff e0
- pBuffer[0] = 0x48;
- pBuffer[1] = 0xB8;
+ pBufferRW[0] = 0x48;
+ pBufferRW[1] = 0xB8;
- *((UINT64 UNALIGNED *)&pBuffer[2]) = (UINT64)target;
+ *((UINT64 UNALIGNED *)&pBufferRW[2]) = (UINT64)target;
- pBuffer[10] = 0xFF;
- pBuffer[11] = 0xE0;
+ pBufferRW[10] = 0xFF;
+ pBufferRW[11] = 0xE0;
- _ASSERTE(DbgIsExecutable(pBuffer, 12));
+ _ASSERTE(DbgIsExecutable(pBufferRX, 12));
}
-void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
CONTRACTL
{
@@ -542,7 +543,7 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
m_jmpRAX[1] = 0xFF;
m_jmpRAX[2] = 0xE0;
- _ASSERTE(DbgIsExecutable(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]));
+ _ASSERTE(DbgIsExecutable(&pEntryThunkCodeRX->m_movR10[0], &pEntryThunkCodeRX->m_jmpRAX[3]-&pEntryThunkCodeRX->m_movR10[0]));
}
void UMEntryThunkCode::Poison()
@@ -555,15 +556,18 @@ void UMEntryThunkCode::Poison()
}
CONTRACTL_END;
- m_execstub = (BYTE *)UMEntryThunk::ReportViolation;
+ ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode));
+ UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();
+
+ pThisRW->m_execstub = (BYTE *)UMEntryThunk::ReportViolation;
- m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pThisRW->m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
#ifdef _WIN32
// mov rcx, pUMEntryThunk // 48 b9 xx xx xx xx xx xx xx xx
- m_movR10[1] = 0xB9;
+ pThisRW->m_movR10[1] = 0xB9;
#else
// mov rdi, pUMEntryThunk // 48 bf xx xx xx xx xx xx xx xx
- m_movR10[1] = 0xBF;
+ pThisRW->m_movR10[1] = 0xBF;
#endif
ClrFlushInstructionCache(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]);
@@ -647,7 +651,7 @@ INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMe
return static_cast<INT32>(offset);
}
-INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, bool emitJump)
+INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddrRX, PCODE jumpStubAddrRW, bool emitJump)
{
CONTRACTL
{
@@ -657,12 +661,12 @@ INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCO
CONTRACTL_END;
TADDR baseAddr = (TADDR)pRel32 + 4;
- _ASSERTE(FitsInI4(jumpStubAddr - baseAddr));
+ _ASSERTE(FitsInI4(jumpStubAddrRX - baseAddr));
INT_PTR offset = target - baseAddr;
if (!FitsInI4(offset) INDEBUG(|| PEDecoder::GetForceRelocs()))
{
- offset = jumpStubAddr - baseAddr;
+ offset = jumpStubAddrRX - baseAddr;
if (!FitsInI4(offset))
{
_ASSERTE(!"jump stub was not in expected range");
@@ -671,11 +675,11 @@ INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCO
if (emitJump)
{
- emitBackToBackJump((LPBYTE)jumpStubAddr, (LPVOID)target);
+ emitBackToBackJump((LPBYTE)jumpStubAddrRX, (LPBYTE)jumpStubAddrRW, (LPVOID)target);
}
else
{
- _ASSERTE(decodeBackToBackJump(jumpStubAddr) == target);
+ _ASSERTE(decodeBackToBackJump(jumpStubAddrRX) == target);
}
}
@@ -862,7 +866,9 @@ EXTERN_C PCODE VirtualMethodFixupWorker(TransitionBlock * pTransitionBlock, CORC
*(INT32 *)(pNewValue+1) = rel32UsingJumpStub((INT32*)(&pThunk->callJmp[1]), pCode, pMD, NULL);
_ASSERTE(IS_ALIGNED(pThunk, sizeof(INT64)));
- FastInterlockCompareExchangeLong((INT64*)pThunk, newValue, oldValue);
+
+ ExecutableWriterHolder<INT64> thunkWriterHolder((INT64*)pThunk, sizeof(INT64));
+ FastInterlockCompareExchangeLong(thunkWriterHolder.GetRW(), newValue, oldValue);
FlushInstructionCache(GetCurrentProcess(), pThunk, 8);
}
@@ -888,14 +894,17 @@ EXTERN_C PCODE VirtualMethodFixupWorker(TransitionBlock * pTransitionBlock, CORC
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
- BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \
+ BYTE * pStart = startWriterHolder.GetRW(); \
+ size_t rxOffset = pStartRX - pStart; \
BYTE * p = pStart;
#define END_DYNAMIC_HELPER_EMIT() \
_ASSERTE(pStart + cb == p); \
while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \
- ClrFlushInstructionCache(pStart, cbAligned); \
- return (PCODE)pStart
+ ClrFlushInstructionCache(pStartRX, cbAligned); \
+ return (PCODE)pStartRX
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
@@ -913,13 +922,13 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCOD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
}
-void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
CONTRACTL
{
@@ -940,7 +949,7 @@ void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, T
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
}
@@ -948,7 +957,7 @@ PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR ar
{
BEGIN_DYNAMIC_HELPER_EMIT(15);
- EmitHelperWithArg(p, pAllocator, arg, target);
+ EmitHelperWithArg(p, rxOffset, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
@@ -976,7 +985,7 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1005,7 +1014,7 @@ PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR ar
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1071,7 +1080,7 @@ PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1100,7 +1109,7 @@ PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADD
p += 8;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1117,9 +1126,10 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
- pArgs->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
- pArgs->signature = pLookup->signature;
- pArgs->module = (CORINFO_MODULE_HANDLE)pModule;
+ ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs));
+ argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
+ argsWriterHolder.GetRW()->signature = pLookup->signature;
+ argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule;
WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*);
@@ -1131,7 +1141,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// rcx/rdi contains the generic context parameter
// mov rdx/rsi,pArgs
// jmp helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
END_DYNAMIC_HELPER_EMIT();
}
@@ -1238,7 +1248,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// mov rdx|rsi,pArgs
// jmp helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
}
}
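A note on the rxOffset plumbing introduced in the dynamic-helper macros above: the CPU resolves rip-relative displacements against the executable (RX) mapping, so rel32 values must be computed from RX addresses even though the bytes are stored through the RW alias. With made-up addresses (an illustration, not values from the diff):

// pStartRX = 0x00007ffd60000000  -- executable (RX) mapping
// pStart   = 0x00007ffd20000000  -- writeable (RW) alias of the same memory
// rxOffset = pStartRX - pStart   -- constant delta between the two views
//
// When emitting "jmp rel32" at RW cursor p, the displacement is taken from
// the instruction's RX home, p + rxOffset, because that is the address the
// jump is relative to when it actually executes:
*(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target, NULL, pAllocator);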
diff --git a/src/coreclr/vm/amd64/cgencpu.h b/src/coreclr/vm/amd64/cgencpu.h
index 7312ad0a019..6300876fa33 100644
--- a/src/coreclr/vm/amd64/cgencpu.h
+++ b/src/coreclr/vm/amd64/cgencpu.h
@@ -370,11 +370,11 @@ INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMe
LoaderAllocator *pLoaderAllocator = NULL, bool throwOnOutOfMemoryWithinRange = true);
// Get Rel32 destination, emit jumpStub if necessary into a preallocated location
-INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, bool emitJump);
+INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, PCODE jumpStubAddrRW, bool emitJump);
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target);
-void emitJump(LPBYTE pBuffer, LPVOID target);
+void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target);
BOOL isJumpRel32(PCODE pCode);
PCODE decodeJump32(PCODE pCode);
@@ -388,11 +388,11 @@ PCODE decodeJump64(PCODE pCode);
// For all other platforms back to back jumps don't require anything special
// That is why we have these two wrapper functions that call emitJump and decodeJump
//
-inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
- emitJump(pBuffer, target);
+ emitJump(pBufferRX, pBufferRW, target);
}
inline BOOL isBackToBackJump(PCODE pCode)
@@ -438,7 +438,7 @@ struct DECLSPEC_ALIGN(8) UMEntryThunkCode
BYTE m_jmpRAX[3]; // JMP RAX
BYTE m_padding2[5];
- void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
@@ -610,19 +610,19 @@ private:
#ifndef DACCESS_COMPILE
public:
- CallCountingStubShort(CallCount *remainingCallCountCell, PCODE targetForMethod)
+ CallCountingStubShort(CallCountingStubShort* stubRX, CallCount *remainingCallCountCell, PCODE targetForMethod)
: m_part0{ 0x48, 0xb8}, // mov rax,
m_remainingCallCountCell(remainingCallCountCell), // <imm64>
m_part1{ 0x66, 0xff, 0x08, // dec word ptr [rax]
0x0f, 0x85}, // jnz
m_rel32TargetForMethod( // <rel32>
GetRelative32BitOffset(
- &m_rel32TargetForMethod,
+ &stubRX->m_rel32TargetForMethod,
targetForMethod)),
m_part2{ 0xe8}, // call
m_rel32TargetForThresholdReached( // <rel32>
GetRelative32BitOffset(
- &m_rel32TargetForThresholdReached,
+ &stubRX->m_rel32TargetForThresholdReached,
TargetForThresholdReached)),
// (rip == stub-identifying token)
m_alignmentPadding{}
diff --git a/src/coreclr/vm/amd64/virtualcallstubcpu.hpp b/src/coreclr/vm/amd64/virtualcallstubcpu.hpp
index 860a681e213..70b2de58134 100644
--- a/src/coreclr/vm/amd64/virtualcallstubcpu.hpp
+++ b/src/coreclr/vm/amd64/virtualcallstubcpu.hpp
@@ -97,7 +97,7 @@ struct LookupHolder
{
static void InitializeStatic();
- void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
+ void Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken);
LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
@@ -317,7 +317,7 @@ struct DispatchHolder
{
static void InitializeStatic();
- void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT,
+ void Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT,
DispatchStub::DispatchStubType type);
static size_t GetHolderSize(DispatchStub::DispatchStubType type)
@@ -453,7 +453,8 @@ struct ResolveHolder
{
static void InitializeStatic();
- void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ void Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32* counterAddr);
@@ -573,7 +574,7 @@ void LookupHolder::InitializeStatic()
lookupInit.part3 [1] = 0xE0;
}
-void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+void LookupHolder::Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
{
_stub = lookupInit;
@@ -632,7 +633,7 @@ void DispatchHolder::InitializeStatic()
dispatchLongInit.part5 [1] = 0xE0;
};
-void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT,
+void DispatchHolder::Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT,
DispatchStub::DispatchStubType type)
{
//
@@ -650,17 +651,18 @@ void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expe
//
if (type == DispatchStub::e_TYPE_SHORT)
{
- DispatchStubShort *shortStub = const_cast<DispatchStubShort *>(stub()->getShortStub());
+ DispatchStubShort *shortStubRW = const_cast<DispatchStubShort *>(stub()->getShortStub());
+ DispatchStubShort *shortStubRX = const_cast<DispatchStubShort *>(pDispatchHolderRX->stub()->getShortStub());
// initialize the static data
- *shortStub = dispatchShortInit;
+ *shortStubRW = dispatchShortInit;
// fill in the dynamic data
- size_t displ = (failTarget - ((PCODE) &shortStub->_failDispl + sizeof(DISPL)));
+ size_t displ = (failTarget - ((PCODE) &shortStubRX->_failDispl + sizeof(DISPL)));
CONSISTENCY_CHECK(FitsInI4(displ));
- shortStub->_failDispl = (DISPL) displ;
- shortStub->_implTarget = (size_t) implTarget;
- CONSISTENCY_CHECK((PCODE)&shortStub->_failDispl + sizeof(DISPL) + shortStub->_failDispl == failTarget);
+ shortStubRW->_failDispl = (DISPL) displ;
+ shortStubRW->_implTarget = (size_t) implTarget;
+ CONSISTENCY_CHECK((PCODE)&shortStubRX->_failDispl + sizeof(DISPL) + shortStubRX->_failDispl == failTarget);
}
else
{
@@ -769,7 +771,8 @@ void ResolveHolder::InitializeStatic()
resolveInit.part10 [1] = 0xE0;
};
-void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+void ResolveHolder::Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32* counterAddr)
{
diff --git a/src/coreclr/vm/arm/cgencpu.h b/src/coreclr/vm/arm/cgencpu.h
index 4f6e1deb4fe..88d0c6802b6 100644
--- a/src/coreclr/vm/arm/cgencpu.h
+++ b/src/coreclr/vm/arm/cgencpu.h
@@ -232,7 +232,7 @@ inline void ClearITState(T_CONTEXT *context) {
}
#ifdef FEATURE_COMINTEROP
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target);
#endif // FEATURE_COMINTEROP
//------------------------------------------------------------------------
@@ -283,14 +283,14 @@ inline int16_t decodeUnconditionalBranchThumb(LPBYTE pBuffer)
}
//------------------------------------------------------------------------
-inline void emitJump(LPBYTE pBuffer, LPVOID target)
+inline void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
// The PC-relative load we emit below requires 4-byte alignment for the offset to be calculated correctly.
- _ASSERTE(((UINT_PTR)pBuffer & 3) == 0);
+ _ASSERTE(((UINT_PTR)pBufferRX & 3) == 0);
- DWORD * pCode = (DWORD *)pBuffer;
+ DWORD * pCode = (DWORD *)pBufferRW;
// ldr pc, [pc, #0]
pCode[0] = 0xf000f8df;
@@ -335,10 +335,10 @@ inline BOOL isBackToBackJump(PCODE pBuffer)
}
//------------------------------------------------------------------------
-inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
- emitJump(pBuffer, target);
+ emitJump(pBufferRX, pBufferRW, target);
}
//------------------------------------------------------------------------
@@ -943,7 +943,7 @@ struct DECLSPEC_ALIGN(4) UMEntryThunkCode
TADDR m_pTargetCode;
TADDR m_pvSecretParam;
- void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
@@ -1055,7 +1055,7 @@ struct StubPrecode {
TADDR m_pTarget;
TADDR m_pMethodDesc;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(StubPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
TADDR GetMethodDesc()
{
@@ -1078,7 +1078,8 @@ struct StubPrecode {
}
CONTRACTL_END;
- InterlockedExchange((LONG*)&m_pTarget, (LONG)GetPreStubEntryPoint());
+ ExecutableWriterHolder<StubPrecode> precodeWriterHolder(this, sizeof(StubPrecode));
+ InterlockedExchange((LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)GetPreStubEntryPoint());
}
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
@@ -1090,8 +1091,9 @@ struct StubPrecode {
}
CONTRACTL_END;
+ ExecutableWriterHolder<StubPrecode> precodeWriterHolder(this, sizeof(StubPrecode));
return (TADDR)InterlockedCompareExchange(
- (LONG*)&m_pTarget, (LONG)target, (LONG)expected) == expected;
+ (LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == expected;
}
#ifdef FEATURE_PREJIT
@@ -1114,7 +1116,7 @@ struct NDirectImportPrecode {
// takes advantage of this to detect NDirectImportPrecode.
TADDR m_pTarget;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(NDirectImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
TADDR GetMethodDesc()
{
@@ -1155,7 +1157,7 @@ struct FixupPrecode {
BYTE m_PrecodeChunkIndex;
TADDR m_pTarget;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+ void Init(FixupPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
TADDR GetBase()
{
@@ -1182,7 +1184,8 @@ struct FixupPrecode {
}
CONTRACTL_END;
- InterlockedExchange((LONG*)&m_pTarget, (LONG)GetEEFuncEntryPoint(PrecodeFixupThunk));
+ ExecutableWriterHolder<FixupPrecode> precodeWriterHolder(this, sizeof(FixupPrecode));
+ InterlockedExchange((LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)GetEEFuncEntryPoint(PrecodeFixupThunk));
}
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
@@ -1194,8 +1197,9 @@ struct FixupPrecode {
}
CONTRACTL_END;
+ ExecutableWriterHolder<FixupPrecode> precodeWriterHolder(this, sizeof(FixupPrecode));
return (TADDR)InterlockedCompareExchange(
- (LONG*)&m_pTarget, (LONG)target, (LONG)expected) == expected;
+ (LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == expected;
}
static BOOL IsFixupPrecodeByASM(PCODE addr)
@@ -1261,7 +1265,8 @@ struct ThisPtrRetBufPrecode {
}
CONTRACTL_END;
- return FastInterlockCompareExchange((LONG*)&m_pTarget, (LONG)target, (LONG)expected) == (LONG)expected;
+ ExecutableWriterHolder<ThisPtrRetBufPrecode> precodeWriterHolder(this, sizeof(ThisPtrRetBufPrecode));
+ return FastInterlockCompareExchange((LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == (LONG)expected;
}
};
typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
@@ -1364,7 +1369,7 @@ private:
#ifndef DACCESS_COMPILE
public:
- CallCountingStubShort(CallCount *remainingCallCountCell, PCODE targetForMethod)
+ CallCountingStubShort(CallCountingStubShort* stubRX, CallCount *remainingCallCountCell, PCODE targetForMethod)
: m_part0{ 0xb401, // push {r0}
0xf8df, 0xc01c, // ldr r12, [pc, #(m_remainingCallCountCell)]
0xf8bc, 0x0000, // ldrh r0, [r12]
diff --git a/src/coreclr/vm/arm/stubs.cpp b/src/coreclr/vm/arm/stubs.cpp
index 1ca6fd09642..aac3e25b181 100644
--- a/src/coreclr/vm/arm/stubs.cpp
+++ b/src/coreclr/vm/arm/stubs.cpp
@@ -98,7 +98,7 @@ class ThumbCondJump : public InstructionFormat
//Encoding 1|0|1|1|op|0|i|1|imm5|Rn
//op = Bit3(variation)
//Rn = Bits2-0(variation)
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
@@ -109,8 +109,8 @@ class ThumbCondJump : public InstructionFormat
_ASSERTE((fixedUpReference & 0x1) == 0);
- pOutBuffer[0] = static_cast<BYTE>(((0x3e & fixedUpReference) << 2) | (0x7 & variationCode));
- pOutBuffer[1] = static_cast<BYTE>(0xb1 | (0x8 & variationCode)| ((0x40 & fixedUpReference)>>5));
+ pOutBufferRW[0] = static_cast<BYTE>(((0x3e & fixedUpReference) << 2) | (0x7 & variationCode));
+ pOutBufferRW[1] = static_cast<BYTE>(0xb1 | (0x8 & variationCode)| ((0x40 & fixedUpReference)>>5));
}
};
@@ -138,7 +138,7 @@ class ThumbNearJump : public InstructionFormat
return 0;
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT cond, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT cond, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
@@ -155,8 +155,8 @@ class ThumbNearJump : public InstructionFormat
_ASSERTE(!"Expected refSize to be 2");
//Emit T2 encoding of B<c> <label> instruction
- pOutBuffer[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
- pOutBuffer[1] = static_cast<BYTE>(0xe0 | ((fixedUpReference & 0xe00)>>9));
+ pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
+ pOutBufferRW[1] = static_cast<BYTE>(0xe0 | ((fixedUpReference & 0xe00)>>9));
}
else if(fixedUpReference >= -16777216 && fixedUpReference <= 16777214)
{
@@ -167,10 +167,10 @@ class ThumbNearJump : public InstructionFormat
int s = (fixedUpReference & 0x1000000) >> 24;
int i1 = (fixedUpReference & 0x800000) >> 23;
int i2 = (fixedUpReference & 0x400000) >> 22;
- pOutBuffer[0] = static_cast<BYTE>((fixedUpReference & 0xff000) >> 12);
- pOutBuffer[1] = static_cast<BYTE>(0xf0 | (s << 2) |( (fixedUpReference & 0x300000) >>20));
- pOutBuffer[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
- pOutBuffer[3] = static_cast<BYTE>(0x90 | (~(i1^s)) << 5 | (~(i2^s)) << 3 | (fixedUpReference & 0xe00) >> 9);
+ pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0xff000) >> 12);
+ pOutBufferRW[1] = static_cast<BYTE>(0xf0 | (s << 2) |( (fixedUpReference & 0x300000) >>20));
+ pOutBufferRW[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
+ pOutBufferRW[3] = static_cast<BYTE>(0x90 | (~(i1^s)) << 5 | (~(i2^s)) << 3 | (fixedUpReference & 0xe00) >> 9);
}
else
{
@@ -185,8 +185,8 @@ class ThumbNearJump : public InstructionFormat
_ASSERTE(!"Expected refSize to be 2");
//Emit T1 encoding of B<c> <label> instruction
- pOutBuffer[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
- pOutBuffer[1] = static_cast<BYTE>(0xd0 | (cond & 0xf));
+ pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
+ pOutBufferRW[1] = static_cast<BYTE>(0xd0 | (cond & 0xf));
}
else if(fixedUpReference >= -1048576 && fixedUpReference <= 1048574)
{
@@ -194,10 +194,10 @@ class ThumbNearJump : public InstructionFormat
_ASSERTE(!"Expected refSize to be 4");
//Emit T3 encoding of B<c> <label> instruction
- pOutBuffer[0] = static_cast<BYTE>(((cond & 0x3) << 6) | ((fixedUpReference & 0x3f000) >>12));
- pOutBuffer[1] = static_cast<BYTE>(0xf0 | ((fixedUpReference & 0x100000) >>18) | ((cond & 0xc) >> 2));
- pOutBuffer[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
- pOutBuffer[3] = static_cast<BYTE>(0x80 | ((fixedUpReference & 0x40000) >> 13) | ((fixedUpReference & 0x80000) >> 16) | ((fixedUpReference & 0xe00) >> 9));
+ pOutBufferRW[0] = static_cast<BYTE>(((cond & 0x3) << 6) | ((fixedUpReference & 0x3f000) >>12));
+ pOutBufferRW[1] = static_cast<BYTE>(0xf0 | ((fixedUpReference & 0x100000) >>18) | ((cond & 0xc) >> 2));
+ pOutBufferRW[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
+ pOutBufferRW[3] = static_cast<BYTE>(0x80 | ((fixedUpReference & 0x40000) >> 13) | ((fixedUpReference & 0x80000) >> 16) | ((fixedUpReference & 0xe00) >> 9));
}
else
{
@@ -714,7 +714,7 @@ void FixupPrecode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
#ifndef DACCESS_COMPILE
-void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+void StubPrecode::Init(StubPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
@@ -748,7 +748,7 @@ void StubPrecode::Fixup(DataImage *image)
}
#endif // FEATURE_NATIVE_IMAGE_GENERATION
-void NDirectImportPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+void NDirectImportPrecode::Init(NDirectImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
@@ -782,7 +782,7 @@ void NDirectImportPrecode::Fixup(DataImage *image)
}
#endif
-void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
+void FixupPrecode::Init(FixupPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
{
WRAPPER_NO_CONTRACT;
@@ -915,7 +915,7 @@ Note that ResolveWorkerChainLookupAsmStub currently points directly
to ResolveWorkerAsmStub; in the future, this could be separate.
*/
-void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+void LookupHolder::Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
{
// Called directly by JITTED code
// See ResolveWorkerAsmStub
@@ -932,7 +932,7 @@ void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
_ASSERTE(4 == LookupStub::entryPointLen);
}
-void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
+void DispatchHolder::Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT)
{
// Called directly by JITTED code
// DispatchHolder._stub._entryPoint(r0:object, r1, r2, r3, r4:IndirectionCell)
@@ -1004,7 +1004,8 @@ void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expe
_stub._implTarget = implTarget;
}
-void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+void ResolveHolder::Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr)
{
@@ -1980,7 +1981,7 @@ class UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
return NULL;
}
-void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
// ldr r12, [pc + 8]
m_code[0] = 0xf8df;
@@ -1992,19 +1993,22 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
m_pTargetCode = (TADDR)pTargetCode;
m_pvSecretParam = (TADDR)pvSecretParam;
- FlushInstructionCache(GetCurrentProcess(),&m_code,sizeof(m_code));
+ FlushInstructionCache(GetCurrentProcess(),&pEntryThunkCodeRX->m_code,sizeof(m_code));
}
#ifndef DACCESS_COMPILE
void UMEntryThunkCode::Poison()
{
- m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;
+ ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode));
+ UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();
+
+ pThisRW->m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;
// ldr r0, [pc + 8]
- m_code[0] = 0x4802;
+ pThisRW->m_code[0] = 0x4802;
// nop
- m_code[1] = 0xbf00;
+ pThisRW->m_code[1] = 0xbf00;
ClrFlushInstructionCache(&m_code,sizeof(m_code));
}
@@ -2052,7 +2056,7 @@ VOID ResetCurrentContext()
#ifdef FEATURE_COMINTEROP
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
WRAPPER_NO_CONTRACT;
@@ -2065,16 +2069,17 @@ void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
0xf8df, 0xf004
};
- BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
- memcpy(pBuffer, rgCode, sizeof(rgCode));
- *((PCODE*)(pBuffer + sizeof(rgCode) + 2)) = target;
+ memcpy(pBufferRW, rgCode, sizeof(rgCode));
+ *((PCODE*)(pBufferRW + sizeof(rgCode) + 2)) = target;
// Ensure that the updated instructions get actually written
- ClrFlushInstructionCache(pBuffer, COMMETHOD_CALL_PRESTUB_SIZE);
+ ClrFlushInstructionCache(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE);
- _ASSERTE(IS_ALIGNED(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
- *((PCODE*)(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
+ _ASSERTE(IS_ALIGNED(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
+ *((PCODE*)(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
}
#endif // FEATURE_COMINTEROP
@@ -2103,14 +2108,17 @@ void MovRegImm(BYTE* p, int reg, TADDR imm)
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
- BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \
+ BYTE * pStart = startWriterHolder.GetRW(); \
+ size_t rxOffset = pStartRX - pStart; \
BYTE * p = pStart;
#define END_DYNAMIC_HELPER_EMIT() \
_ASSERTE(pStart + cb == p); \
while (p < pStart + cbAligned) { *(WORD *)p = 0xdefe; p += 2; } \
- ClrFlushInstructionCache(pStart, cbAligned); \
- return (PCODE)((TADDR)pStart | THUMB_CODE)
+ ClrFlushInstructionCache(pStartRX, cbAligned); \
+ return (PCODE)((TADDR)pStartRX | THUMB_CODE)
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
@@ -2133,7 +2141,7 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCOD
END_DYNAMIC_HELPER_EMIT();
}
-void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
// mov r1, arg
MovRegImm(p, 1, arg);
@@ -2152,7 +2160,7 @@ PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR ar
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
- EmitHelperWithArg(p, pAllocator, arg, target);
+ EmitHelperWithArg(p, rxOffset, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
@@ -2308,9 +2316,10 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
- pArgs->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
- pArgs->signature = pLookup->signature;
- pArgs->module = (CORINFO_MODULE_HANDLE)pModule;
+ ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs));
+ argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
+ argsWriterHolder.GetRW()->signature = pLookup->signature;
+ argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule;
WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*);
@@ -2320,7 +2329,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
END_DYNAMIC_HELPER_EMIT();
}
@@ -2426,7 +2435,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
*(WORD *)p = 0x4618;
p += 2;
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
}
END_DYNAMIC_HELPER_EMIT();
diff --git a/src/coreclr/vm/arm/virtualcallstubcpu.hpp b/src/coreclr/vm/arm/virtualcallstubcpu.hpp
index 0abf9ad5722..919845c5ad2 100644
--- a/src/coreclr/vm/arm/virtualcallstubcpu.hpp
+++ b/src/coreclr/vm/arm/virtualcallstubcpu.hpp
@@ -74,7 +74,7 @@ struct LookupHolder
{
static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
- void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
+ void Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken);
LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
@@ -122,7 +122,7 @@ struct DispatchStub
LIMITED_METHOD_CONTRACT;
_ASSERTE(slotTypeRef != nullptr);
- *slotTypeRef = EntryPointSlots::SlotType_Normal;
+ *slotTypeRef = EntryPointSlots::SlotType_Executable;
return (TADDR)&_implTarget;
}
@@ -168,7 +168,7 @@ struct DispatchHolder
static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub, _implTarget)) % sizeof(void *)) == 0);
}
- void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT);
+ void Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT);
DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
@@ -262,7 +262,8 @@ struct ResolveHolder
{
static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
- void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ void Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr);
diff --git a/src/coreclr/vm/arm64/cgencpu.h b/src/coreclr/vm/arm64/cgencpu.h
index 8f8d6a49135..83e56cfb9f9 100644
--- a/src/coreclr/vm/arm64/cgencpu.h
+++ b/src/coreclr/vm/arm64/cgencpu.h
@@ -274,7 +274,7 @@ inline NEON128 GetSimdMem(PCODE ip)
}
#ifdef FEATURE_COMINTEROP
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target);
#endif // FEATURE_COMINTEROP
inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
@@ -288,10 +288,10 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
}
//------------------------------------------------------------------------
-inline void emitJump(LPBYTE pBuffer, LPVOID target)
+inline void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
- UINT32* pCode = (UINT32*)pBuffer;
+ UINT32* pCode = (UINT32*)pBufferRW;
// We require 8-byte alignment so the LDR instruction is aligned properly
_ASSERTE(((UINT_PTR)pCode & 7) == 0);
@@ -304,7 +304,7 @@ inline void emitJump(LPBYTE pBuffer, LPVOID target)
pCode[1] = 0xD61F0200UL; // br x16
// Ensure that the updated instructions get updated in the I-Cache
- ClrFlushInstructionCache(pCode, 8);
+ ClrFlushInstructionCache(pBufferRX, 8);
*((LPVOID *)(pCode + 2)) = target; // 64-bit target address
@@ -341,10 +341,10 @@ inline BOOL isBackToBackJump(PCODE pBuffer)
}
//------------------------------------------------------------------------
-inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
- emitJump(pBuffer, target);
+ emitJump(pBufferRX, pBufferRW, target);
}
//------------------------------------------------------------------------
@@ -515,7 +515,7 @@ struct DECLSPEC_ALIGN(16) UMEntryThunkCode
TADDR m_pTargetCode;
TADDR m_pvSecretParam;
- void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
@@ -583,7 +583,7 @@ struct StubPrecode {
TADDR m_pTarget;
TADDR m_pMethodDesc;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(StubPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
TADDR GetMethodDesc()
{
@@ -606,7 +606,8 @@ struct StubPrecode {
}
CONTRACTL_END;
- InterlockedExchange64((LONGLONG*)&m_pTarget, (TADDR)GetPreStubEntryPoint());
+ ExecutableWriterHolder<StubPrecode> precodeWriterHolder(this, sizeof(StubPrecode));
+ InterlockedExchange64((LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)GetPreStubEntryPoint());
}
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
@@ -618,8 +619,9 @@ struct StubPrecode {
}
CONTRACTL_END;
+ ExecutableWriterHolder<StubPrecode> precodeWriterHolder(this, sizeof(StubPrecode));
return (TADDR)InterlockedCompareExchange64(
- (LONGLONG*)&m_pTarget, (TADDR)target, (TADDR)expected) == expected;
+ (LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)target, (TADDR)expected) == expected;
}
#ifdef FEATURE_PREJIT
@@ -643,7 +645,7 @@ struct NDirectImportPrecode {
TADDR m_pTarget;
TADDR m_pMethodDesc;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(NDirectImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
TADDR GetMethodDesc()
{
@@ -689,7 +691,7 @@ struct FixupPrecode {
BYTE m_PrecodeChunkIndex;
TADDR m_pTarget;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+ void Init(FixupPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
void InitCommon()
{
WRAPPER_NO_CONTRACT;
@@ -730,7 +732,8 @@ struct FixupPrecode {
}
CONTRACTL_END;
- InterlockedExchange64((LONGLONG*)&m_pTarget, (TADDR)GetEEFuncEntryPoint(PrecodeFixupThunk));
+ ExecutableWriterHolder<FixupPrecode> precodeWriterHolder(this, sizeof(FixupPrecode));
+ InterlockedExchange64((LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)GetEEFuncEntryPoint(PrecodeFixupThunk));
}
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
@@ -742,8 +745,9 @@ struct FixupPrecode {
}
CONTRACTL_END;
+ ExecutableWriterHolder<FixupPrecode> precodeWriterHolder(this, sizeof(FixupPrecode));
return (TADDR)InterlockedCompareExchange64(
- (LONGLONG*)&m_pTarget, (TADDR)target, (TADDR)expected) == expected;
+ (LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)target, (TADDR)expected) == expected;
}
static BOOL IsFixupPrecodeByASM(PCODE addr)
@@ -802,8 +806,9 @@ struct ThisPtrRetBufPrecode {
}
CONTRACTL_END;
+ ExecutableWriterHolder<ThisPtrRetBufPrecode> precodeWriterHolder(this, sizeof(ThisPtrRetBufPrecode));
return (TADDR)InterlockedCompareExchange64(
- (LONGLONG*)&m_pTarget, (TADDR)target, (TADDR)expected) == expected;
+ (LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)target, (TADDR)expected) == expected;
}
};
typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
@@ -871,7 +876,7 @@ private:
#ifndef DACCESS_COMPILE
public:
- CallCountingStubShort(CallCount *remainingCallCountCell, PCODE targetForMethod)
+ CallCountingStubShort(CallCountingStubShort* stubRX, CallCount *remainingCallCountCell, PCODE targetForMethod)
: m_part0{ 0x58000149, // ldr x9, [pc, #(m_remainingCallCountCell)]
0x7940012a, // ldrh w10, [x9]
0x7100054a, // subs w10, w10, #1
diff --git a/src/coreclr/vm/arm64/stubs.cpp b/src/coreclr/vm/arm64/stubs.cpp
index 096a202af2c..a9fbb6df54f 100644
--- a/src/coreclr/vm/arm64/stubs.cpp
+++ b/src/coreclr/vm/arm64/stubs.cpp
@@ -58,7 +58,7 @@ class ConditionalBranchInstructionFormat : public InstructionFormat
// Encoding 0|1|0|1|0|1|0|0|imm19|0|cond
// cond = Bits3-0(variation)
// imm19 = bits19-0(fixedUpReference/4), will be SignExtended
- virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT;
@@ -70,10 +70,10 @@ class ConditionalBranchInstructionFormat : public InstructionFormat
_ASSERTE((fixedUpReference & 0x3) == 0);
DWORD imm19 = (DWORD)(0x7FFFF & (fixedUpReference >> 2));
- pOutBuffer[0] = static_cast<BYTE>((0x7 & imm19 /* Bits2-0(imm19) */) << 5 | (0xF & variationCode /* cond */));
- pOutBuffer[1] = static_cast<BYTE>((0x7F8 & imm19 /* Bits10-3(imm19) */) >> 3);
- pOutBuffer[2] = static_cast<BYTE>((0x7F800 & imm19 /* Bits19-11(imm19) */) >> 11);
- pOutBuffer[3] = static_cast<BYTE>(0x54);
+ pOutBufferRW[0] = static_cast<BYTE>((0x7 & imm19 /* Bits2-0(imm19) */) << 5 | (0xF & variationCode /* cond */));
+ pOutBufferRW[1] = static_cast<BYTE>((0x7F8 & imm19 /* Bits10-3(imm19) */) >> 3);
+ pOutBufferRW[2] = static_cast<BYTE>((0x7F800 & imm19 /* Bits19-11(imm19) */) >> 11);
+ pOutBufferRW[3] = static_cast<BYTE>(0x54);
}
};
@@ -148,14 +148,14 @@ class BranchInstructionFormat : public InstructionFormat
}
}
- virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT;
if (IsIndirect(variationCode))
{
_ASSERTE(((UINT_PTR)pDataBuffer & 7) == 0);
- __int64 dataOffset = pDataBuffer - pOutBuffer;
+ __int64 dataOffset = pDataBuffer - pOutBufferRW;
if (dataOffset < -1048576 || dataOffset > 1048572)
COMPlusThrow(kNotSupportedException);
@@ -165,25 +165,25 @@ class BranchInstructionFormat : public InstructionFormat
// +0: ldr x16, [pc, #dataOffset]
// +4: ldr x16, [x16]
// +8: b(l)r x16
- *((DWORD*)pOutBuffer) = (0x58000010 | (imm19 << 5));
- *((DWORD*)(pOutBuffer+4)) = 0xF9400210;
+ *((DWORD*)pOutBufferRW) = (0x58000010 | (imm19 << 5));
+ *((DWORD*)(pOutBufferRW+4)) = 0xF9400210;
if (IsCall(variationCode))
{
- *((DWORD*)(pOutBuffer+8)) = 0xD63F0200; // blr x16
+ *((DWORD*)(pOutBufferRW+8)) = 0xD63F0200; // blr x16
}
else
{
- *((DWORD*)(pOutBuffer+8)) = 0xD61F0200; // br x16
+ *((DWORD*)(pOutBufferRW+8)) = 0xD61F0200; // br x16
}
- *((__int64*)pDataBuffer) = fixedUpReference + (__int64)pOutBuffer;
+ *((__int64*)pDataBuffer) = fixedUpReference + (__int64)pOutBufferRX;
}
else
{
_ASSERTE(((UINT_PTR)pDataBuffer & 7) == 0);
- __int64 dataOffset = pDataBuffer - pOutBuffer;
+ __int64 dataOffset = pDataBuffer - pOutBufferRW;
if (dataOffset < -1048576 || dataOffset > 1048572)
COMPlusThrow(kNotSupportedException);
@@ -192,17 +192,17 @@ class BranchInstructionFormat : public InstructionFormat
// +0: ldr x16, [pc, #dataOffset]
// +4: b(l)r x16
- *((DWORD*)pOutBuffer) = (0x58000010 | (imm19 << 5));
+ *((DWORD*)pOutBufferRW) = (0x58000010 | (imm19 << 5));
if (IsCall(variationCode))
{
- *((DWORD*)(pOutBuffer+4)) = 0xD63F0200; // blr x16
+ *((DWORD*)(pOutBufferRW+4)) = 0xD63F0200; // blr x16
}
else
{
- *((DWORD*)(pOutBuffer+4)) = 0xD61F0200; // br x16
+ *((DWORD*)(pOutBufferRW+4)) = 0xD61F0200; // br x16
}
- if (!ClrSafeInt<__int64>::addition(fixedUpReference, (__int64)pOutBuffer, fixedUpReference))
+ if (!ClrSafeInt<__int64>::addition(fixedUpReference, (__int64)pOutBufferRX, fixedUpReference))
COMPlusThrowArithmetic();
*((__int64*)pDataBuffer) = fixedUpReference;
}
@@ -239,7 +239,7 @@ class LoadFromLabelInstructionFormat : public InstructionFormat
return fExternal;
}
- virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT;
// VariationCode is used to indicate the register the label is going to be loaded
@@ -252,11 +252,11 @@ class LoadFromLabelInstructionFormat : public InstructionFormat
_ASSERTE((variationCode & 0x1F) != 31);
// adrp Xt, #Page_of_fixedUpReference
- *((DWORD*)pOutBuffer) = ((9<<28) | ((imm & 3)<<29) | (imm>>2)<<5 | (variationCode&0x1F));
+ *((DWORD*)pOutBufferRW) = ((9<<28) | ((imm & 3)<<29) | (imm>>2)<<5 | (variationCode&0x1F));
// ldr Xt, [Xt, #offset_of_fixedUpReference_to_its_page]
- UINT64 target = (UINT64)(fixedUpReference + pOutBuffer)>>3;
- *((DWORD*)(pOutBuffer+4)) = ( 0xF9400000 | ((target & 0x1FF)<<10) | (variationCode & 0x1F)<<5 | (variationCode & 0x1F));
+ UINT64 target = (UINT64)(fixedUpReference + pOutBufferRX)>>3;
+ *((DWORD*)(pOutBufferRW+4)) = ( 0xF9400000 | ((target & 0x1FF)<<10) | (variationCode & 0x1F)<<5 | (variationCode & 0x1F));
}
};
@@ -567,7 +567,7 @@ void FixupPrecode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
#endif // DACCESS_COMPILE
#ifndef DACCESS_COMPILE
-void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+void StubPrecode::Init(StubPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
@@ -604,7 +604,7 @@ void StubPrecode::Fixup(DataImage *image)
}
#endif // FEATURE_NATIVE_IMAGE_GENERATION
-void NDirectImportPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+void NDirectImportPrecode::Init(NDirectImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
@@ -641,7 +641,7 @@ void NDirectImportPrecode::Fixup(DataImage *image)
}
#endif
-void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
+void FixupPrecode::Init(FixupPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
{
WRAPPER_NO_CONTRACT;
@@ -670,7 +670,7 @@ void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int
*(void**)GetBase() = (BYTE*)pMD - (iMethodDescChunkIndex * MethodDesc::ALIGNMENT);
}
- _ASSERTE(GetMethodDesc() == (TADDR)pMD);
+ _ASSERTE(pPrecodeRX->GetMethodDesc() == (TADDR)pMD);
if (pLoaderAllocator != NULL)
{
@@ -1034,7 +1034,7 @@ void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
#ifdef FEATURE_COMINTEROP
-void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
WRAPPER_NO_CONTRACT;
@@ -1051,16 +1051,17 @@ void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
0xd61f0140
};
- BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
- memcpy(pBuffer, rgCode, sizeof(rgCode));
- *((PCODE*)(pBuffer + sizeof(rgCode) + 4)) = target;
+ memcpy(pBufferRW, rgCode, sizeof(rgCode));
+ *((PCODE*)(pBufferRW + sizeof(rgCode) + 4)) = target;
// Ensure that the updated instructions get actually written
- ClrFlushInstructionCache(pBuffer, COMMETHOD_CALL_PRESTUB_SIZE);
+ ClrFlushInstructionCache(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE);
- _ASSERTE(IS_ALIGNED(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
- *((PCODE*)(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
+ _ASSERTE(IS_ALIGNED(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
+ *((PCODE*)(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
}
#endif // FEATURE_COMINTEROP
@@ -1224,7 +1225,7 @@ UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
return NULL;
}
-void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
#if defined(HOST_OSX) && defined(HOST_ARM64)
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
@@ -1244,7 +1245,7 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
m_pTargetCode = (TADDR)pTargetCode;
m_pvSecretParam = (TADDR)pvSecretParam;
- FlushInstructionCache(GetCurrentProcess(),&m_code,sizeof(m_code));
+ FlushInstructionCache(GetCurrentProcess(),&pEntryThunkCodeRX->m_code,sizeof(m_code));
}
#ifndef DACCESS_COMPILE
@@ -1254,11 +1255,14 @@ void UMEntryThunkCode::Poison()
#if defined(HOST_OSX) && defined(HOST_ARM64)
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+ ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode));
+ UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();
- m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;
+ pThisRW->m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;
// ldp x16, x0, [x12]
- m_code[1] = 0xa9400190;
+ pThisRW->m_code[1] = 0xa9400190;
+
ClrFlushInstructionCache(&m_code,sizeof(m_code));
}
@@ -1869,6 +1873,7 @@ void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
BYTE * p = pStart; \
+ static const size_t rxOffset = 0; \
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#define END_DYNAMIC_HELPER_EMIT() \
@@ -1880,14 +1885,17 @@ void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
- BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \
+ BYTE * pStart = startWriterHolder.GetRW(); \
+ size_t rxOffset = pStartRX - pStart; \
BYTE * p = pStart;
#define END_DYNAMIC_HELPER_EMIT() \
_ASSERTE(pStart + cb == p); \
while (p < pStart + cbAligned) { *(DWORD*)p = 0xBADC0DF0; p += 4; }\
- ClrFlushInstructionCache(pStart, cbAligned); \
- return (PCODE)pStart
+ ClrFlushInstructionCache(pStartRX, cbAligned); \
+ return (PCODE)pStartRX
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
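The two BEGIN_DYNAMIC_HELPER_EMIT variants above reflect two W^X strategies: on macOS arm64 the same address is flipped between writeable and executable per thread via PAL_JITWriteEnable (so rxOffset is a constant 0), while other platforms use a separate RW mapping. A hedged sketch of the macOS-style toggle, assuming Apple's pthread_jit_write_protect_np API; the holder class itself is hypothetical:

#include <pthread.h>   // pthread_jit_write_protect_np (macOS 11+, MAP_JIT memory)

// RAII wrapper: MAP_JIT pages are writeable for this thread while the holder
// lives, executable again when it is destroyed. RW and RX share one address,
// which is why the OSX macro variant hardcodes rxOffset = 0.
class JitWriteEnableHolder
{
public:
    JitWriteEnableHolder()  { pthread_jit_write_protect_np(0); }  // allow writes
    ~JitWriteEnableHolder() { pthread_jit_write_protect_np(1); }  // re-protect
};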
// Uses x8 as scratch register to store address of data label
@@ -1932,7 +1940,7 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCOD
}
// Caller must ensure sufficient byte are allocated including padding (if applicable)
-void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
STANDARD_VM_CONTRACT;
@@ -1970,7 +1978,7 @@ PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR ar
BEGIN_DYNAMIC_HELPER_EMIT(32);
- EmitHelperWithArg(p, pAllocator, arg, target);
+ EmitHelperWithArg(p, rxOffset, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
@@ -2172,9 +2180,10 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
- pArgs->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
- pArgs->signature = pLookup->signature;
- pArgs->module = (CORINFO_MODULE_HANDLE)pModule;
+ ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs));
+ argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
+ argsWriterHolder.GetRW()->signature = pLookup->signature;
+ argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule;
WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*);
@@ -2187,7 +2196,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// reuse EmitHelperWithArg for below two operations
// X1 <- pArgs
// branch to helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
END_DYNAMIC_HELPER_EMIT();
}
@@ -2325,7 +2334,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// reuse EmitHelperWithArg for below two operations
// X1 <- pArgs
// branch to helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
}
// datalabel:
diff --git a/src/coreclr/vm/arm64/virtualcallstubcpu.hpp b/src/coreclr/vm/arm64/virtualcallstubcpu.hpp
index 890cf39cbaa..bdb78617e84 100644
--- a/src/coreclr/vm/arm64/virtualcallstubcpu.hpp
+++ b/src/coreclr/vm/arm64/virtualcallstubcpu.hpp
@@ -46,7 +46,7 @@ private:
public:
static void InitializeStatic() { }
- void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+ void Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
{
// adr x9, _resolveWorkerTarget
// ldp x10, x12, [x9]
@@ -80,7 +80,7 @@ struct DispatchStub
LIMITED_METHOD_CONTRACT;
_ASSERTE(slotTypeRef != nullptr);
- *slotTypeRef = EntryPointSlots::SlotType_Normal;
+ *slotTypeRef = EntryPointSlots::SlotType_Executable;
return (TADDR)&_implTarget;
}
@@ -106,7 +106,7 @@ struct DispatchHolder
static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub, _implTarget)) % sizeof(void *)) == 0);
}
- void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
+ void Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT)
{
// ldr x13, [x0] ; methodTable from object in x0
// adr x9, _expectedMT ; _expectedMT is at offset 28 from pc
@@ -180,7 +180,8 @@ struct ResolveHolder
{
static void InitializeStatic() { }
- void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ void Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr)
{
diff --git a/src/coreclr/vm/array.cpp b/src/coreclr/vm/array.cpp
index 8ef78b688a9..ae91f700fbc 100644
--- a/src/coreclr/vm/array.cpp
+++ b/src/coreclr/vm/array.cpp
@@ -1191,6 +1191,11 @@ class ArrayStubCache : public StubCacheBase
virtual UINT Length(const BYTE *pRawStub);
public:
+public:
+ ArrayStubCache(LoaderHeap* heap) : StubCacheBase(heap)
+ {
+ }
+
static ArrayStubCache * GetArrayStubCache()
{
STANDARD_VM_CONTRACT;
@@ -1199,7 +1204,7 @@ public:
if (s_pArrayStubCache == NULL)
{
- ArrayStubCache * pArrayStubCache = new ArrayStubCache();
+ ArrayStubCache * pArrayStubCache = new ArrayStubCache(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
if (FastInterlockCompareExchangePointer(&s_pArrayStubCache, pArrayStubCache, NULL) != NULL)
delete pArrayStubCache;
}
diff --git a/src/coreclr/vm/callcounting.cpp b/src/coreclr/vm/callcounting.cpp
index 1a19c43a67f..80108bd15aa 100644
--- a/src/coreclr/vm/callcounting.cpp
+++ b/src/coreclr/vm/callcounting.cpp
@@ -281,7 +281,9 @@ const CallCountingStub *CallCountingManager::CallCountingStubAllocator::Allocate
if (CallCountingStubShort::CanUseFor(allocationAddressHolder, targetForMethod))
#endif
{
- stub = new(allocationAddressHolder) CallCountingStubShort(remainingCallCountCell, targetForMethod);
+ ExecutableWriterHolder<void> writerHolder(allocationAddressHolder, sizeInBytes);
+ new(writerHolder.GetRW()) CallCountingStubShort((CallCountingStubShort*)(void*)allocationAddressHolder, remainingCallCountCell, targetForMethod);
+ stub = (CallCountingStub*)(void*)allocationAddressHolder;
allocationAddressHolder.SuppressRelease();
break;
}
@@ -290,7 +292,9 @@ const CallCountingStub *CallCountingManager::CallCountingStubAllocator::Allocate
#ifdef TARGET_AMD64
sizeInBytes = sizeof(CallCountingStubLong);
void *allocationAddress = (void *)heap->AllocAlignedMem(sizeInBytes, CallCountingStub::Alignment);
- stub = new(allocationAddress) CallCountingStubLong(remainingCallCountCell, targetForMethod);
+ ExecutableWriterHolder<void> writerHolder(allocationAddress, sizeInBytes);
+ new(writerHolder.GetRW()) CallCountingStubLong(remainingCallCountCell, targetForMethod);
+ stub = (CallCountingStub*)allocationAddress;
#else
UNREACHABLE();
#endif
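
The call counting change follows the allocate/construct/publish pattern that recurs throughout this commit. Distilled into a sketch (SomeStub, pHeap, stubSize and stubAlignment are placeholders; the holder is assumed to map the range writeable for its lifetime):

    void *pStubRX = pHeap->AllocAlignedMem(stubSize, stubAlignment);
    {
        ExecutableWriterHolder<void> writer(pStubRX, stubSize);
        new (writer.GetRW()) SomeStub(/* ctor args */);  // placement-new into the RW alias
    }                                                    // RW mapping torn down here
    return (SomeStub *)pStubRX;                          // only the RX pointer escapes
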
diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp
index d95c02ac949..1dff1e92480 100644
--- a/src/coreclr/vm/ceeload.cpp
+++ b/src/coreclr/vm/ceeload.cpp
@@ -6329,13 +6329,16 @@ void Module::FixupVTables()
(UINT_PTR)&(pPointers[iMethod]), pMD->m_pszDebugMethodName, pMD));
UMEntryThunk *pUMEntryThunk = (UMEntryThunk*)(void*)(GetDllThunkHeap()->AllocAlignedMem(sizeof(UMEntryThunk), CODE_SIZE_ALIGN)); // UMEntryThunk contains code
- FillMemory(pUMEntryThunk, sizeof(*pUMEntryThunk), 0);
+ ExecutableWriterHolder<UMEntryThunk> uMEntryThunkWriterHolder(pUMEntryThunk, sizeof(UMEntryThunk));
+ FillMemory(uMEntryThunkWriterHolder.GetRW(), sizeof(UMEntryThunk), 0);
UMThunkMarshInfo *pUMThunkMarshInfo = (UMThunkMarshInfo*)(void*)(GetThunkHeap()->AllocAlignedMem(sizeof(UMThunkMarshInfo), CODE_SIZE_ALIGN));
- FillMemory(pUMThunkMarshInfo, sizeof(*pUMThunkMarshInfo), 0);
+ ExecutableWriterHolder<UMThunkMarshInfo> uMThunkMarshInfoWriterHolder(pUMThunkMarshInfo, sizeof(UMThunkMarshInfo));
+ FillMemory(uMThunkMarshInfoWriterHolder.GetRW(), sizeof(UMThunkMarshInfo), 0);
+
+ uMThunkMarshInfoWriterHolder.GetRW()->LoadTimeInit(pMD);
+ uMEntryThunkWriterHolder.GetRW()->LoadTimeInit(pUMEntryThunk, NULL, NULL, pUMThunkMarshInfo, pMD);
- pUMThunkMarshInfo->LoadTimeInit(pMD);
- pUMEntryThunk->LoadTimeInit(NULL, NULL, pUMThunkMarshInfo, pMD);
SetTargetForVTableEntry(hInstThis, (BYTE **)&pPointers[iMethod], (BYTE *)pUMEntryThunk->GetCode());
pData->MarkMethodFixedUp(iFixup, iMethod);
diff --git a/src/coreclr/vm/ceemain.cpp b/src/coreclr/vm/ceemain.cpp
index eb31d0f928d..6ccf233d9a3 100644
--- a/src/coreclr/vm/ceemain.cpp
+++ b/src/coreclr/vm/ceemain.cpp
@@ -489,15 +489,9 @@ void InitGSCookie()
volatile GSCookie * pGSCookiePtr = GetProcessGSCookiePtr();
-#ifdef TARGET_UNIX
- // On Unix, the GS cookie is stored in a read only data segment
- DWORD newProtection = PAGE_READWRITE;
-#else // TARGET_UNIX
- DWORD newProtection = PAGE_EXECUTE_READWRITE;
-#endif // !TARGET_UNIX
-
+ // The GS cookie is stored in a read only data segment
DWORD oldProtection;
- if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), newProtection, &oldProtection))
+ if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), PAGE_READWRITE, &oldProtection))
{
ThrowLastError();
}
diff --git a/src/coreclr/vm/clrtocomcall.cpp b/src/coreclr/vm/clrtocomcall.cpp
index f8339b0ccc5..e56d6621939 100644
--- a/src/coreclr/vm/clrtocomcall.cpp
+++ b/src/coreclr/vm/clrtocomcall.cpp
@@ -1014,17 +1014,20 @@ LPVOID ComPlusCall::GetRetThunk(UINT numStackBytes)
{
// cache miss -> create a new thunk
AllocMemTracker dummyAmTracker;
- pRetThunk = (LPVOID)dummyAmTracker.Track(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T((numStackBytes == 0) ? 1 : 3)));
+ size_t thunkSize = (numStackBytes == 0) ? 1 : 3;
+ pRetThunk = (LPVOID)dummyAmTracker.Track(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(thunkSize)));
+
+ ExecutableWriterHolder<BYTE> thunkWriterHolder((BYTE *)pRetThunk, thunkSize);
+ BYTE *pThunkRW = thunkWriterHolder.GetRW();
- BYTE *pThunk = (BYTE *)pRetThunk;
if (numStackBytes == 0)
{
- pThunk[0] = 0xc3;
+ pThunkRW[0] = 0xc3;
}
else
{
- pThunk[0] = 0xc2;
- *(USHORT *)&pThunk[1] = (USHORT)numStackBytes;
+ pThunkRW[0] = 0xc2;
+ *(USHORT *)&pThunkRW[1] = (USHORT)numStackBytes;
}
// add it to the cache
diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp
index 06b942e8337..0aabeda4b2c 100644
--- a/src/coreclr/vm/codeman.cpp
+++ b/src/coreclr/vm/codeman.cpp
@@ -1981,11 +1981,14 @@ void CodeFragmentHeap::RealBackoutMem(void *pMem
{
CrstHolder ch(&m_CritSec);
+ {
#if defined(HOST_OSX) && defined(HOST_ARM64)
- auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
- ZeroMemory((BYTE *)pMem, dwSize);
+ ExecutableWriterHolder<BYTE> memWriterHolder((BYTE*)pMem, dwSize);
+ ZeroMemory(memWriterHolder.GetRW(), dwSize);
+ }
//
// Try to coalesce blocks if possible
@@ -2300,7 +2303,8 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
));
#ifdef TARGET_64BIT
- emitJump(pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
+ ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12);
+ emitJump(pHp->CLRPersonalityRoutine, personalityRoutineWriterHolder.GetRW(), (void *)ProcessCLRException);
#endif // TARGET_64BIT
pCodeHeap.SuppressRelease();
@@ -3001,13 +3005,13 @@ JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD n
requestInfo.setThrowOnOutOfMemoryWithinRange(throwOnOutOfMemoryWithinRange);
TADDR mem;
- JumpStubBlockHeader * pBlock;
+ ExecutableWriterHolder<JumpStubBlockHeader> blockWriterHolder;
// Scope the lock
{
CrstHolder ch(&m_CodeHeapCritSec);
- mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(TADDR), blockSize, CODE_SIZE_ALIGN, &pCodeHeap);
+ mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, CODE_SIZE_ALIGN, &pCodeHeap);
if (mem == NULL)
{
_ASSERTE(!throwOnOutOfMemoryWithinRange);
@@ -3016,27 +3020,28 @@ JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD n
// CodeHeader comes immediately before the block
CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
- pCodeHdr->SetStubCodeBlockKind(STUB_CODE_BLOCK_JUMPSTUB);
+ ExecutableWriterHolder<CodeHeader> codeHdrWriterHolder(pCodeHdr, sizeof(CodeHeader));
+ codeHdrWriterHolder.GetRW()->SetStubCodeBlockKind(STUB_CODE_BLOCK_JUMPSTUB);
NibbleMapSetUnlocked(pCodeHeap, mem, TRUE);
- pBlock = (JumpStubBlockHeader *)mem;
+ blockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>((JumpStubBlockHeader *)mem, sizeof(JumpStubBlockHeader));
- _ASSERTE(IS_ALIGNED(pBlock, CODE_SIZE_ALIGN));
+ _ASSERTE(IS_ALIGNED(blockWriterHolder.GetRW(), CODE_SIZE_ALIGN));
}
- pBlock->m_next = NULL;
- pBlock->m_used = 0;
- pBlock->m_allocated = numJumps;
+ blockWriterHolder.GetRW()->m_next = NULL;
+ blockWriterHolder.GetRW()->m_used = 0;
+ blockWriterHolder.GetRW()->m_allocated = numJumps;
if (pMD && pMD->IsLCGMethod())
- pBlock->SetHostCodeHeap(static_cast<HostCodeHeap*>(pCodeHeap->pHeap));
+ blockWriterHolder.GetRW()->SetHostCodeHeap(static_cast<HostCodeHeap*>(pCodeHeap->pHeap));
else
- pBlock->SetLoaderAllocator(pLoaderAllocator);
+ blockWriterHolder.GetRW()->SetLoaderAllocator(pLoaderAllocator);
LOG((LF_JIT, LL_INFO1000, "Allocated new JumpStubBlockHeader for %d stubs at" FMT_ADDR " in loader allocator " FMT_ADDR "\n",
- numJumps, DBG_ADDR(pBlock) , DBG_ADDR(pLoaderAllocator) ));
+ numJumps, DBG_ADDR(mem) , DBG_ADDR(pLoaderAllocator) ));
- RETURN(pBlock);
+ RETURN((JumpStubBlockHeader*)mem);
}
void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind)
@@ -3067,7 +3072,8 @@ void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment
// CodeHeader comes immediately before the block
CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
- pCodeHdr->SetStubCodeBlockKind(kind);
+ ExecutableWriterHolder<CodeHeader> codeHdrWriterHolder(pCodeHdr, sizeof(CodeHeader));
+ codeHdrWriterHolder.GetRW()->SetStubCodeBlockKind(kind);
NibbleMapSetUnlocked(pCodeHeap, mem, TRUE);
@@ -3574,10 +3580,6 @@ void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
(const BYTE*)pHeapList->startAddress,
(const BYTE*)pHeapList->endAddress ));
- // pHeapList is allocated in pHeap, so only need to delete the CodeHeap itself
- // !!! For SoC, compiler inserts code to write a special cookie at pHeapList->pHeap after delete operator, at least for debug code.
- // !!! Since pHeapList is deleted at the same time as pHeap, this causes AV.
- // delete pHeapList->pHeap;
CodeHeap* pHeap = pHeapList->pHeap;
delete pHeap;
delete pHeapList;
@@ -5130,9 +5132,18 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange);
} CONTRACT_END;
- DWORD numJumpStubs = DEFAULT_JUMPSTUBS_PER_BLOCK; // a block of 32 JumpStubs
BYTE * jumpStub = NULL;
+ BYTE * jumpStubRW = NULL;
bool isLCG = pMD && pMD->IsLCGMethod();
+ // For LCG we request a small block of 4 jumpstubs, because we cannot share them
+ // with any other methods and very frequently our method only needs one jump stub.
+ // Using 4 gives a request size of (32 + 4*12) or 80 bytes.
+ // Also note that request sizes are rounded up to a multiple of 16.
+ // The request size is calculated into 'blockSize' in allocJumpStubBlock.
+ // For x64 the value of BACK_TO_BACK_JUMP_ALLOCATE_SIZE is 12 bytes
+ // and the sizeof(JumpStubBlockHeader) is 32.
+ //
+ DWORD numJumpStubs = isLCG ? 4 : DEFAULT_JUMPSTUBS_PER_BLOCK;
JumpStubCache * pJumpStubCache = (JumpStubCache *) pLoaderAllocator->m_pJumpStubCache;
if (isLCG)
@@ -5144,6 +5155,7 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
JumpStubBlockHeader ** ppHead = &(pJumpStubCache->m_pBlocks);
JumpStubBlockHeader * curBlock = *ppHead;
+ ExecutableWriterHolder<JumpStubBlockHeader> curBlockWriterHolder;
// allocate a new jumpstub from 'curBlock' if it is not fully allocated
//
@@ -5158,6 +5170,9 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
if ((loAddr <= jumpStub) && (jumpStub <= hiAddr))
{
// We will update curBlock->m_used at "DONE"
+ size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumpStubs * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;
+ curBlockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>(curBlock, blockSize);
+ jumpStubRW = (BYTE *)((TADDR)jumpStub + (TADDR)curBlockWriterHolder.GetRW() - (TADDR)curBlock);
goto DONE;
}
}
@@ -5168,17 +5183,6 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
if (isLCG)
{
- // For LCG we request a small block of 4 jumpstubs, because we can not share them
- // with any other methods and very frequently our method only needs one jump stub.
- // Using 4 gives a request size of (32 + 4*12) or 80 bytes.
- // Also note that request sizes are rounded up to a multiples of 16.
- // The request size is calculated into 'blockSize' in allocJumpStubBlock.
- // For x64 the value of BACK_TO_BACK_JUMP_ALLOCATE_SIZE is 12 bytes
- // and the sizeof(JumpStubBlockHeader) is 32.
- //
-
- numJumpStubs = 4;
-
#ifdef TARGET_AMD64
// Note that these values are not requirements; instead we are
// just confirming the values that are mentioned in the comments.
@@ -5207,11 +5211,14 @@ PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
RETURN(NULL);
}
+ curBlockWriterHolder = ExecutableWriterHolder<JumpStubBlockHeader>(curBlock, sizeof(JumpStubBlockHeader) + ((size_t) (curBlock->m_used + 1) * BACK_TO_BACK_JUMP_ALLOCATE_SIZE));
+
+ jumpStubRW = (BYTE *) curBlockWriterHolder.GetRW() + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
_ASSERTE((loAddr <= jumpStub) && (jumpStub <= hiAddr));
- curBlock->m_next = *ppHead;
+ curBlockWriterHolder.GetRW()->m_next = *ppHead;
*ppHead = curBlock;
DONE:
@@ -5223,7 +5230,7 @@ DONE:
_ASSERTE(((UINT_PTR)jumpStub & 7) == 0);
#endif
- emitBackToBackJump(jumpStub, (void*) target);
+ emitBackToBackJump(jumpStub, jumpStubRW, (void*) target);
#ifdef FEATURE_PERFMAP
PerfMap::LogStubs(__FUNCTION__, "emitBackToBackJump", (PCODE)jumpStub, BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
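
The jumpStubRW computation above generalizes: once one holder covers a whole block, the delta between the RW and RX base addresses translates any interior pointer, so a single mapping serves every write into the block. A condensed restatement, assuming the holder semantics described in the commit message:

    ExecutableWriterHolder<JumpStubBlockHeader> holder(curBlock, blockSize);
    TADDR delta = (TADDR)holder.GetRW() - (TADDR)curBlock;  // constant for this mapping
    BYTE *jumpStubRW = (BYTE *)((TADDR)jumpStub + delta);   // same byte, writeable view
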
@@ -5240,7 +5247,7 @@ DONE:
pJumpStubCache->m_Table.Add(entry);
- curBlock->m_used++; // record that we have used up one more jumpStub in the block
+ curBlockWriterHolder.GetRW()->m_used++; // record that we have used up one more jumpStub in the block
// Every time we create a new jumpStub thunk one of these counters is incremented
if (isLCG)
diff --git a/src/coreclr/vm/comcallablewrapper.cpp b/src/coreclr/vm/comcallablewrapper.cpp
index 018cb91076e..eb5b5abd368 100644
--- a/src/coreclr/vm/comcallablewrapper.cpp
+++ b/src/coreclr/vm/comcallablewrapper.cpp
@@ -553,10 +553,11 @@ extern "C" PCODE ComPreStubWorker(ComPrestubMethodFrame *pPFrame, UINT64 *pError
UINT_PTR* ppofs = (UINT_PTR*) (((BYTE*)pCMD) - COMMETHOD_CALL_PRESTUB_SIZE + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET);
+ ExecutableWriterHolder<UINT_PTR> ppofsWriterHolder(ppofs, sizeof(UINT_PTR));
#ifdef TARGET_X86
- *ppofs = ((UINT_PTR)pStub - (size_t)pCMD);
+ *ppofsWriterHolder.GetRW() = ((UINT_PTR)pStub - (size_t)pCMD);
#else
- *ppofs = ((UINT_PTR)pStub);
+ *ppofsWriterHolder.GetRW() = ((UINT_PTR)pStub);
#endif
// Return the address of the prepad. The prepad will regenerate the hidden parameter and due
@@ -3213,6 +3214,7 @@ void ComMethodTable::LayOutClassMethodTable()
unsigned cbAlloc = 0;
NewExecutableHolder<BYTE> pMDMemoryPtr = NULL;
BYTE* pMethodDescMemory = NULL;
+ size_t writeableOffset = 0;
unsigned cbNumParentVirtualMethods = 0;
unsigned cbTotalParentFields = 0;
unsigned cbParentComMTSlots = 0;
@@ -3309,6 +3311,7 @@ void ComMethodTable::LayOutClassMethodTable()
if (!m_pMT->HasGenericClassInstantiationInHierarchy())
{
+ ExecutableWriterHolder<BYTE> methodDescMemoryWriteableHolder;
//
// Allocate method descs for the rest of the slots.
//
@@ -3317,12 +3320,15 @@ void ComMethodTable::LayOutClassMethodTable()
if (cbAlloc > 0)
{
pMDMemoryPtr = (BYTE*) new (executable) BYTE[cbAlloc + sizeof(UINT_PTR)];
- pMethodDescMemory = (BYTE*)pMDMemoryPtr;
+ pMethodDescMemory = pMDMemoryPtr;
+
+ methodDescMemoryWriteableHolder = ExecutableWriterHolder<BYTE>(pMethodDescMemory, cbAlloc + sizeof(UINT_PTR));
+ writeableOffset = methodDescMemoryWriteableHolder.GetRW() - pMethodDescMemory;
// initialize the method desc memory to zero
- FillMemory(pMethodDescMemory, cbAlloc, 0x0);
+ FillMemory(pMethodDescMemory + writeableOffset, cbAlloc, 0x0);
- *(UINT_PTR *)pMethodDescMemory = cbMethodDescs; // fill in the size of the method desc's
+ *(UINT_PTR *)(pMethodDescMemory + writeableOffset) = cbMethodDescs; // fill in the size of the method descs
// move past the size
pMethodDescMemory += sizeof(UINT_PTR);
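
The writeableOffset idiom used here avoids creating one holder per ComCallMethodDesc: a single holder spans the whole allocation, and each derived RX pointer gets its RW twin by adding the same offset. Sketch (pMemoryRX and cbAlloc are stand-ins for the locals above):

    ExecutableWriterHolder<BYTE> holder(pMemoryRX, cbAlloc);
    size_t writeableOffset = holder.GetRW() - pMemoryRX;
    ComCallMethodDesc *pNewMD   = (ComCallMethodDesc *)(pMemoryRX + COMMETHOD_PREPAD);
    ComCallMethodDesc *pNewMDRW = (ComCallMethodDesc *)(pMemoryRX + writeableOffset + COMMETHOD_PREPAD);
    pNewMDRW->InitMethod(pMD, NULL);  // write via RW; publish pNewMD (RX) everywhere else
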
@@ -3396,11 +3402,12 @@ void ComMethodTable::LayOutClassMethodTable()
{
// some bytes are reserved for CALL xxx before the method desc
ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ ComCallMethodDesc* pNewMDRW = (ComCallMethodDesc *) (pMethodDescMemory + writeableOffset + COMMETHOD_PREPAD);
NewCOMMethodDescs.Append(pNewMD);
- pNewMD->InitMethod(pMD, NULL);
+ pNewMDRW->InitMethod(pMD, NULL);
- emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+ emitCOMStubCall(pNewMD, pNewMDRW, GetEEFuncEntryPoint(ComCallPreStub));
FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
@@ -3428,11 +3435,12 @@ void ComMethodTable::LayOutClassMethodTable()
{
// some bytes are reserved for CALL xxx before the method desc
ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ ComCallMethodDesc* pNewMDRW = (ComCallMethodDesc *) (pMethodDescMemory + writeableOffset + COMMETHOD_PREPAD);
NewCOMMethodDescs.Append(pNewMD);
- pNewMD->InitMethod(pMD, NULL);
+ pNewMDRW->InitMethod(pMD, NULL);
- emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+ emitCOMStubCall(pNewMD, pNewMDRW, GetEEFuncEntryPoint(ComCallPreStub));
FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
@@ -3457,11 +3465,13 @@ void ComMethodTable::LayOutClassMethodTable()
{
// some bytes are reserved for CALL xxx before the method desc
ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ ComCallMethodDesc* pNewMDRW = (ComCallMethodDesc *) (pMethodDescMemory + writeableOffset + COMMETHOD_PREPAD);
+
NewCOMMethodDescs.Append(pNewMD);
- pNewMD->InitMethod(pMD, NULL);
+ pNewMDRW->InitMethod(pMD, NULL);
- emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+ emitCOMStubCall(pNewMD, pNewMDRW, GetEEFuncEntryPoint(ComCallPreStub));
FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
@@ -3485,11 +3495,12 @@ void ComMethodTable::LayOutClassMethodTable()
// set up a getter method
// some bytes are reserved for CALL xxx before the method desc
ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ ComCallMethodDesc* pNewMDRW = (ComCallMethodDesc *) (pMethodDescMemory + writeableOffset + COMMETHOD_PREPAD);
NewCOMMethodDescs.Append(pNewMD);
- pNewMD->InitField(pFD, TRUE);
+ pNewMDRW->InitField(pFD, TRUE);
- emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+ emitCOMStubCall(pNewMD, pNewMDRW, GetEEFuncEntryPoint(ComCallPreStub));
FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
@@ -3498,11 +3509,12 @@ void ComMethodTable::LayOutClassMethodTable()
// set up a setter method
// some bytes are reserved for CALL xxx before the method desc
pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ pNewMDRW = (ComCallMethodDesc *) (pMethodDescMemory + writeableOffset + COMMETHOD_PREPAD);
NewCOMMethodDescs.Append(pNewMD);
- pNewMD->InitField(pFD, FALSE);
+ pNewMDRW->InitField(pFD, FALSE);
- emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+ emitCOMStubCall(pNewMD, pNewMDRW, GetEEFuncEntryPoint(ComCallPreStub));
FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
@@ -3520,16 +3532,18 @@ void ComMethodTable::LayOutClassMethodTable()
if (IsLayoutComplete())
return;
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(this, sizeof(ComMethodTable) + cbTempVtable.Value());
+
// IDispatch vtable follows the header
- CopyMemory(this + 1, pDispVtable, cbTempVtable.Value());
+ CopyMemory(comMTWriterHolder.GetRW() + 1, pDispVtable, cbTempVtable.Value());
// Set the layout complete flag and release the lock.
- m_Flags |= enum_LayoutComplete;
+ comMTWriterHolder.GetRW()->m_Flags |= enum_LayoutComplete;
// We've successfully laid out the class method table so we need to suppress the release of the
// memory for the ComCallMethodDescs and store it inside the ComMethodTable so we can
// release it when we clean up the ComMethodTable.
- m_pMDescr = (BYTE*)pMDMemoryPtr;
+ comMTWriterHolder.GetRW()->m_pMDescr = (BYTE*)pMDMemoryPtr;
pMDMemoryPtr.SuppressRelease();
NewCOMMethodDescsHolder.SuppressRelease();
}
@@ -3673,12 +3687,16 @@ BOOL ComMethodTable::LayOutInterfaceMethodTable(MethodTable* pClsMT)
if (IsLayoutComplete())
return TRUE;
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(this, sizeof(ComMethodTable) + cbTempVtable.Value());
+ size_t writeableOffset = (BYTE*)comMTWriterHolder.GetRW() - (BYTE*)this;
+
// IUnk vtable follows the header
- CopyMemory(this + 1, pUnkVtable, cbTempVtable.Value());
+ CopyMemory(comMTWriterHolder.GetRW() + 1, pUnkVtable, cbTempVtable.Value());
// Finish by emitting stubs and initializing the slots
pUnkVtable = (IUnkVtable *)(this + 1);
pComVtable = ((SLOT*)pUnkVtable) + cbExtraSlots;
+ SLOT *pComVtableRW = (SLOT*)((BYTE*)pComVtable + writeableOffset);
// Method descs are at the end of the vtable
// m_cbSlots interface methods + IUnk methods
@@ -3687,18 +3705,20 @@ BOOL ComMethodTable::LayOutInterfaceMethodTable(MethodTable* pClsMT)
for (i = 0; i < cbSlots; i++)
{
ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ ComCallMethodDesc* pNewMDRW = (ComCallMethodDesc *) (pMethodDescMemory + writeableOffset + COMMETHOD_PREPAD);
+
MethodDesc* pIntfMD = m_pMT->GetMethodDescForSlot(i);
- emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+ emitCOMStubCall(pNewMD, pNewMDRW, GetEEFuncEntryPoint(ComCallPreStub));
UINT slotIndex = (pIntfMD->GetComSlot() - cbExtraSlots);
- FillInComVtableSlot(pComVtable, slotIndex, pNewMD);
+ FillInComVtableSlot(pComVtableRW, slotIndex, pNewMD);
pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
}
// Set the layout complete flag and release the lock.
- m_Flags |= enum_LayoutComplete;
+ comMTWriterHolder.GetRW()->m_Flags |= enum_LayoutComplete;
NewCOMMethodDescsHolder.SuppressRelease();
}
@@ -3812,8 +3832,9 @@ DispatchInfo *ComMethodTable::GetDispatchInfo()
// Synchronize the DispatchInfo with the actual object.
pDispInfo->SynchWithManagedView();
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(this, sizeof(ComMethodTable));
// Swap the lock into the class member in a thread safe manner.
- if (NULL == FastInterlockCompareExchangePointer(&m_pDispatchInfo, pDispInfo.GetValue(), NULL))
+ if (NULL == FastInterlockCompareExchangePointer(&comMTWriterHolder.GetRW()->m_pDispatchInfo, pDispInfo.GetValue(), NULL))
pDispInfo.SuppressRelease();
}
@@ -4476,29 +4497,31 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForClass(MethodTable
_ASSERTE(!cbNewSlots.IsOverflow() && !cbTotalSlots.IsOverflow() && !cbVtable.IsOverflow());
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(pComMT, cbToAlloc.Value());
+ ComMethodTable* pComMTRW = comMTWriterHolder.GetRW();
// set up the header
- pComMT->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
- pComMT->m_pMT = pClassMT; // pointer to the class method table
- pComMT->m_cbRefCount = 0;
- pComMT->m_pMDescr = NULL;
- pComMT->m_pITypeInfo = NULL;
- pComMT->m_pDispatchInfo = NULL;
- pComMT->m_cbSlots = cbTotalSlots.Value(); // number of slots not counting IDisp methods.
- pComMT->m_IID = GUID_NULL;
+ pComMTRW->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
+ pComMTRW->m_pMT = pClassMT; // pointer to the class method table
+ pComMTRW->m_cbRefCount = 0;
+ pComMTRW->m_pMDescr = NULL;
+ pComMTRW->m_pITypeInfo = NULL;
+ pComMTRW->m_pDispatchInfo = NULL;
+ pComMTRW->m_cbSlots = cbTotalSlots.Value(); // number of slots not counting IDisp methods.
+ pComMTRW->m_IID = GUID_NULL;
// Set the flags.
- pComMT->m_Flags = enum_ClassVtableMask | ClassItfType;
+ pComMTRW->m_Flags = enum_ClassVtableMask | ClassItfType;
// Determine if the interface is visible from COM.
if (IsTypeVisibleFromCom(TypeHandle(pComMT->m_pMT)))
- pComMT->m_Flags |= enum_ComVisible;
+ pComMTRW->m_Flags |= enum_ComVisible;
#if _DEBUG
{
// In debug set all the vtable slots to 0xDEADCA11.
- SLOT *pComVTable = (SLOT*)(pComMT + 1);
+ SLOT *pComVTable = (SLOT*)(pComMTRW + 1);
for (unsigned iComSlots = 0; iComSlots < cbTotalSlots.Value() + cbExtraSlots; iComSlots++)
*(pComVTable + iComSlots) = (SLOT)(size_t)0xDEADCA11;
}
@@ -4552,34 +4575,37 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForInterface(MethodT
_ASSERTE(!cbVtable.IsOverflow() && !cbMethDescs.IsOverflow());
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(pComMT, cbToAlloc.Value());
+ ComMethodTable* pComMTRW = comMTWriterHolder.GetRW();
+
// set up the header
- pComMT->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
- pComMT->m_pMT = pInterfaceMT; // pointer to the interface's method table
- pComMT->m_cbSlots = cbComSlots; // number of slots not counting IUnk
- pComMT->m_cbRefCount = 0;
- pComMT->m_pMDescr = NULL;
- pComMT->m_pITypeInfo = NULL;
- pComMT->m_pDispatchInfo = NULL;
+ pComMTRW->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
+ pComMTRW->m_pMT = pInterfaceMT; // pointer to the interface's method table
+ pComMTRW->m_cbSlots = cbComSlots; // number of slots not counting IUnk
+ pComMTRW->m_cbRefCount = 0;
+ pComMTRW->m_pMDescr = NULL;
+ pComMTRW->m_pITypeInfo = NULL;
+ pComMTRW->m_pDispatchInfo = NULL;
// Set the flags.
- pComMT->m_Flags = ItfType;
+ pComMTRW->m_Flags = ItfType;
// Set the IID of the interface.
- pInterfaceMT->GetGuid(&pComMT->m_IID, TRUE);
- pComMT->m_Flags |= enum_GuidGenerated;
+ pInterfaceMT->GetGuid(&pComMTRW->m_IID, TRUE);
+ pComMTRW->m_Flags |= enum_GuidGenerated;
// Determine if the interface is visible from COM.
if (IsTypeVisibleFromCom(TypeHandle(pComMT->m_pMT)))
- pComMT->m_Flags |= enum_ComVisible;
+ pComMTRW->m_Flags |= enum_ComVisible;
// Determine if the interface is a COM imported class interface.
if (pItfClass->GetClass()->IsComClassInterface())
- pComMT->m_Flags |= enum_ComClassItf;
+ pComMTRW->m_Flags |= enum_ComClassItf;
#ifdef _DEBUG
{
// In debug set all the vtable slots to 0xDEADCA11.
- SLOT *pComVTable = (SLOT*)(pComMT + 1);
+ SLOT *pComVTable = (SLOT*)(pComMTRW + 1);
for (unsigned iComSlots = 0; iComSlots < cbComSlots + cbExtraSlots; iComSlots++)
*(pComVTable + iComSlots) = (SLOT)(size_t)0xDEADCA11;
}
@@ -4612,36 +4638,38 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForBasic(MethodTable
unsigned cbToAlloc = sizeof(ComMethodTable) + cbVtable;
NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc];
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(pComMT, cbToAlloc);
+ ComMethodTable* pComMTRW = comMTWriterHolder.GetRW();
// set up the header
- pComMT->m_ptReserved = (SLOT)(size_t)0xDEADC0FF;
- pComMT->m_pMT = pMT;
- pComMT->m_cbSlots = 0; // number of slots not counting IUnk
- pComMT->m_cbRefCount = 0;
- pComMT->m_pMDescr = NULL;
- pComMT->m_pITypeInfo = NULL;
- pComMT->m_pDispatchInfo = NULL;
+ pComMTRW->m_ptReserved = (SLOT)(size_t)0xDEADC0FF;
+ pComMTRW->m_pMT = pMT;
+ pComMTRW->m_cbSlots = 0; // number of slots not counting IUnk
+ pComMTRW->m_cbRefCount = 0;
+ pComMTRW->m_pMDescr = NULL;
+ pComMTRW->m_pITypeInfo = NULL;
+ pComMTRW->m_pDispatchInfo = NULL;
// Initialize the flags.
- pComMT->m_Flags = enum_IsBasic;
- pComMT->m_Flags |= enum_ClassVtableMask | ClassItfType;
+ pComMTRW->m_Flags = enum_IsBasic;
+ pComMTRW->m_Flags |= enum_ClassVtableMask | ClassItfType;
// Set the IID of the interface.
- pComMT->m_IID = IID_IUnknown;
- pComMT->m_Flags |= enum_GuidGenerated;
+ pComMTRW->m_IID = IID_IUnknown;
+ pComMTRW->m_Flags |= enum_GuidGenerated;
// Determine if the interface is visible from COM.
if (IsTypeVisibleFromCom(TypeHandle(pComMT->m_pMT)))
- pComMT->m_Flags |= enum_ComVisible;
+ pComMTRW->m_Flags |= enum_ComVisible;
// Determine if the interface is a COM imported class interface.
if (pMT->GetClass()->IsComClassInterface())
- pComMT->m_Flags |= enum_ComClassItf;
+ pComMTRW->m_Flags |= enum_ComClassItf;
#ifdef _DEBUG
{
// In debug set all the vtable slots to 0xDEADCA11.
- SLOT *pComVTable = (SLOT*)(pComMT + 1);
+ SLOT *pComVTable = (SLOT*)(pComMTRW + 1);
for (unsigned iComSlots = 0; iComSlots < DEBUG_AssertSlots + cbExtraSlots; iComSlots++)
*(pComVTable + iComSlots) = (SLOT)(size_t)0xDEADCA11;
}
diff --git a/src/coreclr/vm/comcallablewrapper.h b/src/coreclr/vm/comcallablewrapper.h
index 4d3335e76fe..2581ddf832f 100644
--- a/src/coreclr/vm/comcallablewrapper.h
+++ b/src/coreclr/vm/comcallablewrapper.h
@@ -503,7 +503,8 @@ struct ComMethodTable
{
LIMITED_METHOD_CONTRACT;
- return InterlockedIncrement(&m_cbRefCount);
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(this, sizeof(ComMethodTable));
+ return InterlockedIncrement(&comMTWriterHolder.GetRW()->m_cbRefCount);
}
LONG Release()
@@ -517,9 +518,10 @@ struct ComMethodTable
}
CONTRACTL_END;
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(this, sizeof(ComMethodTable));
// use a different var here because cleanup will delete the object
// so can no longer make member refs
- LONG cbRef = InterlockedDecrement(&m_cbRefCount);
+ LONG cbRef = InterlockedDecrement(&comMTWriterHolder.GetRW()->m_cbRefCount);
if (cbRef == 0)
Cleanup();
@@ -759,8 +761,9 @@ struct ComMethodTable
// Generate the IClassX IID if it hasn't been generated yet.
if (!(m_Flags & enum_GuidGenerated))
{
- GenerateClassItfGuid(TypeHandle(m_pMT), &m_IID);
- m_Flags |= enum_GuidGenerated;
+ ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(this, sizeof(ComMethodTable));
+ GenerateClassItfGuid(TypeHandle(m_pMT), &comMTWriterHolder.GetRW()->m_IID);
+ comMTWriterHolder.GetRW()->m_Flags |= enum_GuidGenerated;
}
return m_IID;
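
AddRef, Release and GetIID above all share one subtlety: the interlocked or plain update goes through the RW alias while readers keep using the RX address. That is sound only because both mappings alias the same physical memory, which the double-mapping scheme is assumed to guarantee. Boiled down:

    ExecutableWriterHolder<ComMethodTable> writer(this, sizeof(ComMethodTable));
    LONG cbRef = InterlockedDecrement(&writer.GetRW()->m_cbRefCount);
    if (cbRef == 0)
        Cleanup();  // the object may be gone now; use the local cbRef, not members
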
diff --git a/src/coreclr/vm/comdelegate.cpp b/src/coreclr/vm/comdelegate.cpp
index 87fc358c1fb..599c75a2c9e 100644
--- a/src/coreclr/vm/comdelegate.cpp
+++ b/src/coreclr/vm/comdelegate.cpp
@@ -820,7 +820,8 @@ Stub* COMDelegate::SetupShuffleThunk(MethodTable * pDelMT, MethodDesc *pTargetMe
{
if (FastInterlockCompareExchangePointer(&pClass->m_pInstRetBuffCallStub, pShuffleThunk, NULL ) != NULL)
{
- pShuffleThunk->DecRef();
+ ExecutableWriterHolder<Stub> shuffleThunkWriterHolder(pShuffleThunk, sizeof(Stub));
+ shuffleThunkWriterHolder.GetRW()->DecRef();
pShuffleThunk = pClass->m_pInstRetBuffCallStub;
}
}
@@ -828,7 +829,8 @@ Stub* COMDelegate::SetupShuffleThunk(MethodTable * pDelMT, MethodDesc *pTargetMe
{
if (FastInterlockCompareExchangePointer(&pClass->m_pStaticCallStub, pShuffleThunk, NULL ) != NULL)
{
- pShuffleThunk->DecRef();
+ ExecutableWriterHolder<Stub> shuffleThunkWriterHolder(pShuffleThunk, sizeof(Stub));
+ shuffleThunkWriterHolder.GetRW()->DecRef();
pShuffleThunk = pClass->m_pStaticCallStub;
}
}
@@ -1256,7 +1258,9 @@ LPVOID COMDelegate::ConvertToCallback(OBJECTREF pDelegateObj)
GCX_PREEMP();
pUMThunkMarshInfo = new UMThunkMarshInfo();
- pUMThunkMarshInfo->LoadTimeInit(pInvokeMeth);
+
+ ExecutableWriterHolder<UMThunkMarshInfo> uMThunkMarshInfoWriterHolder(pUMThunkMarshInfo, sizeof(UMThunkMarshInfo));
+ uMThunkMarshInfoWriterHolder.GetRW()->LoadTimeInit(pInvokeMeth);
g_IBCLogger.LogEEClassCOWTableAccess(pMT);
if (FastInterlockCompareExchangePointer(&(pClass->m_pUMThunkMarshInfo),
@@ -1282,8 +1286,11 @@ LPVOID COMDelegate::ConvertToCallback(OBJECTREF pDelegateObj)
// This target should not ever be used. We are storing it in the thunk for better diagnostics of "call on collected delegate" crashes.
PCODE pManagedTargetForDiagnostics = (pDelegate->GetMethodPtrAux() != NULL) ? pDelegate->GetMethodPtrAux() : pDelegate->GetMethodPtr();
+ ExecutableWriterHolder<UMEntryThunk> uMEntryThunkWriterHolder(pUMEntryThunk, sizeof(UMEntryThunk));
+
// MethodDesc is passed in for profiling to know the method desc of target
- pUMEntryThunk->LoadTimeInit(
+ uMEntryThunkWriterHolder.GetRW()->LoadTimeInit(
+ pUMEntryThunk,
pManagedTargetForDiagnostics,
objhnd,
pUMThunkMarshInfo, pInvokeMeth);
@@ -1930,7 +1937,8 @@ PCODE COMDelegate::TheDelegateInvokeStub()
if (InterlockedCompareExchangeT<PCODE>(&s_pInvokeStub, pCandidate->GetEntryPoint(), NULL) != NULL)
{
// if we are here, someone managed to set the stub before us, so we release the current one
- pCandidate->DecRef();
+ ExecutableWriterHolder<Stub> candidateWriterHolder(pCandidate, sizeof(Stub));
+ candidateWriterHolder.GetRW()->DecRef();
}
}
@@ -2349,7 +2357,9 @@ FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
Stub *pCandidate = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap(), NEWSTUB_FL_MULTICAST);
Stub *pWinner = m_pMulticastStubCache->AttemptToSetStub(hash,pCandidate);
- pCandidate->DecRef();
+ ExecutableWriterHolder<Stub> candidateWriterHolder(pCandidate, sizeof(Stub));
+ candidateWriterHolder.GetRW()->DecRef();
+
if (!pWinner)
COMPlusThrowOM();
diff --git a/src/coreclr/vm/comtoclrcall.cpp b/src/coreclr/vm/comtoclrcall.cpp
index ca553e1e5ce..008df2efa13 100644
--- a/src/coreclr/vm/comtoclrcall.cpp
+++ b/src/coreclr/vm/comtoclrcall.cpp
@@ -788,14 +788,17 @@ PCODE ComCallMethodDesc::CreateCOMToCLRStub(DWORD dwStubFlags, MethodDesc **ppSt
// make sure our native stack computation in code:ComCallMethodDesc.InitNativeInfo is right
_ASSERTE(HasMarshalError() || !pStubMD->IsILStub() || pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() == m_StackBytes);
#else // TARGET_X86
+
+ ExecutableWriterHolder<ComCallMethodDesc> comCallMDWriterHolder(this, sizeof(ComCallMethodDesc));
+
if (pStubMD->IsILStub())
{
- m_StackBytes = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize();
+ comCallMDWriterHolder.GetRW()->m_StackBytes = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize();
_ASSERTE(m_StackBytes == pStubMD->SizeOfArgStack());
}
else
{
- m_StackBytes = pStubMD->SizeOfArgStack();
+ comCallMDWriterHolder.GetRW()->m_StackBytes = pStubMD->SizeOfArgStack();
}
#endif // TARGET_X86
@@ -882,10 +885,11 @@ void ComCallMethodDesc::InitRuntimeNativeInfo(MethodDesc *pStubMD)
}
// write the computed data into this ComCallMethodDesc
- m_dwSlotInfo = (wSourceSlotEDX | (wStubStackSlotCount << 16));
+ ExecutableWriterHolder<ComCallMethodDesc> comCallMDWriterHolder(this, sizeof(ComCallMethodDesc));
+ comCallMDWriterHolder.GetRW()->m_dwSlotInfo = (wSourceSlotEDX | (wStubStackSlotCount << 16));
if (pwStubStackSlotOffsets != NULL)
{
- if (FastInterlockCompareExchangePointer(&m_pwStubStackSlotOffsets, pwStubStackSlotOffsets.GetValue(), NULL) == NULL)
+ if (FastInterlockCompareExchangePointer(&comCallMDWriterHolder.GetRW()->m_pwStubStackSlotOffsets, pwStubStackSlotOffsets.GetValue(), NULL) == NULL)
{
pwStubStackSlotOffsets.SuppressRelease();
}
@@ -895,22 +899,23 @@ void ComCallMethodDesc::InitRuntimeNativeInfo(MethodDesc *pStubMD)
// Fill in return thunk with proper native arg size.
//
- BYTE *pMethodDescMemory = ((BYTE*)this) + GetOffsetOfReturnThunk();
+ BYTE *pMethodDescMemoryRX = ((BYTE*)this) + GetOffsetOfReturnThunk();
+ BYTE *pMethodDescMemoryRW = ((BYTE*)comCallMDWriterHolder.GetRW()) + GetOffsetOfReturnThunk();
//
// encodes a "ret nativeArgSize" to return and
// pop off the args off the stack
//
- pMethodDescMemory[0] = 0xc2;
+ pMethodDescMemoryRW[0] = 0xc2;
UINT16 nativeArgSize = GetNumStackBytes();
if (!(nativeArgSize < 0x7fff))
COMPlusThrow(kTypeLoadException, IDS_EE_SIGTOOCOMPLEX);
- *(SHORT *)&pMethodDescMemory[1] = nativeArgSize;
+ *(SHORT *)&pMethodDescMemoryRW[1] = nativeArgSize;
- FlushInstructionCache(GetCurrentProcess(), pMethodDescMemory, sizeof pMethodDescMemory[0] + sizeof(SHORT));
+ FlushInstructionCache(GetCurrentProcess(), pMethodDescMemoryRX, sizeof pMethodDescMemoryRX[0] + sizeof(SHORT));
#endif // TARGET_X86
}
#endif //CROSSGEN_COMPILE
@@ -1436,7 +1441,8 @@ PCODE ComCall::GetComCallMethodStub(ComCallMethodDesc *pCMD)
// Compute stack layout and prepare the return thunk on x86
pCMD->InitRuntimeNativeInfo(pStubMD);
- InterlockedCompareExchangeT<PCODE>(pCMD->GetAddrOfILStubField(), pTempILStub, NULL);
+ ExecutableWriterHolder<PCODE> addrOfILStubWriterHolder(pCMD->GetAddrOfILStubField(), sizeof(PCODE));
+ InterlockedCompareExchangeT<PCODE>(addrOfILStubWriterHolder.GetRW(), pTempILStub, NULL);
#ifdef TARGET_X86
// Finally, we need to build a stub that represents the entire call. This
diff --git a/src/coreclr/vm/crossgencompile.cpp b/src/coreclr/vm/crossgencompile.cpp
index a4fdab8e69b..1e14e79be7f 100644
--- a/src/coreclr/vm/crossgencompile.cpp
+++ b/src/coreclr/vm/crossgencompile.cpp
@@ -253,7 +253,7 @@ INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMe
return 0;
}
-INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddr, bool emitJump)
+INT32 rel32UsingPreallocatedJumpStub(INT32 UNALIGNED * pRel32, PCODE target, PCODE jumpStubAddrRX, PCODE jumpStubAddrRW, bool emitJump)
{
// crossgen does not have jump stubs
return 0;
diff --git a/src/coreclr/vm/dataimage.cpp b/src/coreclr/vm/dataimage.cpp
index d619efc1ebf..de8569cb342 100644
--- a/src/coreclr/vm/dataimage.cpp
+++ b/src/coreclr/vm/dataimage.cpp
@@ -1246,7 +1246,7 @@ public:
StubPrecode precode;
- precode.Init(m_pMD);
+ precode.Init(&precode, m_pMD);
SSIZE_T offset;
ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
@@ -1275,7 +1275,7 @@ public:
StubPrecode precode;
- precode.Init(m_pMD);
+ precode.Init(&precode, m_pMD);
SSIZE_T offset;
ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
diff --git a/src/coreclr/vm/dllimportcallback.cpp b/src/coreclr/vm/dllimportcallback.cpp
index d21259f60bd..1420d182784 100644
--- a/src/coreclr/vm/dllimportcallback.cpp
+++ b/src/coreclr/vm/dllimportcallback.cpp
@@ -64,7 +64,7 @@ public:
return pThunk;
}
- void AddToList(UMEntryThunk *pThunk)
+ void AddToList(UMEntryThunk *pThunkRX, UMEntryThunk *pThunkRW)
{
CONTRACTL
{
@@ -80,16 +80,17 @@ public:
if (m_pHead == NULL)
{
- m_pHead = pThunk;
- m_pTail = pThunk;
+ m_pHead = pThunkRX;
+ m_pTail = pThunkRX;
}
else
{
- m_pTail->m_pNextFreeThunk = pThunk;
- m_pTail = pThunk;
+ ExecutableWriterHolder<UMEntryThunk> tailThunkWriterHolder(m_pTail, sizeof(UMEntryThunk));
+ tailThunkWriterHolder.GetRW()->m_pNextFreeThunk = pThunkRX;
+ m_pTail = pThunkRX;
}
- pThunk->m_pNextFreeThunk = NULL;
+ pThunkRW->m_pNextFreeThunk = NULL;
++m_count;
}
@@ -170,7 +171,9 @@ UMEntryThunk *UMEntryThunkCache::GetUMEntryThunk(MethodDesc *pMD)
miHolder.Assign(pMarshInfo);
pMarshInfo->LoadTimeInit(pMD);
- pThunk->LoadTimeInit(NULL, NULL, pMarshInfo, pMD);
+
+ ExecutableWriterHolder<UMEntryThunk> thunkWriterHolder(pThunk, sizeof(UMEntryThunk));
+ thunkWriterHolder.GetRW()->LoadTimeInit(pThunk, NULL, NULL, pMarshInfo, pMD);
// add it to the cache
CacheElement element;
@@ -284,8 +287,8 @@ void STDCALL UMEntryThunk::DoRunTimeInit(UMEntryThunk* pUMEntryThunk)
#if defined(HOST_OSX) && defined(HOST_ARM64)
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
-
- pUMEntryThunk->RunTimeInit();
+ ExecutableWriterHolder<UMEntryThunk> uMEntryThunkWriterHolder(pUMEntryThunk, sizeof(UMEntryThunk));
+ uMEntryThunkWriterHolder.GetRW()->RunTimeInit(pUMEntryThunk);
}
UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
@@ -323,6 +326,7 @@ void UMEntryThunk::Terminate()
}
CONTRACTL_END;
+ ExecutableWriterHolder<UMEntryThunk> thunkWriterHolder(this, sizeof(UMEntryThunk));
m_code.Poison();
if (GetObjectHandle())
@@ -332,10 +336,10 @@ void UMEntryThunk::Terminate()
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
DestroyLongWeakHandle(GetObjectHandle());
- m_pObjectHandle = 0;
+ thunkWriterHolder.GetRW()->m_pObjectHandle = 0;
}
- s_thunkFreeList.AddToList(this);
+ s_thunkFreeList.AddToList(this, thunkWriterHolder.GetRW());
}
VOID UMEntryThunk::FreeUMEntryThunk(UMEntryThunk* p)
diff --git a/src/coreclr/vm/dllimportcallback.h b/src/coreclr/vm/dllimportcallback.h
index ec57ebb009d..f5c5d29b519 100644
--- a/src/coreclr/vm/dllimportcallback.h
+++ b/src/coreclr/vm/dllimportcallback.h
@@ -137,7 +137,8 @@ public:
static VOID FreeUMEntryThunk(UMEntryThunk* p);
#ifndef DACCESS_COMPILE
- VOID LoadTimeInit(PCODE pManagedTarget,
+ VOID LoadTimeInit(UMEntryThunk *pUMEntryThunkRX,
+ PCODE pManagedTarget,
OBJECTHANDLE pObjectHandle,
UMThunkMarshInfo *pUMThunkMarshInfo,
MethodDesc *pMD)
@@ -162,7 +163,7 @@ public:
m_pMD = pMD; // For debugging and profiling, so they can identify the target
- m_code.Encode((BYTE*)TheUMThunkPreStub(), this);
+ m_code.Encode(&pUMEntryThunkRX->m_code, (BYTE*)TheUMThunkPreStub(), pUMEntryThunkRX);
#ifdef _DEBUG
m_state = kLoadTimeInited;
@@ -171,20 +172,21 @@ public:
void Terminate();
- VOID RunTimeInit()
+ VOID RunTimeInit(UMEntryThunk *pUMEntryThunkRX)
{
STANDARD_VM_CONTRACT;
// Ensure the method's module is activated in the app domain
m_pMD->EnsureActive();
- m_pUMThunkMarshInfo->RunTimeInit();
+ ExecutableWriterHolder<UMThunkMarshInfo> uMThunkMarshInfoWriterHolder(m_pUMThunkMarshInfo, sizeof(UMThunkMarshInfo));
+ uMThunkMarshInfoWriterHolder.GetRW()->RunTimeInit();
// Ensure that we have either the managed target or the delegate.
if (m_pObjectHandle == NULL && m_pManagedTarget == NULL)
m_pManagedTarget = m_pMD->GetMultiCallableAddrOfCode();
- m_code.Encode((BYTE*)m_pUMThunkMarshInfo->GetExecStubEntryPoint(), this);
+ m_code.Encode(&pUMEntryThunkRX->m_code, (BYTE*)m_pUMThunkMarshInfo->GetExecStubEntryPoint(), pUMEntryThunkRX);
#ifdef _DEBUG
#if defined(HOST_OSX) && defined(HOST_ARM64)
diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp
index 1a5320bb5fb..565243c3177 100644
--- a/src/coreclr/vm/dynamicmethod.cpp
+++ b/src/coreclr/vm/dynamicmethod.cpp
@@ -463,7 +463,8 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
pHp->reserveForJumpStubs = 0;
#ifdef HOST_64BIT
- emitJump(pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
+ ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12);
+ emitJump(pHp->CLRPersonalityRoutine, personalityRoutineWriterHolder.GetRW(), (void *)ProcessCLRException);
#endif
size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(pHp->maxCodeHeapSize));
@@ -498,6 +499,14 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, si
// found a block
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Block found, size 0x%X\n", this, pCurrent->size));
+ ExecutableWriterHolder<TrackAllocation> previousWriterHolder;
+ if (pPrevious)
+ {
+ previousWriterHolder = ExecutableWriterHolder<TrackAllocation>(pPrevious, sizeof(TrackAllocation));
+ }
+
+ ExecutableWriterHolder<TrackAllocation> currentWriterHolder(pCurrent, sizeof(TrackAllocation));
+
// The space left is not big enough for a new block, let's just
// update the TrackAllocation record for the current block
if (pCurrent->size - realSize < max(HOST_CODEHEAP_SIZE_ALIGN, sizeof(TrackAllocation)))
@@ -506,7 +515,7 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, si
// remove current
if (pPrevious)
{
- pPrevious->pNext = pCurrent->pNext;
+ previousWriterHolder.GetRW()->pNext = pCurrent->pNext;
}
else
{
@@ -517,12 +526,15 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, si
{
// create a new TrackAllocation after the memory we just allocated and insert it into the free list
TrackAllocation *pNewCurrent = (TrackAllocation*)((BYTE*)pCurrent + realSize);
- pNewCurrent->pNext = pCurrent->pNext;
- pNewCurrent->size = pCurrent->size - realSize;
+
+ ExecutableWriterHolder<TrackAllocation> newCurrentWriterHolder(pNewCurrent, sizeof(TrackAllocation));
+ newCurrentWriterHolder.GetRW()->pNext = pCurrent->pNext;
+ newCurrentWriterHolder.GetRW()->size = pCurrent->size - realSize;
+
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Item changed %p, new size 0x%X\n", this, pNewCurrent, pNewCurrent->size));
if (pPrevious)
{
- pPrevious->pNext = pNewCurrent;
+ previousWriterHolder.GetRW()->pNext = pNewCurrent;
}
else
{
@@ -530,10 +542,10 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, si
}
// We only need to update the size of the current block if we are creating a new block
- pCurrent->size = realSize;
+ currentWriterHolder.GetRW()->size = realSize;
}
- pCurrent->pHeap = this;
+ currentWriterHolder.GetRW()->pHeap = this;
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Allocation returned %p, size 0x%X - data -> %p\n", this, pCurrent, pCurrent->size, pPointer));
return pCurrent;
@@ -546,7 +558,7 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocFromFreeList(size_t header, si
return NULL;
}
-void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
+void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert, TrackAllocation *pBlockToInsertRW)
{
CONTRACTL
{
@@ -572,10 +584,13 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
if (pCurrent > pBlockToInsert)
{
// found the point of insertion
- pBlockToInsert->pNext = pCurrent;
+ pBlockToInsertRW->pNext = pCurrent;
+ ExecutableWriterHolder<TrackAllocation> previousWriterHolder;
+
if (pPrevious)
{
- pPrevious->pNext = pBlockToInsert;
+ previousWriterHolder = ExecutableWriterHolder<TrackAllocation>(pPrevious, sizeof(TrackAllocation));
+ previousWriterHolder.GetRW()->pNext = pBlockToInsert;
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] -> [%p, 0x%X] -> [%p, 0x%X]\n", this,
pPrevious, pPrevious->size,
pBlockToInsert, pBlockToInsert->size,
@@ -595,8 +610,8 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
pBlockToInsert, pBlockToInsert->size,
pCurrent, pCurrent->size,
pCurrent->size + pBlockToInsert->size));
- pBlockToInsert->pNext = pCurrent->pNext;
- pBlockToInsert->size += pCurrent->size;
+ pBlockToInsertRW->pNext = pCurrent->pNext;
+ pBlockToInsertRW->size += pCurrent->size;
}
if (pPrevious && (BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert)
@@ -606,8 +621,8 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
pPrevious, pPrevious->size,
pBlockToInsert, pBlockToInsert->size,
pPrevious->size + pBlockToInsert->size));
- pPrevious->pNext = pBlockToInsert->pNext;
- pPrevious->size += pBlockToInsert->size;
+ previousWriterHolder.GetRW()->pNext = pBlockToInsert->pNext;
+ previousWriterHolder.GetRW()->size += pBlockToInsert->size;
}
return;
@@ -616,8 +631,10 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
pCurrent = pCurrent->pNext;
}
_ASSERTE(pPrevious && pCurrent == NULL);
- pBlockToInsert->pNext = NULL;
+ pBlockToInsertRW->pNext = NULL;
// last in the list
+ ExecutableWriterHolder<TrackAllocation> previousWriterHolder2(pPrevious, sizeof(TrackAllocation));
+
if ((BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert)
{
// coalesce with previous
@@ -625,11 +642,11 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
pPrevious, pPrevious->size,
pBlockToInsert, pBlockToInsert->size,
pPrevious->size + pBlockToInsert->size));
- pPrevious->size += pBlockToInsert->size;
+ previousWriterHolder2.GetRW()->size += pBlockToInsert->size;
}
else
{
- pPrevious->pNext = pBlockToInsert;
+ previousWriterHolder2.GetRW()->pNext = pBlockToInsert;
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to end after [%p, 0x%X]\n", this,
pBlockToInsert, pBlockToInsert->size,
pPrevious, pPrevious->size));
@@ -639,7 +656,7 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
}
// first in the list
- pBlockToInsert->pNext = m_pFreeList;
+ pBlockToInsertRW->pNext = m_pFreeList;
m_pFreeList = pBlockToInsert;
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to head\n", this,
m_pFreeList, m_pFreeList->size));
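
AddToFreeList now receives both views of the node being inserted, which keeps the rule simple: list links are traversed and stored as RX addresses, and only the node currently being modified is written, through its RW alias. Reduced to the head-insert case as a sketch:

    void HostCodeHeap::AddToFreeList(TrackAllocation *pBlock, TrackAllocation *pBlockRW)
    {
        pBlockRW->pNext = m_pFreeList;  // write through the RW alias
        m_pFreeList = pBlock;           // the list itself stores RX addresses
    }
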
@@ -674,7 +691,8 @@ void* HostCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD al
// Pointer to the TrackAllocation record is stored just before the code header
CodeHeader * pHdr = (CodeHeader *)pCode - 1;
- *((TrackAllocation **)(pHdr) - 1) = pTracker;
+ ExecutableWriterHolder<TrackAllocation *> trackerWriterHolder((TrackAllocation **)(pHdr) - 1, sizeof(TrackAllocation *));
+ *trackerWriterHolder.GetRW() = pTracker;
_ASSERTE(pCode + size <= (BYTE*)pTracker + pTracker->size);
@@ -742,10 +760,12 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocMemory_NoThrow(size_t header,
}
TrackAllocation *pBlockToInsert = (TrackAllocation*)(void*)m_pLastAvailableCommittedAddr;
- pBlockToInsert->pNext = NULL;
- pBlockToInsert->size = sizeToCommit;
+ ExecutableWriterHolder<TrackAllocation> blockToInsertWriterHolder(pBlockToInsert, sizeof(TrackAllocation));
+
+ blockToInsertWriterHolder.GetRW()->pNext = NULL;
+ blockToInsertWriterHolder.GetRW()->size = sizeToCommit;
m_pLastAvailableCommittedAddr += sizeToCommit;
- AddToFreeList(pBlockToInsert);
+ AddToFreeList(pBlockToInsert, blockToInsertWriterHolder.GetRW());
pTracker = AllocFromFreeList(header, size, alignment, reserveForJumpStubs);
_ASSERTE(pTracker != NULL);
}
@@ -831,7 +851,8 @@ void HostCodeHeap::FreeMemForCode(void * codeStart)
LIMITED_METHOD_CONTRACT;
TrackAllocation *pTracker = HostCodeHeap::GetTrackAllocation((TADDR)codeStart);
- AddToFreeList(pTracker);
+ ExecutableWriterHolder<TrackAllocation> trackerWriterHolder(pTracker, sizeof(TrackAllocation));
+ AddToFreeList(pTracker, trackerWriterHolder.GetRW());
m_ApproximateLargestBlock += pTracker->size;
diff --git a/src/coreclr/vm/dynamicmethod.h b/src/coreclr/vm/dynamicmethod.h
index c287e31456d..3690d55d41c 100644
--- a/src/coreclr/vm/dynamicmethod.h
+++ b/src/coreclr/vm/dynamicmethod.h
@@ -286,7 +286,7 @@ private:
HostCodeHeap(EEJitManager *pJitManager);
HeapList* InitializeHeapList(CodeHeapRequestInfo *pInfo);
TrackAllocation* AllocFromFreeList(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs);
- void AddToFreeList(TrackAllocation *pBlockToInsert);
+ void AddToFreeList(TrackAllocation *pBlockToInsert, TrackAllocation *pBlockToInsertRW);
TrackAllocation* AllocMemory_NoThrow(size_t header, size_t size, DWORD alignment, size_t reserveForJumpStubs);
diff --git a/src/coreclr/vm/gccover.cpp b/src/coreclr/vm/gccover.cpp
index 9765633eb59..26c07bad1c5 100644
--- a/src/coreclr/vm/gccover.cpp
+++ b/src/coreclr/vm/gccover.cpp
@@ -491,6 +491,8 @@ void GCCoverageInfo::SprinkleBreakpoints(
#if (defined(TARGET_X86) || defined(TARGET_AMD64)) && USE_DISASSEMBLER
BYTE * codeStart = (BYTE *)pCode;
+ ExecutableWriterHolder<BYTE> codeWriterHolder;
+ size_t writeableOffset;
memcpy(saveAddr, codeStart, codeSize);
@@ -499,6 +501,12 @@ void GCCoverageInfo::SprinkleBreakpoints(
{
DWORD oldProtect;
ClrVirtualProtect(codeStart, codeSize, PAGE_EXECUTE_READWRITE, &oldProtect);
+ writeableOffset = 0;
+ }
+ else
+ {
+ codeWriterHolder = ExecutableWriterHolder<BYTE>(codeStart, codeSize);
+ writeableOffset = codeWriterHolder.GetRW() - codeStart;
}
PBYTE cur;
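
The writeableOffset pattern in SprinkleBreakpoints keeps all scanning and offset arithmetic on the RX code and applies the offset only at the store. Roughly (IsGcSafePointAt is a hypothetical stand-in for the codeMan->IsGcSafe checks in the real loop):

    for (PBYTE cur = codeStart; cur < codeStart + codeSize; cur++)
    {
        if (IsGcSafePointAt(cur))
            *(cur + writeableOffset) = INTERRUPT_INSTR;  // only the store uses the RW alias
    }
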
@@ -579,7 +587,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj)))
#endif
{
- *cur = INTERRUPT_INSTR_CALL; // return value. May need to protect
+ *(cur + writeableOffset) = INTERRUPT_INSTR_CALL; // return value. May need to protect
}
break;
@@ -614,7 +622,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
if (prevDirectCallTargetMD != 0)
{
- ReplaceInstrAfterCall(cur, prevDirectCallTargetMD);
+ ReplaceInstrAfterCall(cur + writeableOffset, prevDirectCallTargetMD);
}
// For fully interruptible code, we end up whacking every instruction
@@ -625,7 +633,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
_ASSERTE(FitsIn<DWORD>(dwRelOffset));
if (codeMan->IsGcSafe(&codeInfo, static_cast<DWORD>(dwRelOffset)))
{
- *cur = INTERRUPT_INSTR;
+ *(cur + writeableOffset) = INTERRUPT_INSTR;
}
#ifdef TARGET_X86
@@ -633,7 +641,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
// our unwinding logic works there.
if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL))
{
- *cur = INTERRUPT_INSTR;
+ *(cur + writeableOffset) = INTERRUPT_INSTR;
}
#endif
@@ -801,14 +809,21 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
// instruction will not be a call instruction.
//_ASSERTE(instructionIsACallThroughRegister ^ instructionIsACallThroughImmediate);
+#if defined(TARGET_ARM)
+ size_t instrLen = sizeof(WORD);
+#else
+ size_t instrLen = sizeof(DWORD);
+#endif
+
+ ExecutableWriterHolder<BYTE> instrPtrWriterHolder(instrPtr - instrLen, 2 * instrLen);
if(instructionIsACallThroughRegister)
{
// If it is call by register then cannot know MethodDesc so replace the call instruction with illegal instruction
// safe point will be replaced with appropriate illegal instruction at execution time when reg value is known
#if defined(TARGET_ARM)
- *((WORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL;
+ *((WORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL;
#elif defined(TARGET_ARM64)
- *((DWORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL;
+ *((DWORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL;
#endif // _TARGET_XXXX_
}
else if(instructionIsACallThroughImmediate)
@@ -856,7 +871,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
if (fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls))
{
- ReplaceInstrAfterCall(instrPtr, targetMD);
+ ReplaceInstrAfterCall(instrPtrWriterHolder.GetRW() + instrLen, targetMD);
}
}
}
@@ -910,25 +925,27 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
// Need to do two iterations if interruptible range spans across hot & cold region
while(acrossHotRegion--)
{
- PBYTE instrPtr = rangeStart;
- while(instrPtr < rangeStop)
+ ExecutableWriterHolder<BYTE> instrPtrWriterHolder(rangeStart, rangeStop - rangeStart);
+ PBYTE instrPtrRW = instrPtrWriterHolder.GetRW();
+ PBYTE rangeStopRW = instrPtrRW + (rangeStop - rangeStart);
+ while(instrPtrRW < rangeStopRW)
{
// The instruction about to be replaced cannot already be a gcstress instruction
- _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtr));
+ _ASSERTE(!IsGcCoverageInterruptInstruction(instrPtrRW));
#if defined(TARGET_ARM)
- size_t instrLen = GetARMInstructionLength(instrPtr);
+ size_t instrLen = GetARMInstructionLength(instrPtrRW);
if (instrLen == 2)
- *((WORD*)instrPtr) = INTERRUPT_INSTR;
+ *((WORD*)instrPtrRW) = INTERRUPT_INSTR;
else
{
- *((DWORD*)instrPtr) = INTERRUPT_INSTR_32;
+ *((DWORD*)instrPtrRW) = INTERRUPT_INSTR_32;
}
- instrPtr += instrLen;
+ instrPtrRW += instrLen;
#elif defined(TARGET_ARM64)
- *((DWORD*)instrPtr) = INTERRUPT_INSTR;
- instrPtr += 4;
+ *((DWORD*)instrPtrRW) = INTERRUPT_INSTR;
+ instrPtrRW += 4;
#endif // TARGET_XXXX_
}
@@ -1242,16 +1259,16 @@ void RemoveGcCoverageInterrupt(TADDR instrPtr, BYTE * savedInstrPtr, GCCoverageI
#if defined(HOST_OSX) && defined(HOST_ARM64)
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
-
+ ExecutableWriterHolder<void> instrPtrWriterHolder((void*)instrPtr, 4);
#ifdef TARGET_ARM
if (GetARMInstructionLength(savedInstrPtr) == 2)
- *(WORD *)instrPtr = *(WORD *)savedInstrPtr;
+ *(WORD *)instrPtrWriterHolder.GetRW() = *(WORD *)savedInstrPtr;
else
- *(DWORD *)instrPtr = *(DWORD *)savedInstrPtr;
+ *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr;
#elif defined(TARGET_ARM64)
- *(DWORD *)instrPtr = *(DWORD *)savedInstrPtr;
+ *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr;
#else
- *(BYTE *)instrPtr = *savedInstrPtr;
+ *(BYTE *)instrPtrWriterHolder.GetRW() = *savedInstrPtr;
#endif
#ifdef TARGET_X86
@@ -1623,6 +1640,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion)
PBYTE target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr);
if (target != 0)
{
+ ExecutableWriterHolder<BYTE> nextInstrWriterHolder(nextInstr, sizeof(DWORD));
if (!pThread->PreemptiveGCDisabled())
{
// We are in preemptive mode in JITted code. This implies that we are in an IL stub
@@ -1630,13 +1648,13 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion)
#ifdef TARGET_ARM
size_t instrLen = GetARMInstructionLength(nextInstr);
if (instrLen == 2)
- *(WORD*)nextInstr = INTERRUPT_INSTR;
+ *(WORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR;
else
- *(DWORD*)nextInstr = INTERRUPT_INSTR_32;
+ *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR_32;
#elif defined(TARGET_ARM64)
- *(DWORD*)nextInstr = INTERRUPT_INSTR;
+ *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR;
#else
- *nextInstr = INTERRUPT_INSTR;
+ *nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR;
#endif
}
else
@@ -1649,7 +1667,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion)
// It could become a problem if 64-bit does partially interruptible work.
// OK, we have the MD, mark the instruction after the CALL
// appropriately
- ReplaceInstrAfterCall(nextInstr, targetMD);
+ ReplaceInstrAfterCall(nextInstrWriterHolder.GetRW(), targetMD);
}
}
}
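
A note on the pattern in SprinkleBreakpoints above: the RW-minus-RX delta is computed once per code region and then added to every patch address derived from the executable mapping. A minimal C++ sketch of that flow follows; WriterHolderSketch and SprinkleSketch are illustrative names, and the holder construction is elided because its implementation is not part of this diff.

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the writer holder: 'rw' aliases the same physical
// bytes as the executable region it was created over.
struct WriterHolderSketch {
    uint8_t* rw;
    uint8_t* GetRW() const { return rw; }
};

// Patch selected offsets of an executable region: addresses are derived from
// the RX mapping, stores are redirected through the RW alias by a fixed offset.
void SprinkleSketch(uint8_t* codeStart, WriterHolderSketch& holder,
                    const size_t* safePoints, size_t count, uint8_t interruptInstr)
{
    ptrdiff_t writeableOffset = holder.GetRW() - codeStart;
    for (size_t i = 0; i < count; ++i) {
        uint8_t* cur = codeStart + safePoints[i];  // RX address (what executes)
        *(cur + writeableOffset) = interruptInstr; // store lands in the RW mapping
    }
}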
diff --git a/src/coreclr/vm/i386/cgencpu.h b/src/coreclr/vm/i386/cgencpu.h
index 04de3f584ae..6d6ef230b80 100644
--- a/src/coreclr/vm/i386/cgencpu.h
+++ b/src/coreclr/vm/i386/cgencpu.h
@@ -278,17 +278,18 @@ inline INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDe
}
#ifdef FEATURE_COMINTEROP
-inline void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+inline void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
WRAPPER_NO_CONTRACT;
- BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
+ BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
- pBuffer[0] = X86_INSTR_CALL_REL32; //CALLNEAR32
- *((LPVOID*)(1+pBuffer)) = (LPVOID) (((LPBYTE)target) - (pBuffer+5));
+ pBufferRW[0] = X86_INSTR_CALL_REL32; //CALLNEAR32
+ *((LPVOID*)(1+pBufferRW)) = (LPVOID) (((LPBYTE)target) - (pBufferRX+5));
- _ASSERTE(IS_ALIGNED(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
- *((SSIZE_T*)(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == ((LPBYTE)target - (LPBYTE)pCOMMethod));
+ _ASSERTE(IS_ALIGNED(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
+ *((SSIZE_T*)(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == ((LPBYTE)target - (LPBYTE)pCOMMethodRX));
}
#endif // FEATURE_COMINTEROP
@@ -378,12 +379,12 @@ inline BOOL isCallRegisterIndirect(const BYTE *pRetAddr)
}
//------------------------------------------------------------------------
-inline void emitJump(LPBYTE pBuffer, LPVOID target)
+inline void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
- pBuffer[0] = X86_INSTR_JMP_REL32; //JUMPNEAR32
- *((LPVOID*)(1+pBuffer)) = (LPVOID) (((LPBYTE)target) - (pBuffer+5));
+ pBufferRW[0] = X86_INSTR_JMP_REL32; //JUMPNEAR32
+ *((LPVOID*)(1+pBufferRW)) = (LPVOID) (((LPBYTE)target) - (pBufferRX+5));
}
//------------------------------------------------------------------------
@@ -420,10 +421,10 @@ inline PCODE decodeJump(PCODE pCode)
//
//------------------------------------------------------------------------
-inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
- emitJump(pBuffer, target);
+ emitJump(pBufferRX, pBufferRW, target);
}
//------------------------------------------------------------------------
@@ -457,7 +458,7 @@ struct DECLSPEC_ALIGN(4) UMEntryThunkCode
BYTE m_jmp; //JMP NEAR32
const BYTE * m_execstub; // pointer to destination code // make sure the backpatched portion is dword aligned.
- void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
@@ -620,19 +621,19 @@ private:
#ifndef DACCESS_COMPILE
public:
- CallCountingStubShort(CallCount *remainingCallCountCell, PCODE targetForMethod)
+ CallCountingStubShort(CallCountingStubShort* stubRX, CallCount *remainingCallCountCell, PCODE targetForMethod)
: m_part0{ 0xb8}, // mov eax,
m_remainingCallCountCell(remainingCallCountCell), // <imm32>
m_part1{ 0x66, 0xff, 0x08, // dec word ptr [eax]
0x0f, 0x85}, // jnz
m_rel32TargetForMethod( // <rel32>
GetRelative32BitOffset(
- &m_rel32TargetForMethod,
+ &stubRX->m_rel32TargetForMethod,
targetForMethod)),
m_part2{ 0xe8}, // call
m_rel32TargetForThresholdReached( // <rel32>
GetRelative32BitOffset(
- &m_rel32TargetForThresholdReached,
+ &stubRX->m_rel32TargetForThresholdReached,
TargetForThresholdReached)),
// (eip == stub-identifying token)
m_alignmentPadding{ 0xcc} // int 3
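
The emitJump and emitCOMStubCall changes above follow one rule: a rel32 displacement is computed against the address the instruction will execute from (the RX pointer), while the byte stores go through the writeable alias (the RW pointer). A minimal sketch of that rule, with illustrative names:

#include <cstdint>
#include <cstring>

// Encode "jmp rel32" into a doubly-mapped 5-byte slot. bufferRX is where the
// jump will execute; bufferRW is where the bytes can be written.
void emitJumpSketch(uint8_t* bufferRX, uint8_t* bufferRW, const void* target)
{
    bufferRW[0] = 0xE9; // JMP rel32
    // The displacement is relative to the end of the instruction at its
    // executable address, not its writeable one.
    int32_t rel32 = (int32_t)((const uint8_t*)target - (bufferRX + 5));
    std::memcpy(bufferRW + 1, &rel32, sizeof(rel32));
}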
diff --git a/src/coreclr/vm/i386/cgenx86.cpp b/src/coreclr/vm/i386/cgenx86.cpp
index c8a38e2f3b3..6af331cc6a3 100644
--- a/src/coreclr/vm/i386/cgenx86.cpp
+++ b/src/coreclr/vm/i386/cgenx86.cpp
@@ -1199,7 +1199,7 @@ extern "C" DWORD __stdcall xmmYmmStateSupport()
#endif // !TARGET_UNIX
-void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
LIMITED_METHOD_CONTRACT;
@@ -1210,19 +1210,22 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
m_movEAX = X86_INSTR_MOV_EAX_IMM32;
m_uet = pvSecretParam;
m_jmp = X86_INSTR_JMP_REL32;
- m_execstub = (BYTE*) ((pTargetCode) - (4+((BYTE*)&m_execstub)));
+ m_execstub = (BYTE*) ((pTargetCode) - (4+((BYTE*)&pEntryThunkCodeRX->m_execstub)));
- FlushInstructionCache(GetCurrentProcess(),GetEntryPoint(),sizeof(UMEntryThunkCode));
+ FlushInstructionCache(GetCurrentProcess(),pEntryThunkCodeRX->GetEntryPoint(),sizeof(UMEntryThunkCode));
}
void UMEntryThunkCode::Poison()
{
LIMITED_METHOD_CONTRACT;
- m_execstub = (BYTE*) ((BYTE*)UMEntryThunk::ReportViolation - (4+((BYTE*)&m_execstub)));
+ ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode));
+ UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();
+
+ pThisRW->m_execstub = (BYTE*) ((BYTE*)UMEntryThunk::ReportViolation - (4+((BYTE*)&m_execstub)));
// mov ecx, imm32
- m_movEAX = 0xb9;
+ pThisRW->m_movEAX = 0xb9;
ClrFlushInstructionCache(GetEntryPoint(),sizeof(UMEntryThunkCode));
}
@@ -1354,7 +1357,9 @@ EXTERN_C PVOID STDCALL VirtualMethodFixupWorker(Object * pThisPtr, CORCOMPILE_V
*(INT32 *)(&pNewValue[1]) = (INT32) pcRelOffset;
_ASSERTE(IS_ALIGNED(pThunk, sizeof(INT64)));
- FastInterlockCompareExchangeLong((INT64*)pThunk, newValue, oldValue);
+
+ ExecutableWriterHolder<INT64> thunkWriterHolder((INT64*)pThunk, sizeof(INT64));
+ FastInterlockCompareExchangeLong(thunkWriterHolder.GetRW(), newValue, oldValue);
FlushInstructionCache(GetCurrentProcess(), pThunk, 8);
}
@@ -1375,14 +1380,17 @@ EXTERN_C PVOID STDCALL VirtualMethodFixupWorker(Object * pThisPtr, CORCOMPILE_V
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
- BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \
+ BYTE * pStart = startWriterHolder.GetRW(); \
+ size_t rxOffset = pStartRX - pStart; \
BYTE * p = pStart;
#define END_DYNAMIC_HELPER_EMIT() \
_ASSERTE(pStart + cb == p); \
while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \
- ClrFlushInstructionCache(pStart, cbAligned); \
- return (PCODE)pStart
+ ClrFlushInstructionCache(pStartRX, cbAligned); \
+ return (PCODE)pStartRX
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
@@ -1395,13 +1403,13 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCOD
p += 4;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target);
p += 4;
END_DYNAMIC_HELPER_EMIT();
}
-void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
CONTRACTL
{
@@ -1417,7 +1425,7 @@ void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, T
p += 4;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target);
p += 4;
}
@@ -1425,7 +1433,7 @@ PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR ar
{
BEGIN_DYNAMIC_HELPER_EMIT(10);
- EmitHelperWithArg(p, pAllocator, arg, target);
+ EmitHelperWithArg(p, rxOffset, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
@@ -1443,7 +1451,7 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADD
p += 4;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1461,7 +1469,7 @@ PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR ar
p += 4;
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target);
p += 4;
END_DYNAMIC_HELPER_EMIT();
@@ -1547,9 +1555,9 @@ PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADD
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
#ifdef UNIX_X86_ABI
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, (PCODE)DynamicHelperArgsStub);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), (PCODE)DynamicHelperArgsStub);
#else
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target);
#endif
p += 4;
@@ -1596,9 +1604,9 @@ PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADD
*p++ = X86_INSTR_JMP_REL32; // jmp rel32
#ifdef UNIX_X86_ABI
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, (PCODE)DynamicHelperArgsStub);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), (PCODE)DynamicHelperArgsStub);
#else
- *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)(p + rxOffset), target);
#endif
p += 4;
@@ -1616,9 +1624,10 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
- pArgs->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
- pArgs->signature = pLookup->signature;
- pArgs->module = (CORINFO_MODULE_HANDLE)pModule;
+ ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs));
+ argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
+ argsWriterHolder.GetRW()->signature = pLookup->signature;
+ argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule;
WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*);
@@ -1630,7 +1639,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// ecx contains the generic context parameter
// mov edx,pArgs
// jmp helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
END_DYNAMIC_HELPER_EMIT();
}
@@ -1707,7 +1716,7 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
// mov edx,pArgs
// jmp helperAddress
- EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+ EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
}
}
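
The reworked BEGIN_DYNAMIC_HELPER_EMIT/END_DYNAMIC_HELPER_EMIT pair above walks the emit cursor through the RW buffer and keeps rxOffset around to translate any cursor position back to its executable address, which is what rel32UsingJumpStub and the returned PCODE need. A compact sketch of the same bookkeeping, under the assumption that a plain rel32 jump suffices (no jump stub):

#include <cstdint>
#include <cstring>

// Emit "jmp rel32" through the RW buffer and return the RX entry point.
uint8_t* EmitJmpHelperSketch(uint8_t* startRX, uint8_t* startRW, uintptr_t target)
{
    size_t rxOffset = startRX - startRW; // add to an RW cursor to recover RX
    uint8_t* p = startRW;
    *p++ = 0xE9;                         // jmp rel32
    int32_t rel32 = (int32_t)(target - ((uintptr_t)(p + rxOffset) + 4));
    std::memcpy(p, &rel32, sizeof(rel32));
    p += 4;
    return startRX;                      // callers receive the executable address
}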
diff --git a/src/coreclr/vm/i386/stublinkerx86.cpp b/src/coreclr/vm/i386/stublinkerx86.cpp
index 8817bf1cc4f..61c5dfd90cb 100644
--- a/src/coreclr/vm/i386/stublinkerx86.cpp
+++ b/src/coreclr/vm/i386/stublinkerx86.cpp
@@ -145,7 +145,7 @@ class X64NearJumpSetup : public InstructionFormat
}
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
if (k8 == refsize)
@@ -158,19 +158,19 @@ class X64NearJumpSetup : public InstructionFormat
}
else if (k64Small == refsize)
{
- UINT64 TargetAddress = (INT64)pOutBuffer + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
+ UINT64 TargetAddress = (INT64)pOutBufferRX + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
_ASSERTE(FitsInU4(TargetAddress));
// mov eax, imm32 ; zero-extended
- pOutBuffer[0] = 0xB8;
- *((UINT32*)&pOutBuffer[1]) = (UINT32)TargetAddress;
+ pOutBufferRW[0] = 0xB8;
+ *((UINT32*)&pOutBufferRW[1]) = (UINT32)TargetAddress;
}
else if (k64 == refsize)
{
// mov rax, imm64
- pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
- pOutBuffer[1] = 0xB8;
- *((UINT64*)&pOutBuffer[2]) = (UINT64)(((INT64)pOutBuffer) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
+ pOutBufferRW[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBufferRW[1] = 0xB8;
+ *((UINT64*)&pOutBufferRW[2]) = (UINT64)(((INT64)pOutBufferRX) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
}
else
{
@@ -274,32 +274,32 @@ class X64NearJumpExecute : public InstructionFormat
}
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
if (k8 == refsize)
{
- pOutBuffer[0] = 0xeb;
- *((__int8*)(pOutBuffer+1)) = (__int8)fixedUpReference;
+ pOutBufferRW[0] = 0xeb;
+ *((__int8*)(pOutBufferRW+1)) = (__int8)fixedUpReference;
}
else if (k32 == refsize)
{
- pOutBuffer[0] = 0xe9;
- *((__int32*)(pOutBuffer+1)) = (__int32)fixedUpReference;
+ pOutBufferRW[0] = 0xe9;
+ *((__int32*)(pOutBufferRW+1)) = (__int32)fixedUpReference;
}
else if (k64Small == refsize)
{
// REX.W jmp rax
- pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
- pOutBuffer[1] = 0xFF;
- pOutBuffer[2] = 0xE0;
+ pOutBufferRW[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBufferRW[1] = 0xFF;
+ pOutBufferRW[2] = 0xE0;
}
else if (k64 == refsize)
{
// REX.W jmp rax
- pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
- pOutBuffer[1] = 0xFF;
- pOutBuffer[2] = 0xE0;
+ pOutBufferRW[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBufferRW[1] = 0xFF;
+ pOutBufferRW[2] = 0xE0;
}
else
{
@@ -410,43 +410,43 @@ class X86NearJump : public InstructionFormat
}
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
if (k8 == refsize)
{
- pOutBuffer[0] = 0xeb;
- *((__int8*)(pOutBuffer+1)) = (__int8)fixedUpReference;
+ pOutBufferRW[0] = 0xeb;
+ *((__int8*)(pOutBufferRW+1)) = (__int8)fixedUpReference;
}
else if (k32 == refsize)
{
- pOutBuffer[0] = 0xe9;
- *((__int32*)(pOutBuffer+1)) = (__int32)fixedUpReference;
+ pOutBufferRW[0] = 0xe9;
+ *((__int32*)(pOutBufferRW+1)) = (__int32)fixedUpReference;
}
#ifdef TARGET_AMD64
else if (k64Small == refsize)
{
- UINT64 TargetAddress = (INT64)pOutBuffer + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
+ UINT64 TargetAddress = (INT64)pOutBufferRX + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
_ASSERTE(FitsInU4(TargetAddress));
// mov eax, imm32 ; zero-extended
- pOutBuffer[0] = 0xB8;
- *((UINT32*)&pOutBuffer[1]) = (UINT32)TargetAddress;
+ pOutBufferRW[0] = 0xB8;
+ *((UINT32*)&pOutBufferRW[1]) = (UINT32)TargetAddress;
// jmp rax
- pOutBuffer[5] = 0xFF;
- pOutBuffer[6] = 0xE0;
+ pOutBufferRW[5] = 0xFF;
+ pOutBufferRW[6] = 0xE0;
}
else if (k64 == refsize)
{
// mov rax, imm64
- pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
- pOutBuffer[1] = 0xB8;
- *((UINT64*)&pOutBuffer[2]) = (UINT64)(((INT64)pOutBuffer) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
+ pOutBufferRW[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBufferRW[1] = 0xB8;
+ *((UINT64*)&pOutBufferRW[2]) = (UINT64)(((INT64)pOutBufferRX) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
// jmp rax
- pOutBuffer[10] = 0xFF;
- pOutBuffer[11] = 0xE0;
+ pOutBufferRW[10] = 0xFF;
+ pOutBufferRW[11] = 0xE0;
}
#endif // TARGET_AMD64
else
@@ -544,19 +544,19 @@ class X86CondJump : public InstructionFormat
return (refsize == k8 ? 2 : 6);
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
if (refsize == k8)
{
- pOutBuffer[0] = static_cast<BYTE>(0x70 | variationCode);
- *((__int8*)(pOutBuffer+1)) = (__int8)fixedUpReference;
+ pOutBufferRW[0] = static_cast<BYTE>(0x70 | variationCode);
+ *((__int8*)(pOutBufferRW+1)) = (__int8)fixedUpReference;
}
else
{
- pOutBuffer[0] = 0x0f;
- pOutBuffer[1] = static_cast<BYTE>(0x80 | variationCode);
- *((__int32*)(pOutBuffer+2)) = (__int32)fixedUpReference;
+ pOutBufferRW[0] = 0x0f;
+ pOutBufferRW[1] = static_cast<BYTE>(0x80 | variationCode);
+ *((__int32*)(pOutBufferRW+2)) = (__int32)fixedUpReference;
}
}
};
@@ -601,42 +601,42 @@ class X86Call : public InstructionFormat
}
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
switch (refsize)
{
case k32:
- pOutBuffer[0] = 0xE8;
- *((__int32*)(1+pOutBuffer)) = (__int32)fixedUpReference;
+ pOutBufferRW[0] = 0xE8;
+ *((__int32*)(1+pOutBufferRW)) = (__int32)fixedUpReference;
break;
#ifdef TARGET_AMD64
case k64Small:
UINT64 TargetAddress;
- TargetAddress = (INT64)pOutBuffer + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
+ TargetAddress = (INT64)pOutBufferRX + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
_ASSERTE(FitsInU4(TargetAddress));
// mov eax,<fixedUpReference> ; zero-extends
- pOutBuffer[0] = 0xB8;
- *((UINT32*)&pOutBuffer[1]) = (UINT32)TargetAddress;
+ pOutBufferRW[0] = 0xB8;
+ *((UINT32*)&pOutBufferRW[1]) = (UINT32)TargetAddress;
// call rax
- pOutBuffer[5] = 0xff;
- pOutBuffer[6] = 0xd0;
+ pOutBufferRW[5] = 0xff;
+ pOutBufferRW[6] = 0xd0;
break;
case k64:
// mov rax,<fixedUpReference>
- pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
- pOutBuffer[1] = 0xB8;
- *((UINT64*)&pOutBuffer[2]) = (UINT64)(((INT64)pOutBuffer) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
+ pOutBufferRW[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBufferRW[1] = 0xB8;
+ *((UINT64*)&pOutBufferRW[2]) = (UINT64)(((INT64)pOutBufferRX) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
// call rax
- pOutBuffer[10] = 0xff;
- pOutBuffer[11] = 0xd0;
+ pOutBufferRW[10] = 0xff;
+ pOutBufferRW[11] = 0xd0;
break;
#endif // TARGET_AMD64
@@ -720,14 +720,14 @@ class X86PushImm32 : public InstructionFormat
return 5;
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT;
- pOutBuffer[0] = 0x68;
+ pOutBufferRW[0] = 0x68;
// only support absolute pushimm32 of the label address. The fixedUpReference is
// the offset to the label from the current point, so add to get address
- *((__int32*)(1+pOutBuffer)) = (__int32)(fixedUpReference);
+ *((__int32*)(1+pOutBufferRW)) = (__int32)(fixedUpReference);
}
};
@@ -790,7 +790,7 @@ class X64LeaRIP : public InstructionFormat
}
}
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT;
@@ -803,12 +803,12 @@ class X64LeaRIP : public InstructionFormat
reg = X86RegFromAMD64Reg(reg);
}
- pOutBuffer[0] = rex;
- pOutBuffer[1] = 0x8D;
- pOutBuffer[2] = 0x05 | (reg << 3);
+ pOutBufferRW[0] = rex;
+ pOutBufferRW[1] = 0x8D;
+ pOutBufferRW[2] = 0x05 | (reg << 3);
// only support absolute pushimm32 of the label address. The fixedUpReference is
// the offset to the label from the current point, so add to get address
- *((__int32*)(3+pOutBuffer)) = (__int32)(fixedUpReference);
+ *((__int32*)(3+pOutBufferRW)) = (__int32)(fixedUpReference);
}
};
@@ -5193,7 +5193,7 @@ void FixupPrecode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
#ifndef DACCESS_COMPILE
-void rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, MethodDesc* pMD)
+void rel32SetInterlocked(/*PINT32*/ PVOID pRel32, /*PINT32*/ PVOID pRel32RW, TADDR target, MethodDesc* pMD)
{
CONTRACTL
{
@@ -5204,11 +5204,11 @@ void rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, MethodDesc* pMD)
INT32 targetRel32 = rel32UsingJumpStub((INT32*)pRel32, target, pMD);
- _ASSERTE(IS_ALIGNED(pRel32, sizeof(INT32)));
- FastInterlockExchange((LONG*)pRel32, (LONG)targetRel32);
+ _ASSERTE(IS_ALIGNED(pRel32RW, sizeof(INT32)));
+ FastInterlockExchange((LONG*)pRel32RW, (LONG)targetRel32);
}
-BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, TADDR expected, MethodDesc* pMD)
+BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, /*PINT32*/ PVOID pRel32RW, TADDR target, TADDR expected, MethodDesc* pMD)
{
CONTRACTL
{
@@ -5222,11 +5222,11 @@ BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, TADDR expected,
INT32 targetRel32 = rel32UsingJumpStub((INT32*)pRel32, target, pMD);
- _ASSERTE(IS_ALIGNED(pRel32, sizeof(INT32)));
- return FastInterlockCompareExchange((LONG*)pRel32, (LONG)targetRel32, (LONG)expectedRel32) == (LONG)expectedRel32;
+ _ASSERTE(IS_ALIGNED(pRel32RW, sizeof(INT32)));
+ return FastInterlockCompareExchange((LONG*)pRel32RW, (LONG)targetRel32, (LONG)expectedRel32) == (LONG)expectedRel32;
}
-void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator /* = NULL */,
+void StubPrecode::Init(StubPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator /* = NULL */,
BYTE type /* = StubPrecode::Type */, TADDR target /* = NULL */)
{
WRAPPER_NO_CONTRACT;
@@ -5244,23 +5244,23 @@ void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator /* = N
// that has the same lifetime as the precode itself
if (target == NULL)
target = GetPreStubEntryPoint();
- m_rel32 = rel32UsingJumpStub(&m_rel32, target, NULL /* pMD */, pLoaderAllocator);
+ m_rel32 = rel32UsingJumpStub(&pPrecodeRX->m_rel32, target, NULL /* pMD */, pLoaderAllocator);
}
}
#ifdef HAS_NDIRECT_IMPORT_PRECODE
-void NDirectImportPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+void NDirectImportPrecode::Init(NDirectImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
- StubPrecode::Init(pMD, pLoaderAllocator, NDirectImportPrecode::Type, GetEEFuncEntryPoint(NDirectImportThunk));
+ StubPrecode::Init(pPrecodeRX, pMD, pLoaderAllocator, NDirectImportPrecode::Type, GetEEFuncEntryPoint(NDirectImportThunk));
}
#endif // HAS_NDIRECT_IMPORT_PRECODE
#ifdef HAS_FIXUP_PRECODE
-void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
+void FixupPrecode::Init(FixupPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
{
WRAPPER_NO_CONTRACT;
@@ -5292,13 +5292,13 @@ void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int
#ifdef FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
if (pMD->IsLCGMethod())
{
- m_rel32 = rel32UsingPreallocatedJumpStub(&m_rel32, target, GetDynamicMethodPrecodeFixupJumpStub(), false /* emitJump */);
+ m_rel32 = rel32UsingPreallocatedJumpStub(&pPrecodeRX->m_rel32, target, pPrecodeRX->GetDynamicMethodPrecodeFixupJumpStub(), GetDynamicMethodPrecodeFixupJumpStub(), false /* emitJump */);
return;
}
#endif // FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
if (pLoaderAllocator != NULL)
{
- m_rel32 = rel32UsingJumpStub(&m_rel32, target, NULL /* pMD */, pLoaderAllocator);
+ m_rel32 = rel32UsingJumpStub(&pPrecodeRX->m_rel32, target, NULL /* pMD */, pLoaderAllocator);
}
}
@@ -5328,7 +5328,9 @@ void FixupPrecode::ResetTargetInterlocked()
newValue.m_rel32 = rel32UsingJumpStub(&m_rel32, target, pMD);
_ASSERTE(IS_ALIGNED(this, sizeof(INT64)));
- FastInterlockExchangeLong((INT64*)this, *(INT64*)&newValue);
+
+ ExecutableWriterHolder<FixupPrecode> precodeWriterHolder(this, sizeof(FixupPrecode));
+ FastInterlockExchangeLong((INT64*)precodeWriterHolder.GetRW(), *(INT64*)&newValue);
}
BOOL FixupPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
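
Both interlocked paths in FixupPrecode above funnel into a compare-exchange on the 8-byte precode image through a freshly opened RW mapping. A sketch of that step using C++20 std::atomic_ref, assuming the RW alias preserves the RX side's 8-byte alignment:

#include <atomic>
#include <cstdint>

// CAS the 8-byte precode image via its writeable alias; returns true when the
// precode still held 'oldValue' and was swapped to 'newValue'.
bool InterlockedPatchSketch(int64_t* precodeRW, int64_t oldValue, int64_t newValue)
{
    return std::atomic_ref<int64_t>(*precodeRW)
        .compare_exchange_strong(oldValue, newValue);
}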
@@ -5376,15 +5378,24 @@ BOOL FixupPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
return FALSE;
}
+#ifdef FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
+ ExecutableWriterHolder<void> dynamicMethodEntryJumpStubWriterHolder;
+ if (pMD->IsLCGMethod())
+ {
+ dynamicMethodEntryJumpStubWriterHolder = ExecutableWriterHolder<void>((void*)GetDynamicMethodEntryJumpStub(), 12);
+ }
+#endif
*(INT32*)(&pNewValue[offsetof(FixupPrecode, m_rel32)]) =
#ifdef FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
pMD->IsLCGMethod() ?
- rel32UsingPreallocatedJumpStub(&m_rel32, target, GetDynamicMethodEntryJumpStub(), true /* emitJump */) :
+ rel32UsingPreallocatedJumpStub(&m_rel32, target, GetDynamicMethodEntryJumpStub(), (PCODE)dynamicMethodEntryJumpStubWriterHolder.GetRW(), true /* emitJump */) :
#endif // FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
rel32UsingJumpStub(&m_rel32, target, pMD);
_ASSERTE(IS_ALIGNED(this, sizeof(INT64)));
- return FastInterlockCompareExchangeLong((INT64*) this, newValue, oldValue) == oldValue;
+
+ ExecutableWriterHolder<FixupPrecode> precodeWriterHolder(this, sizeof(FixupPrecode));
+ return FastInterlockCompareExchangeLong((INT64*)precodeWriterHolder.GetRW(), newValue, oldValue) == oldValue;
}
#ifdef FEATURE_NATIVE_IMAGE_GENERATION
@@ -5488,7 +5499,9 @@ BOOL ThisPtrRetBufPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
INT32 newRel32 = rel32UsingJumpStub(&m_rel32, target, NULL /* pMD */, ((MethodDesc *)GetMethodDesc())->GetLoaderAllocator());
_ASSERTE(IS_ALIGNED(&m_rel32, sizeof(INT32)));
- FastInterlockExchange((LONG *)&m_rel32, (LONG)newRel32);
+ ExecutableWriterHolder<INT32> rel32WriterHolder(&m_rel32, sizeof(INT32));
+ FastInterlockExchange((LONG*)rel32WriterHolder.GetRW(), (LONG)newRel32);
+
return TRUE;
}
#endif // !DACCESS_COMPILE
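
The EmitInstruction overrides above all gained the pOutBufferRX/pOutBufferRW pair: purely relative forms only ever touch RW, while the absolute k64Small/k64 forms must fold the instruction's executable address into the target. A sketch of the k64 "mov rax, imm64" case (10 bytes for this form; the name is illustrative):

#include <cstdint>
#include <cstring>

void EmitMovRaxTargetSketch(uint8_t* outRX, uint8_t* outRW, int64_t fixedUpReference)
{
    outRW[0] = 0x48; // REX.W
    outRW[1] = 0xB8; // mov rax, imm64
    // The absolute target is based on where the instruction executes (RX).
    uint64_t target = (uint64_t)((int64_t)outRX + fixedUpReference + 10);
    std::memcpy(outRW + 2, &target, sizeof(target));
}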
diff --git a/src/coreclr/vm/i386/stublinkerx86.h b/src/coreclr/vm/i386/stublinkerx86.h
index e8730c2725b..af5244d0771 100644
--- a/src/coreclr/vm/i386/stublinkerx86.h
+++ b/src/coreclr/vm/i386/stublinkerx86.h
@@ -457,8 +457,8 @@ inline TADDR rel32Decode(/*PTR_INT32*/ TADDR pRel32)
return pRel32 + 4 + *PTR_INT32(pRel32);
}
-void rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, MethodDesc* pMD);
-BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, TADDR expected, MethodDesc* pMD);
+void rel32SetInterlocked(/*PINT32*/ PVOID pRel32, /*PINT32*/ PVOID pRel32RW, TADDR target, MethodDesc* pMD);
+BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, /*PINT32*/ PVOID pRel32RW, TADDR target, TADDR expected, MethodDesc* pMD);
//------------------------------------------------------------------------
//
@@ -521,7 +521,7 @@ struct StubPrecode {
BYTE m_jmp;
INT32 m_rel32;
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator = NULL, BYTE type = StubPrecode::Type, TADDR target = NULL);
+ void Init(StubPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator = NULL, BYTE type = StubPrecode::Type, TADDR target = NULL);
TADDR GetMethodDesc()
{
@@ -546,7 +546,8 @@ struct StubPrecode {
}
CONTRACTL_END;
- rel32SetInterlocked(&m_rel32, GetPreStubEntryPoint(), (MethodDesc*)GetMethodDesc());
+ ExecutableWriterHolder<INT32> rel32WriterHolder(&m_rel32, sizeof(INT32));
+ rel32SetInterlocked(&m_rel32, rel32WriterHolder.GetRW(), GetPreStubEntryPoint(), (MethodDesc*)GetMethodDesc());
}
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
@@ -558,7 +559,8 @@ struct StubPrecode {
}
CONTRACTL_END;
- return rel32SetInterlocked(&m_rel32, target, expected, (MethodDesc*)GetMethodDesc());
+ ExecutableWriterHolder<void> rel32Holder(&m_rel32, 4);
+ return rel32SetInterlocked(&m_rel32, rel32Holder.GetRW(), target, expected, (MethodDesc*)GetMethodDesc());
}
};
IN_TARGET_64BIT(static_assert_no_msg(offsetof(StubPrecode, m_movR10) == OFFSETOF_PRECODE_TYPE);)
@@ -586,7 +588,7 @@ struct NDirectImportPrecode : StubPrecode {
// jmp NDirectImportThunk
#endif // HOST_64BIT
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(NDirectImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
LPVOID GetEntrypoint()
{
@@ -633,7 +635,7 @@ struct FixupPrecode {
TADDR m_pMethodDesc;
#endif
- void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+ void Init(FixupPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
#ifdef HAS_FIXUP_PRECODE_CHUNKS
TADDR GetBase()
diff --git a/src/coreclr/vm/i386/virtualcallstubcpu.hpp b/src/coreclr/vm/i386/virtualcallstubcpu.hpp
index 38680e402d6..c6515abfaed 100644
--- a/src/coreclr/vm/i386/virtualcallstubcpu.hpp
+++ b/src/coreclr/vm/i386/virtualcallstubcpu.hpp
@@ -84,7 +84,7 @@ struct LookupHolder
{
static void InitializeStatic();
- void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
+ void Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken);
LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
@@ -198,7 +198,7 @@ struct DispatchHolder
{
static void InitializeStatic();
- void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT);
+ void Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT);
DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
@@ -347,7 +347,8 @@ struct ResolveHolder
{
static void InitializeStatic();
- void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ void Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr
#ifndef UNIX_X86_ABI
@@ -741,14 +742,14 @@ void LookupHolder::InitializeStatic()
lookupInit._resolveWorkerDispl = 0xcccccccc;
}
-void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+void LookupHolder::Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
{
_stub = lookupInit;
//fill in the stub specific fields
//@TODO: Get rid of this duplication of data.
_stub._token = dispatchToken;
- _stub._resolveWorkerDispl = resolveWorkerTarget - ((PCODE) &_stub._resolveWorkerDispl + sizeof(DISPL));
+ _stub._resolveWorkerDispl = resolveWorkerTarget - ((PCODE) &pLookupHolderRX->_stub._resolveWorkerDispl + sizeof(DISPL));
}
LookupHolder* LookupHolder::FromLookupEntry(PCODE lookupEntry)
@@ -811,14 +812,14 @@ void DispatchHolder::InitializeStatic()
#endif //STUB_LOGGING
};
-void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
+void DispatchHolder::Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT)
{
_stub = dispatchInit;
//fill in the stub specific fields
_stub._expectedMT = (size_t) expectedMT;
- _stub._failDispl = failTarget - ((PCODE) &_stub._failDispl + sizeof(DISPL));
- _stub._implDispl = implTarget - ((PCODE) &_stub._implDispl + sizeof(DISPL));
+ _stub._failDispl = failTarget - ((PCODE) &pDispatchHolderRX->_stub._failDispl + sizeof(DISPL));
+ _stub._implDispl = implTarget - ((PCODE) &pDispatchHolderRX->_stub._implDispl + sizeof(DISPL));
}
DispatchHolder* DispatchHolder::FromDispatchEntry(PCODE dispatchEntry)
@@ -943,7 +944,8 @@ void ResolveHolder::InitializeStatic()
resolveInit.toResolveStub = (offsetof(ResolveStub, _resolveEntryPoint) - (offsetof(ResolveStub, toResolveStub) + 1)) & 0xFF;
};
-void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+void ResolveHolder::Initialize(ResolveHolder* pResolveHolderRX,
+ PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr
#ifndef UNIX_X86_ABI
@@ -960,8 +962,8 @@ void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
_stub._token = dispatchToken;
// _stub._hashedTokenMov = hashedToken;
_stub._tokenPush = dispatchToken;
- _stub._resolveWorkerDispl = resolveWorkerTarget - ((PCODE) &_stub._resolveWorkerDispl + sizeof(DISPL));
- _stub._backpatcherDispl = patcherTarget - ((PCODE) &_stub._backpatcherDispl + sizeof(DISPL));
+ _stub._resolveWorkerDispl = resolveWorkerTarget - ((PCODE) &pResolveHolderRX->_stub._resolveWorkerDispl + sizeof(DISPL));
+ _stub._backpatcherDispl = patcherTarget - ((PCODE) &pResolveHolderRX->_stub._backpatcherDispl + sizeof(DISPL));
#ifndef UNIX_X86_ABI
_stub._stackArgumentsSize = stackArgumentsSize;
#endif
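
The Initialize methods above all take the holder's RX pointer because every DISPL field is consumed at its executable address: the value is stored into the RW copy, but computed against the RX copy. A minimal sketch with a made-up one-field stub:

#include <cstdint>

struct VtableStubSketch { int32_t workerDispl; };

// Store into the RW copy; measure from the RX copy, where the CPU reads it.
void InitializeSketch(VtableStubSketch* stubRW, const VtableStubSketch* stubRX,
                      uintptr_t workerTarget)
{
    stubRW->workerDispl = (int32_t)(workerTarget -
        ((uintptr_t)&stubRX->workerDispl + sizeof(int32_t)));
}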
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index c5363172f79..4be62db4c67 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -11241,7 +11241,8 @@ void CEEJitInfo::WriteCode(EEJitManager * jitMgr)
if (m_CodeHeaderRW != m_CodeHeader)
{
- memcpy(m_CodeHeader, m_CodeHeaderRW, m_codeWriteBufferSize);
+ ExecutableWriterHolder<void> codeWriterHolder((void *)m_CodeHeader, m_codeWriteBufferSize);
+ memcpy(codeWriterHolder.GetRW(), m_CodeHeaderRW, m_codeWriteBufferSize);
}
// Now that the code header was written to the final location, publish the code via the nibble map
@@ -13462,7 +13463,7 @@ void Module::LoadHelperTable()
*curEntry = X86_INSTR_JMP_REL32;
*(INT32 *)(curEntry + 1) = rel32UsingJumpStub((INT32 *)(curEntry + 1), pfnHelper, NULL, GetLoaderAllocator());
#else // all other platforms
- emitJump(curEntry, (LPVOID)pfnHelper);
+ emitJump(curEntry, curEntry, (LPVOID)pfnHelper);
_ASSERTE(HELPER_TABLE_ENTRY_LEN >= JUMP_ALLOCATE_SIZE);
#endif
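
CEEJitInfo::WriteCode above shows the publish step of the double-buffered JIT path: the code is produced in a separate RW scratch buffer and copied into the executable region in one go, through a temporary writeable alias. A sketch of that shape; MapWriteableSketch is a placeholder, not the runtime's mapping API:

#include <cstddef>
#include <cstring>
#include <vector>

// Identity placeholder: a real implementation would return a distinct RW alias.
static void* MapWriteableSketch(void* rx, size_t) { return rx; }

void PublishSketch(void* codeHeaderRX, const std::vector<unsigned char>& scratch)
{
    void* rw = MapWriteableSketch(codeHeaderRX, scratch.size());
    std::memcpy(rw, scratch.data(), scratch.size()); // single bulk publish copy
}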
diff --git a/src/coreclr/vm/jitinterface.h b/src/coreclr/vm/jitinterface.h
index eda60f61cc6..29e566a7342 100644
--- a/src/coreclr/vm/jitinterface.h
+++ b/src/coreclr/vm/jitinterface.h
@@ -961,8 +961,8 @@ protected :
EEJitManager* m_jitManager; // responsible for allocating memory
- CodeHeader* m_CodeHeader; // descriptor for JITTED code
- CodeHeader* m_CodeHeaderRW;
+ CodeHeader* m_CodeHeader; // descriptor for JITTED code - read/execute address
+ CodeHeader* m_CodeHeaderRW; // descriptor for JITTED code - code write scratch buffer address
size_t m_codeWriteBufferSize;
#ifdef USE_INDIRECT_CODEHEADER
BYTE* m_pRealCodeHeader;
diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp
index 5f19b8ce3f4..bd3984d8697 100644
--- a/src/coreclr/vm/method.cpp
+++ b/src/coreclr/vm/method.cpp
@@ -4168,10 +4168,10 @@ static const struct CentralJumpCode {
BYTE m_jmp[1];
INT32 m_rel32;
- inline void Setup(MethodDesc* pMD, PCODE target, LoaderAllocator *pLoaderAllocator) {
+ inline void Setup(CentralJumpCode* pCodeRX, MethodDesc* pMD, PCODE target, LoaderAllocator *pLoaderAllocator) {
WRAPPER_NO_CONTRACT;
m_pBaseMD = pMD;
- m_rel32 = rel32UsingJumpStub(&m_rel32, target, pMD, pLoaderAllocator);
+ m_rel32 = rel32UsingJumpStub(&pCodeRX->m_rel32, target, pMD, pLoaderAllocator);
}
inline BOOL CheckTarget(TADDR target) {
@@ -4200,10 +4200,10 @@ static const struct CentralJumpCode {
BYTE m_jmp[1];
INT32 m_rel32;
- inline void Setup(MethodDesc* pMD, PCODE target, LoaderAllocator *pLoaderAllocator) {
+ inline void Setup(CentralJumpCode* pCodeRX, MethodDesc* pMD, PCODE target, LoaderAllocator *pLoaderAllocator) {
WRAPPER_NO_CONTRACT;
m_pBaseMD = pMD;
- m_rel32 = rel32UsingJumpStub(&m_rel32, target, pMD, pLoaderAllocator);
+ m_rel32 = rel32UsingJumpStub(&pCodeRX->m_rel32, target, pMD, pLoaderAllocator);
}
inline BOOL CheckTarget(TADDR target) {
@@ -4488,15 +4488,17 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca
SIZE_T size = SizeOfCompactEntryPoints(count);
TADDR temporaryEntryPoints = (TADDR)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(size, sizeof(TADDR)));
+ ExecutableWriterHolder<void> temporaryEntryPointsWriterHolder((void *)temporaryEntryPoints, size);
+ size_t rxOffset = temporaryEntryPoints - (TADDR)temporaryEntryPointsWriterHolder.GetRW();
#ifdef TARGET_ARM
- BYTE* p = (BYTE*)temporaryEntryPoints + COMPACT_ENTRY_ARM_CODE;
+ BYTE* p = (BYTE*)temporaryEntryPointsWriterHolder.GetRW() + COMPACT_ENTRY_ARM_CODE;
int relOffset = count * TEP_ENTRY_SIZE - TEP_ENTRY_SIZE; // relative offset for the short jump
_ASSERTE (relOffset < MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB);
#else // TARGET_ARM
// make the temporary entrypoints unaligned, so they are easy to identify
- BYTE* p = (BYTE*)temporaryEntryPoints + 1;
+ BYTE* p = (BYTE*)temporaryEntryPointsWriterHolder.GetRW() + 1;
int indexInBlock = TEP_MAX_BLOCK_INDEX; // recompute relOffset in first iteration
int relOffset = 0; // relative offset for the short jump
#endif // TARGET_ARM
@@ -4541,10 +4543,11 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca
if (relOffset == 0)
{
CentralJumpCode* pCode = (CentralJumpCode*)p;
+ CentralJumpCode* pCodeRX = (CentralJumpCode*)(p + rxOffset);
memcpy(pCode, &c_CentralJumpCode, TEP_CENTRAL_JUMP_SIZE);
- pCode->Setup(pBaseMD, GetPreStubEntryPoint(), pLoaderAllocator);
+ pCode->Setup(pCodeRX, pBaseMD, GetPreStubEntryPoint(), pLoaderAllocator);
p += TEP_CENTRAL_JUMP_SIZE;
@@ -4565,11 +4568,11 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca
memcpy(pCode, &c_CentralJumpCode, TEP_CENTRAL_JUMP_SIZE);
pCode->Setup (GetPreStubCompactARMEntryPoint(), this);
- _ASSERTE(p + TEP_CENTRAL_JUMP_SIZE == (BYTE*)temporaryEntryPoints + size);
+ _ASSERTE(p + TEP_CENTRAL_JUMP_SIZE == (BYTE*)temporaryEntryPointsWriterHolder.GetRW() + size);
#else // TARGET_ARM
- _ASSERTE(p == (BYTE*)temporaryEntryPoints + size);
+ _ASSERTE(p == (BYTE*)temporaryEntryPointsWriterHolder.GetRW() + size);
#endif // TARGET_ARM
diff --git a/src/coreclr/vm/methoddescbackpatchinfo.cpp b/src/coreclr/vm/methoddescbackpatchinfo.cpp
index 000b56dbc5b..5a6bd15ae0c 100644
--- a/src/coreclr/vm/methoddescbackpatchinfo.cpp
+++ b/src/coreclr/vm/methoddescbackpatchinfo.cpp
@@ -43,15 +43,21 @@ void EntryPointSlots::Backpatch_Locked(TADDR slot, SlotType slotType, PCODE entr
break;
case SlotType_Executable:
- *(PCODE *)slot = entryPoint;
+ {
+ ExecutableWriterHolder<void> slotWriterHolder((void*)slot, sizeof(PCODE*));
+ *(PCODE *)slotWriterHolder.GetRW() = entryPoint;
goto Flush;
+ }
case SlotType_ExecutableRel32:
+ {
// A rel32 may require a jump stub on some architectures, and is currently not supported
_ASSERTE(sizeof(void *) <= 4);
- *(PCODE *)slot = entryPoint - ((PCODE)slot + sizeof(PCODE));
+ ExecutableWriterHolder<void> slotWriterHolder((void*)slot, sizeof(PCODE*));
+ *(PCODE *)slotWriterHolder.GetRW() = entryPoint - ((PCODE)slot + sizeof(PCODE));
// fall through
+ }
Flush:
ClrFlushInstructionCache((LPCVOID)slot, sizeof(PCODE));
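
Both slot types in Backpatch_Locked above write through an RW alias, but the rel32 variant still measures from the slot's executable address, and the subsequent cache flush targets the RX side. A sketch of the rel32 case:

#include <cstdint>

// Backpatch a rel32 entry-point slot: the value is relative to the RX slot,
// the store goes through the RW alias; flushing (elided) would use slotRX.
void BackpatchRel32Sketch(uintptr_t slotRX, int32_t* slotRW, uintptr_t entryPoint)
{
    *slotRW = (int32_t)(entryPoint - (slotRX + sizeof(int32_t)));
    // FlushInstructionCache(GetCurrentProcess(), (void*)slotRX, sizeof(int32_t));
}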
diff --git a/src/coreclr/vm/precode.cpp b/src/coreclr/vm/precode.cpp
index 2ef201d6296..718b4cb1db2 100644
--- a/src/coreclr/vm/precode.cpp
+++ b/src/coreclr/vm/precode.cpp
@@ -360,7 +360,8 @@ Precode* Precode::Allocate(PrecodeType t, MethodDesc* pMD,
}
Precode* pPrecode = (Precode*)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(size, AlignOf(t)));
- pPrecode->Init(t, pMD, pLoaderAllocator);
+ ExecutableWriterHolder<Precode> precodeWriterHolder(pPrecode, size);
+ precodeWriterHolder.GetRW()->Init(pPrecode, t, pMD, pLoaderAllocator);
#ifndef CROSSGEN_COMPILE
ClrFlushInstructionCache(pPrecode, size);
@@ -369,22 +370,22 @@ Precode* Precode::Allocate(PrecodeType t, MethodDesc* pMD,
return pPrecode;
}
-void Precode::Init(PrecodeType t, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+void Precode::Init(Precode* pPrecodeRX, PrecodeType t, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
LIMITED_METHOD_CONTRACT;
switch (t) {
case PRECODE_STUB:
- ((StubPrecode*)this)->Init(pMD, pLoaderAllocator);
+ ((StubPrecode*)this)->Init((StubPrecode*)pPrecodeRX, pMD, pLoaderAllocator);
break;
#ifdef HAS_NDIRECT_IMPORT_PRECODE
case PRECODE_NDIRECT_IMPORT:
- ((NDirectImportPrecode*)this)->Init(pMD, pLoaderAllocator);
+ ((NDirectImportPrecode*)this)->Init((NDirectImportPrecode*)pPrecodeRX, pMD, pLoaderAllocator);
break;
#endif // HAS_NDIRECT_IMPORT_PRECODE
#ifdef HAS_FIXUP_PRECODE
case PRECODE_FIXUP:
- ((FixupPrecode*)this)->Init(pMD, pLoaderAllocator);
+ ((FixupPrecode*)this)->Init((FixupPrecode*)pPrecodeRX, pMD, pLoaderAllocator);
break;
#endif // HAS_FIXUP_PRECODE
#ifdef HAS_THISPTR_RETBUF_PRECODE
@@ -482,7 +483,21 @@ void Precode::Reset()
WRAPPER_NO_CONTRACT;
MethodDesc* pMD = GetMethodDesc();
- Init(GetType(), pMD, pMD->GetLoaderAllocator());
+ SIZE_T size;
+ PrecodeType t = GetType();
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (t == PRECODE_FIXUP)
+ {
+ size = sizeof(FixupPrecode) + sizeof(PTR_MethodDesc);
+ }
+ else
+#endif
+ {
+ size = Precode::SizeOf(t);
+ }
+
+ ExecutableWriterHolder<Precode> precodeWriterHolder(this, size);
+ precodeWriterHolder.GetRW()->Init(this, GetType(), pMD, pMD->GetLoaderAllocator());
ClrFlushInstructionCache(this, SizeOf());
}
@@ -566,28 +581,34 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
#endif
TADDR temporaryEntryPoints = (TADDR)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(totalSize, AlignOf(t)));
+ ExecutableWriterHolder<void> entryPointsWriterHolder((void*)temporaryEntryPoints, totalSize);
#ifdef HAS_FIXUP_PRECODE_CHUNKS
if (t == PRECODE_FIXUP)
{
#ifdef FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
+ PCODE precodeFixupJumpStubRW = NULL;
PCODE precodeFixupJumpStub = NULL;
if (preallocateJumpStubs)
{
// Emit the jump for the precode fixup jump stub now. This jump stub immediately follows the MethodDesc (see
// GetDynamicMethodPrecodeFixupJumpStub()).
precodeFixupJumpStub = temporaryEntryPoints + count * sizeof(FixupPrecode) + sizeof(PTR_MethodDesc);
+ // TODO: how to get the size?
#ifndef CROSSGEN_COMPILE
- emitBackToBackJump((LPBYTE)precodeFixupJumpStub, (LPVOID)GetEEFuncEntryPoint(PrecodeFixupThunk));
+ precodeFixupJumpStubRW = (TADDR)entryPointsWriterHolder.GetRW() + count * sizeof(FixupPrecode) + sizeof(PTR_MethodDesc);
+ emitBackToBackJump((BYTE*)precodeFixupJumpStub, (BYTE*)precodeFixupJumpStubRW, (LPVOID)GetEEFuncEntryPoint(PrecodeFixupThunk));
#endif // !CROSSGEN_COMPILE
}
#endif // FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
TADDR entryPoint = temporaryEntryPoints;
+ TADDR entryPointRW = (TADDR)entryPointsWriterHolder.GetRW();
+
MethodDesc * pMD = pChunk->GetFirstMethodDesc();
for (int i = 0; i < count; i++)
{
- ((FixupPrecode *)entryPoint)->Init(pMD, pLoaderAllocator, pMD->GetMethodDescIndex(), (count - 1) - i);
+ ((FixupPrecode *)entryPointRW)->Init((FixupPrecode*)entryPoint, pMD, pLoaderAllocator, pMD->GetMethodDescIndex(), (count - 1) - i);
#ifdef FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
_ASSERTE(
@@ -598,6 +619,7 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
_ASSERTE((Precode *)entryPoint == GetPrecodeForTemporaryEntryPoint(temporaryEntryPoints, i));
entryPoint += sizeof(FixupPrecode);
+ entryPointRW += sizeof(FixupPrecode);
pMD = (MethodDesc *)(dac_cast<TADDR>(pMD) + pMD->SizeOf());
}
@@ -613,13 +635,15 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
SIZE_T oneSize = SizeOfTemporaryEntryPoint(t);
TADDR entryPoint = temporaryEntryPoints;
+ TADDR entryPointRW = (TADDR)entryPointsWriterHolder.GetRW();
MethodDesc * pMD = pChunk->GetFirstMethodDesc();
for (int i = 0; i < count; i++)
{
- ((Precode *)entryPoint)->Init(t, pMD, pLoaderAllocator);
+ ((Precode *)entryPointRW)->Init((Precode *)entryPoint, t, pMD, pLoaderAllocator);
_ASSERTE((Precode *)entryPoint == GetPrecodeForTemporaryEntryPoint(temporaryEntryPoints, i));
entryPoint += oneSize;
+ entryPointRW += oneSize;
pMD = (MethodDesc *)(dac_cast<TADDR>(pMD) + pMD->SizeOf());
}
@@ -763,7 +787,7 @@ void Precode::SaveChunk::Save(DataImage* image, MethodDesc * pMD)
SIZE_T size = Precode::SizeOf(precodeType);
Precode* pPrecode = (Precode *)new (image->GetHeap()) BYTE[size];
- pPrecode->Init(precodeType, pMD, NULL);
+ pPrecode->Init(pPrecode, precodeType, pMD, NULL);
pPrecode->Save(image);
// Alias the temporary entrypoint
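
Precode::Allocate above fixes the convention for initialization: Init executes on the RW copy, so plain member stores hit writeable pages, and the RX pointer is threaded through for anything position-dependent. A small sketch of the split, with illustrative types:

#include <cstdint>

struct PrecodeSketch {
    int32_t m_rel32;
    // 'this' is the RW alias; precodeRX supplies the executable address for
    // the displacement.
    void Init(PrecodeSketch* precodeRX, uintptr_t target)
    {
        m_rel32 = (int32_t)(target -
            ((uintptr_t)&precodeRX->m_rel32 + sizeof(int32_t)));
    }
};

PrecodeSketch* AllocateSketch(PrecodeSketch* rx, PrecodeSketch* rw, uintptr_t target)
{
    rw->Init(rx, target); // member writes land in the writeable mapping
    return rx;            // only the executable address escapes
}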
diff --git a/src/coreclr/vm/precode.h b/src/coreclr/vm/precode.h
index 99220f3b966..6ebb02fc8e4 100644
--- a/src/coreclr/vm/precode.h
+++ b/src/coreclr/vm/precode.h
@@ -238,7 +238,7 @@ public:
static Precode* Allocate(PrecodeType t, MethodDesc* pMD,
LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
- void Init(PrecodeType t, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+ void Init(Precode* pPrecodeRX, PrecodeType t, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
#ifndef DACCESS_COMPILE
void ResetTargetInterlocked();
diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp
index d4c347cef2f..41c83b06c8c 100644
--- a/src/coreclr/vm/prestub.cpp
+++ b/src/coreclr/vm/prestub.cpp
@@ -2334,7 +2334,16 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT, CallerGCMode callerGCMo
{
if (!GetOrCreatePrecode()->SetTargetInterlocked(pStub->GetEntryPoint()))
{
- pStub->DecRef();
+ if (pStub->HasExternalEntryPoint())
+ {
+ // Stubs with an external entry point are allocated from the regular heap, so they are always writeable
+ pStub->DecRef();
+ }
+ else
+ {
+ ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub));
+ stubWriterHolder.GetRW()->DecRef();
+ }
}
else if (pStub->HasExternalEntryPoint())
{
@@ -2455,7 +2464,8 @@ static PCODE PatchNonVirtualExternalMethod(MethodDesc * pMD, PCODE pCode, PTR_CO
*(INT32 *)(pNewValue+1) = rel32UsingJumpStub((INT32*)(&pThunk->callJmp[1]), pCode, pMD, NULL);
_ASSERTE(IS_ALIGNED((size_t)pThunk, sizeof(INT64)));
- FastInterlockCompareExchangeLong((INT64*)pThunk, newValue, oldValue);
+ ExecutableWriterHolder<INT64> thunkWriterHolder((INT64*)pThunk, sizeof(INT64));
+ FastInterlockCompareExchangeLong(thunkWriterHolder.GetRW(), newValue, oldValue);
FlushInstructionCache(GetCurrentProcess(), pThunk, 8);
}
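
The DoPrestub change above decides per stub whether a writer mapping is needed at all: a stub with an external entry point lives on the ordinary heap and can be mutated in place, while everything else goes through the RW alias. The sketch below mirrors that branch; the Stub layout and the mapping helper are stand-ins, not the runtime's definitions:

// Illustrative stub with just the fields the branch needs.
struct StubSketch { int m_refCount; bool m_external; };

// Identity placeholder: a real version would return an RW alias of 'stub'.
static StubSketch* MapStubWriteableSketch(StubSketch* stub) { return stub; }

void DecRefSketch(StubSketch* stub)
{
    if (stub->m_external)
        stub->m_refCount--;                         // ordinary heap: write in place
    else
        MapStubWriteableSketch(stub)->m_refCount--; // executable memory: write via RW
}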
diff --git a/src/coreclr/vm/readytoruninfo.h b/src/coreclr/vm/readytoruninfo.h
index fcdfc7667a5..acc909ab5dd 100644
--- a/src/coreclr/vm/readytoruninfo.h
+++ b/src/coreclr/vm/readytoruninfo.h
@@ -228,7 +228,7 @@ private:
class DynamicHelpers
{
private:
- static void EmitHelperWithArg(BYTE*& pCode, LoaderAllocator * pAllocator, TADDR arg, PCODE target);
+ static void EmitHelperWithArg(BYTE*& pCode, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target);
public:
static PCODE CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target);
static PCODE CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target);
diff --git a/src/coreclr/vm/stubcache.cpp b/src/coreclr/vm/stubcache.cpp
index e70c9ac3f99..8a415001ef1 100644
--- a/src/coreclr/vm/stubcache.cpp
+++ b/src/coreclr/vm/stubcache.cpp
@@ -59,7 +59,8 @@ StubCacheBase::~StubCacheBase()
while (phe)
{
_ASSERTE(NULL != phe->m_pStub);
- phe->m_pStub->DecRef();
+ ExecutableWriterHolder<Stub> stubWriterHolder(phe->m_pStub, sizeof(Stub));
+ stubWriterHolder.GetRW()->DecRef();
phe = (STUBHASHENTRY*)GetNext((BYTE*)phe);
}
}
@@ -95,8 +96,9 @@ Stub *StubCacheBase::Canonicalize(const BYTE * pRawStub)
StubHolder<Stub> pstub;
pstub = phe->m_pStub;
+ ExecutableWriterHolder<Stub> stubWriterHolder(pstub, sizeof(Stub));
// IncRef as we're returning a reference to our caller.
- pstub->IncRef();
+ stubWriterHolder.GetRW()->IncRef();
pstub.SuppressRelease();
RETURN pstub;
@@ -149,7 +151,8 @@ Stub *StubCacheBase::Canonicalize(const BYTE * pRawStub)
pstub = phe->m_pStub;
}
// IncRef so that caller has firm ownership of stub.
- pstub->IncRef();
+ ExecutableWriterHolder<Stub> stubWriterHolder(pstub, sizeof(Stub));
+ stubWriterHolder.GetRW()->IncRef();
}
}
diff --git a/src/coreclr/vm/stublink.cpp b/src/coreclr/vm/stublink.cpp
index b7b3cfdf182..04a33e39826 100644
--- a/src/coreclr/vm/stublink.cpp
+++ b/src/coreclr/vm/stublink.cpp
@@ -308,7 +308,8 @@ public:
{
ReservationList *pNext = pList->pNext;
- pList->GetStub()->DecRef();
+ ExecutableWriterHolder<Stub> stubWriterHolder(pList->GetStub(), sizeof(Stub));
+ stubWriterHolder.GetRW()->DecRef();
pList = pNext;
}
@@ -320,7 +321,8 @@ public:
ReservationList *pList = ReservationList::FromStub(pStub);
- pList->pNext = m_pList;
+ ExecutableWriterHolder<ReservationList> listWriterHolder(pList, sizeof(ReservationList));
+ listWriterHolder.GetRW()->pNext = m_pList;
m_pList = pList;
}
};
@@ -844,7 +846,7 @@ Stub *StubLinker::Link(LoaderHeap *pHeap, DWORD flags)
);
ASSERT(pStub != NULL);
- bool fSuccess; fSuccess = EmitStub(pStub, globalsize, pHeap);
+ bool fSuccess = EmitStub(pStub, globalsize, pHeap);
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
if (fSuccess)
@@ -1010,7 +1012,12 @@ bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
STANDARD_VM_CONTRACT;
BYTE *pCode = (BYTE*)(pStub->GetBlob());
- BYTE *pData = pCode+globalsize; // start of data area
+
+ ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub));
+ Stub *pStubRW = stubWriterHolder.GetRW();
+
+ BYTE *pCodeRW = (BYTE*)(pStubRW->GetBlob());
+ BYTE *pDataRW = pCodeRW+globalsize; // start of data area
{
int lastCodeOffset = 0;
@@ -1020,7 +1027,7 @@ bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
switch (pCodeElem->m_type) {
case CodeElement::kCodeRun:
- CopyMemory(pCode + pCodeElem->m_globaloffset,
+ CopyMemory(pCodeRW + pCodeElem->m_globaloffset,
((CodeRun*)pCodeElem)->m_codebytes,
((CodeRun*)pCodeElem)->m_numcodebytes);
currOffset = pCodeElem->m_globaloffset + ((CodeRun *)pCodeElem)->m_numcodebytes;
@@ -1052,8 +1059,9 @@ bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
pLabelRef->m_refsize,
fixupval,
pCode + pCodeElem->m_globaloffset,
+ pCodeRW + pCodeElem->m_globaloffset,
pLabelRef->m_variationCode,
- pData + pCodeElem->m_dataoffset);
+ pDataRW + pCodeElem->m_dataoffset);
currOffset =
pCodeElem->m_globaloffset +
@@ -1070,7 +1078,7 @@ bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
// Fill in zeros at the end, if necessary
if (lastCodeOffset < globalsize)
- ZeroMemory(pCode + lastCodeOffset, globalsize - lastCodeOffset);
+ ZeroMemory(pCodeRW + lastCodeOffset, globalsize - lastCodeOffset);
}
// Fill in patch offset, if we have one
@@ -1081,7 +1089,7 @@ bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
{
UINT32 uLabelOffset = GetLabelOffset(m_pPatchLabel);
_ASSERTE(FitsIn<USHORT>(uLabelOffset));
- pStub->SetPatchOffset(static_cast<USHORT>(uLabelOffset));
+ pStubRW->SetPatchOffset(static_cast<USHORT>(uLabelOffset));
LOG((LF_CORDB, LL_INFO100, "SL::ES: patch offset:0x%x\n",
pStub->GetPatchOffset()));
@@ -1090,7 +1098,7 @@ bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
if (pStub->HasUnwindInfo())
{
- if (!EmitUnwindInfo(pStub, globalsize, pHeap))
+ if (!EmitUnwindInfo(pStub, pStubRW, globalsize, pHeap))
return false;
}
#endif // STUBLINKER_GENERATES_UNWIND_INFO
@@ -1266,11 +1274,11 @@ bool FindBlockCallback (PTR_VOID pvArgs, PTR_VOID pvAllocationBase, SIZE_T cbRes
return false;
}
-bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap)
+bool StubLinker::EmitUnwindInfo(Stub* pStubRX, Stub* pStubRW, int globalsize, LoaderHeap* pHeap)
{
STANDARD_VM_CONTRACT;
- BYTE *pCode = (BYTE*)(pStub->GetEntryPoint());
+ BYTE *pCode = (BYTE*)(pStubRX->GetEntryPoint());
//
// Determine the lower bound of the address space containing the stub.
@@ -1307,7 +1315,7 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap)
// make that INT32_MAX.
//
- StubUnwindInfoHeader *pHeader = pStub->GetUnwindInfoHeader();
+ StubUnwindInfoHeader *pHeader = pStubRW->GetUnwindInfoHeader();
_ASSERTE(IS_ALIGNED(pHeader, sizeof(void*)));
BYTE *pbBaseAddress = pbRegionBaseAddress;
@@ -1334,16 +1342,16 @@ bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap)
// Ensure that the first RUNTIME_FUNCTION struct ends up pointer aligned,
// so that the StubUnwindInfoHeader struct is aligned. UNWIND_INFO
// includes one UNWIND_CODE.
- _ASSERTE(IS_ALIGNED(pStub, sizeof(void*)));
+ _ASSERTE(IS_ALIGNED(pStubRX, sizeof(void*)));
_ASSERTE(0 == (FIELD_OFFSET(StubUnwindInfoHeader, FunctionEntry) % sizeof(void*)));
- StubUnwindInfoHeader * pUnwindInfoHeader = pStub->GetUnwindInfoHeader();
+ StubUnwindInfoHeader * pUnwindInfoHeader = pStubRW->GetUnwindInfoHeader();
#ifdef TARGET_AMD64
UNWIND_CODE *pDestUnwindCode = &pUnwindInfoHeader->UnwindInfo.UnwindCode[0];
#ifdef _DEBUG
- UNWIND_CODE *pDestUnwindCodeLimit = (UNWIND_CODE*)pStub->GetUnwindInfoHeaderSuffix();
+ UNWIND_CODE *pDestUnwindCodeLimit = (UNWIND_CODE*)pStubRW->GetUnwindInfoHeaderSuffix();
#endif
UINT FrameRegister = 0;
@@ -2055,8 +2063,6 @@ Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
CONTRACTL_END;
Stub* pStub = NewStub(NULL, 0, flags | NEWSTUB_FL_EXTERNAL);
- _ASSERTE(pStub->HasExternalEntryPoint());
-
*(PTR_VOID *)(pStub + 1) = pCode;
return pStub;
@@ -2081,6 +2087,11 @@ Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
}
CONTRACTL_END;
+ if (flags & NEWSTUB_FL_EXTERNAL)
+ {
+ _ASSERTE(pHeap == NULL);
+ }
+
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
_ASSERTE(!nUnwindInfoSize || !pHeap || pHeap->m_fPermitStubsWithUnwindInfo);
#endif // STUBLINKER_GENERATES_UNWIND_INFO
@@ -2121,15 +2132,29 @@ Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
}
else
{
- pBlock = (BYTE*)(void*) pHeap->AllocAlignedMem(totalSize, CODE_SIZE_ALIGN);
+ TaggedMemAllocPtr ptr = pHeap->AllocAlignedMem(totalSize, CODE_SIZE_ALIGN);
+ pBlock = (BYTE*)(void*)ptr;
flags |= NEWSTUB_FL_LOADERHEAP;
}
+ size_t stubPayloadOffset = totalSize -
+ (sizeof(Stub) + ((flags & NEWSTUB_FL_EXTERNAL) ? sizeof(PTR_PCODE) : numCodeBytes));
+
// Make sure that the payload of the stub is aligned
- Stub* pStub = (Stub*)((pBlock + totalSize) -
- (sizeof(Stub) + ((flags & NEWSTUB_FL_EXTERNAL) ? sizeof(PTR_PCODE) : numCodeBytes)));
+ Stub* pStubRX = (Stub*)(pBlock + stubPayloadOffset);
+ Stub* pStubRW;
+ ExecutableWriterHolder<Stub> stubWriterHolder;
- pStub->SetupStub(
+ if (pHeap == NULL)
+ {
+ pStubRW = pStubRX;
+ }
+ else
+ {
+ stubWriterHolder = ExecutableWriterHolder<Stub>(pStubRX, sizeof(Stub));
+ pStubRW = stubWriterHolder.GetRW();
+ }
+ pStubRW->SetupStub(
numCodeBytes,
flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
@@ -2137,9 +2162,9 @@ Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
#endif
);
- _ASSERTE((BYTE *)pStub->GetAllocationBase() == pBlock);
+ _ASSERTE((BYTE *)pStubRX->GetAllocationBase() == pBlock);
- return pStub;
+ return pStubRX;
}
void Stub::SetupStub(int numCodeBytes, DWORD flags
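
The if/else above is the heart of the NewStub change: an external stub (pHeap == NULL) lives in ordinary memory and can be written in place, while a loader-heap stub may sit in W^X-protected memory and needs a scoped RW alias. A condensed sketch of the idiom, assuming only what the diff shows (a default-constructible, assignable ExecutableWriterHolder<T> exposing GetRW()):

    template <typename T>
    T* GetWriteablePointer(T* pRX, bool inExecutableHeap, ExecutableWriterHolder<T>& holder)
    {
        if (!inExecutableHeap)
            return pRX;                                  // plain memory: write in place
        holder = ExecutableWriterHolder<T>(pRX, sizeof(T));
        return holder.GetRW();                           // scoped RW alias of the same bytes
    }
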
@@ -2298,7 +2323,8 @@ Stub *ArgBasedStubCache::GetStub(UINT_PTR key)
}
}
if (pStub) {
- pStub->IncRef();
+ ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub));
+ stubWriterHolder.GetRW()->IncRef();
}
return pStub;
}
@@ -2350,12 +2376,14 @@ Stub* ArgBasedStubCache::AttemptToSetStub(UINT_PTR key, Stub *pStub)
CrstHolder ch(&m_crst);
+ bool incRefForCache = false;
+
if (key < m_numFixedSlots) {
if (m_aStub[key]) {
pStub = m_aStub[key];
} else {
m_aStub[key] = pStub;
- pStub->IncRef(); // IncRef on cache's behalf
+ incRefForCache = true;
}
} else {
SlotEntry *pSlotEntry;
@@ -2371,14 +2399,19 @@ Stub* ArgBasedStubCache::AttemptToSetStub(UINT_PTR key, Stub *pStub)
if (!pSlotEntry) {
pSlotEntry = new SlotEntry;
pSlotEntry->m_pStub = pStub;
- pStub->IncRef(); // IncRef on cache's behalf
+ incRefForCache = true;
pSlotEntry->m_key = key;
pSlotEntry->m_pNext = m_pSlotEntries;
m_pSlotEntries = pSlotEntry;
}
}
if (pStub) {
- pStub->IncRef(); // IncRef because we're returning it to caller
+ ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub));
+ if (incRefForCache)
+ {
+ stubWriterHolder.GetRW()->IncRef(); // IncRef on cache's behalf
+ }
+ stubWriterHolder.GetRW()->IncRef(); // IncRef because we're returning it to caller
}
return pStub;
}
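
The incRefForCache flag defers the cache's reference-count bump so that both IncRef calls run under a single ExecutableWriterHolder; each holder may create a fresh RW mapping, so batching the two increments avoids mapping the stub twice. A sketch of the batched form, with a hypothetical helper name:

    static void AddStubRefs(Stub* pStub, bool alsoForCache)
    {
        ExecutableWriterHolder<Stub> writer(pStub, sizeof(Stub));
        if (alsoForCache)
            writer.GetRW()->IncRef();   // reference owned by the cache
        writer.GetRW()->IncRef();       // reference returned to the caller
    }
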
diff --git a/src/coreclr/vm/stublink.h b/src/coreclr/vm/stublink.h
index 9fdede17d2b..94326f9962e 100644
--- a/src/coreclr/vm/stublink.h
+++ b/src/coreclr/vm/stublink.h
@@ -59,7 +59,6 @@
//-------------------------------------------------------------------------
class InstructionFormat;
class Stub;
-class InterceptStub;
class CheckDuplicatedStructLayouts;
class CodeBasedStubCache;
struct CodeLabel;
@@ -400,7 +399,7 @@ private:
CodeRun *GetLastCodeRunIfAny();
- bool EmitUnwindInfo(Stub* pStub, int globalsize, LoaderHeap* pHeap);
+ bool EmitUnwindInfo(Stub* pStubRX, Stub* pStubRW, int globalsize, LoaderHeap* pHeap);
#if defined(TARGET_AMD64) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
UNWIND_CODE *AllocUnwindInfo (UCHAR Op, UCHAR nExtraSlots = 0);
@@ -972,7 +971,7 @@ class InstructionFormat
}
virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode) = 0;
- virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pCodeBuffer, UINT variationCode, BYTE *pDataBuffer) = 0;
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pCodeBufferRX, BYTE *pCodeBufferRW, UINT variationCode, BYTE *pDataBuffer) = 0;
virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
{
WRAPPER_NO_CONTRACT;
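
Every InstructionFormat implementation now receives both views of the code buffer. A hypothetical override under the new signature, for illustration only: simple formats write solely through pCodeBufferRW (the linker has already fixed up the displacement), while pCodeBufferRX exists for formats that must compute addresses relative to where the code will actually execute.

    virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference,
                                 BYTE *pCodeBufferRX, BYTE *pCodeBufferRW,
                                 UINT variationCode, BYTE *pDataBuffer)
    {
        // Illustrative near-jump encoding; all stores go through the RW buffer.
        pCodeBufferRW[0] = 0xE9;                                   // jmp rel32
        *((INT32*)(pCodeBufferRW + 1)) = (INT32)fixedUpReference;  // pre-fixed displacement
    }
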
diff --git a/src/coreclr/vm/threadsuspend.cpp b/src/coreclr/vm/threadsuspend.cpp
index 68e4caf6e69..62333868533 100644
--- a/src/coreclr/vm/threadsuspend.cpp
+++ b/src/coreclr/vm/threadsuspend.cpp
@@ -3649,24 +3649,26 @@ void Thread::CommitGCStressInstructionUpdate()
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+ ExecutableWriterHolder<BYTE> destCodeWriterHolder(pbDestCode, sizeof(DWORD));
+
#if defined(TARGET_X86) || defined(TARGET_AMD64)
- *pbDestCode = *pbSrcCode;
+ *destCodeWriterHolder.GetRW() = *pbSrcCode;
#elif defined(TARGET_ARM)
if (GetARMInstructionLength(pbDestCode) == 2)
- *(WORD*)pbDestCode = *(WORD*)pbSrcCode;
+ *(WORD*)destCodeWriterHolder.GetRW() = *(WORD*)pbSrcCode;
else
- *(DWORD*)pbDestCode = *(DWORD*)pbSrcCode;
+ *(DWORD*)destCodeWriterHolder.GetRW() = *(DWORD*)pbSrcCode;
#elif defined(TARGET_ARM64)
- *(DWORD*)pbDestCode = *(DWORD*)pbSrcCode;
+ *(DWORD*)destCodeWriterHolder.GetRW() = *(DWORD*)pbSrcCode;
#else
- *pbDestCode = *pbSrcCode;
+ *destCodeWriterHolder.GetRW() = *pbSrcCode;
#endif
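
CommitGCStressInstructionUpdate patches at most one instruction, so the holder maps only sizeof(DWORD) bytes at the destination; pbDestCode keeps its role as the executable address. A sketch of the pattern with a hypothetical helper name (the cache flush mirrors what the virtual-call stub code below does; it is not part of this hunk):

    static void PatchInstructionDword(BYTE* pbDestCode /* RX */, DWORD newInstr)
    {
        ExecutableWriterHolder<BYTE> writer(pbDestCode, sizeof(DWORD));
        *(DWORD*)writer.GetRW() = newInstr;                  // store via the RW alias
        ClrFlushInstructionCache(pbDestCode, sizeof(DWORD)); // flush at the RX address
    }
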
diff --git a/src/coreclr/vm/virtualcallstub.cpp b/src/coreclr/vm/virtualcallstub.cpp
index 878e6728568..94a5c594d06 100644
--- a/src/coreclr/vm/virtualcallstub.cpp
+++ b/src/coreclr/vm/virtualcallstub.cpp
@@ -1209,8 +1209,9 @@ VTableCallHolder* VirtualCallStubManager::GenerateVTableCallStub(DWORD slot)
//allocate from the requisite heap and copy the template over it.
VTableCallHolder * pHolder = (VTableCallHolder*)(void*)vtable_heap->AllocAlignedMem(VTableCallHolder::GetHolderSize(slot), CODE_SIZE_ALIGN);
+ ExecutableWriterHolder<VTableCallHolder> vtableWriterHolder(pHolder, sizeof(VTableCallHolder));
+ vtableWriterHolder.GetRW()->Initialize(slot);
- pHolder->Initialize(slot);
ClrFlushInstructionCache(pHolder->stub(), pHolder->stub()->size());
AddToCollectibleVSDRangeList(pHolder);
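
Linearized, the generation path above now reads: allocate in the executable heap, initialize through a scoped RW alias, then flush the instruction cache at the RX address. A condensed restatement (names as in the hunk; the explicit brace scope is added here only to emphasize that the holder's destructor releases the mapping):

    VTableCallHolder* pHolder = (VTableCallHolder*)(void*)
        vtable_heap->AllocAlignedMem(VTableCallHolder::GetHolderSize(slot), CODE_SIZE_ALIGN);
    {
        ExecutableWriterHolder<VTableCallHolder> vtableWriterHolder(pHolder, sizeof(VTableCallHolder));
        vtableWriterHolder.GetRW()->Initialize(slot);   // all stores through the RW alias
    }
    ClrFlushInstructionCache(pHolder->stub(), pHolder->stub()->size());
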
@@ -2769,7 +2770,12 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad
}
#endif
- holder->Initialize(addrOfCode,
+ ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, sizeof(DispatchHolder)
+#ifdef TARGET_AMD64
+ + sizeof(DispatchStubShort)
+#endif
+ );
+ dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
addrOfFail,
(size_t)pMTExpected
#ifdef TARGET_AMD64
@@ -2833,8 +2839,9 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE
//allocate from the requisite heap and copy the template over it.
DispatchHolder * holder = (DispatchHolder*) (void*)
dispatch_heap->AllocAlignedMem(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG), CODE_SIZE_ALIGN);
+ ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, sizeof(DispatchHolder) + sizeof(DispatchStubLong));
- holder->Initialize(addrOfCode,
+ dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
addrOfFail,
(size_t)pMTExpected,
DispatchStub::e_TYPE_LONG);
@@ -2942,8 +2949,10 @@ ResolveHolder *VirtualCallStubManager::GenerateResolveStub(PCODE addr
//allocate from the requisite heap and copy the templates for each piece over it.
ResolveHolder * holder = (ResolveHolder*) (void*)
resolve_heap->AllocAlignedMem(sizeof(ResolveHolder), CODE_SIZE_ALIGN);
+ ExecutableWriterHolder<ResolveHolder> resolveWriterHolder(holder, sizeof(ResolveHolder));
- holder->Initialize(addrOfResolver, addrOfPatcher,
+ resolveWriterHolder.GetRW()->Initialize(holder,
+ addrOfResolver, addrOfPatcher,
dispatchToken, DispatchCache::HashToken(dispatchToken),
g_resolveCache->GetCacheBaseAddr(), counterAddr
#if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
@@ -2986,8 +2995,9 @@ LookupHolder *VirtualCallStubManager::GenerateLookupStub(PCODE addrOfResolver, s
//allocate from the requisite heap and copy the template over it.
LookupHolder * holder = (LookupHolder*) (void*) lookup_heap->AllocAlignedMem(sizeof(LookupHolder), CODE_SIZE_ALIGN);
+ ExecutableWriterHolder<LookupHolder> lookupWriterHolder(holder, sizeof(LookupHolder));
- holder->Initialize(addrOfResolver, dispatchToken);
+ lookupWriterHolder.GetRW()->Initialize(holder, addrOfResolver, dispatchToken);
ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
AddToCollectibleVSDRangeList(holder);
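
Note the new first argument to Initialize in each of the Generate* hunks above: because Initialize now executes against the RW alias, "this" no longer equals the stub's executable address, so the RX pointer is passed in explicitly for anything self-referential. A hypothetical shape (field names are illustrative, not CoreCLR's):

    void LookupHolder::Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
    {
        // Plain data may be stored through "this", the RW alias...
        m_resolveTarget = resolveWorkerTarget;
        m_token         = dispatchToken;
        // ...but any PC-relative field must be computed from the RX address,
        // e.g. relative to (BYTE*)pLookupHolderRX + offset, never from "this".
    }
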