github.com/dotnet/runtime.git
author    Jan Vorlicek <jan.vorlicek@volny.cz>  2021-07-11 17:46:53 +0300
committer GitHub <noreply@github.com>           2021-07-11 17:46:53 +0300
commit    6a47ecf4a8ee670356f5e554f08afb0b32cdac9a (patch)
tree      eee15291b1ce2ad0c90c2e13b646d6e83fdf472b /src/coreclr/vm
parent    83a4d3cc02fb04fce17b24fc09b3cdf77a12ba51 (diff)
W^X support (#54954)
* W^X support

  This change is the last part of enabling W^X support. It adds the actual
  executable allocator that handles all double-mapped memory allocations and
  creates the writeable mappings. The platform-specific functionality is
  placed in a new minipal that is going to be the basis for the future
  removal of Windows API usage from the native runtime.

  The last state of the change was tested on all supported platforms using
  coreclr pri 1 tests, with W^X both enabled and disabled via the
  COMPlus_EnableWriteXorExecute variable. The debugger changes were tested
  using the managed debugger test suite on Windows x64, x86, and on Apple
  Silicon so far; further testing on other platforms is in progress.

* Replace LeafLock in UMEntryThunkFreeList by a new lock
* Also allocate LoaderHeapFreeBlock from the regular heap
* Set the W^X default to disabled
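The recurring pattern in the hunks below: under W^X, code pages are mapped executable but not writeable, so patch sites are never written through their executable (RX) address. An ExecutableWriterHolder<T> opens a temporary writeable (RW) alias of the same physical memory, the write goes through GetRW(), and the alias is released when the holder leaves scope; with W^X disabled (the default set by this commit, toggled via COMPlus_EnableWriteXorExecute) the RW address is simply the RX address. A minimal sketch of the usage pattern, assuming the CoreCLR VM headers; PatchSlot is a hypothetical call site, not code from this commit:

// Minimal sketch (assumes the CoreCLR VM headers); PatchSlot is hypothetical.
void PatchSlot(void** pSlotRX, void* newTarget)
{
    // Map a temporary RW alias of the page(s) holding the slot. When W^X is
    // disabled, the holder simply hands back the original RX pointer.
    ExecutableWriterHolder<void*> slotWriterHolder(pSlotRX, sizeof(void*));
    *slotWriterHolder.GetRW() = newTarget;  // write through the RW alias
    // The RW mapping is released when the holder is destroyed; execution
    // continues through the unchanged RX mapping at pSlotRX.
}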
Diffstat (limited to 'src/coreclr/vm')
-rw-r--r--  src/coreclr/vm/CMakeLists.txt                |   4
-rw-r--r--  src/coreclr/vm/amd64/JitHelpers_Fast.asm     |  79
-rw-r--r--  src/coreclr/vm/amd64/jithelpers_fast.S       |  26
-rw-r--r--  src/coreclr/vm/amd64/jitinterfaceamd64.cpp   |  20
-rw-r--r--  src/coreclr/vm/arm/armsinglestepper.cpp      |  30
-rw-r--r--  src/coreclr/vm/arm/asmhelpers.S              |  10
-rw-r--r--  src/coreclr/vm/arm/asmhelpers.asm            |  12
-rw-r--r--  src/coreclr/vm/arm/cgencpu.h                 |  13
-rw-r--r--  src/coreclr/vm/arm/stubs.cpp                 |  23
-rw-r--r--  src/coreclr/vm/arm64/arm64singlestepper.cpp  |  12
-rw-r--r--  src/coreclr/vm/arm64/asmhelpers.S            |  10
-rw-r--r--  src/coreclr/vm/arm64/asmhelpers.asm          |  35
-rw-r--r--  src/coreclr/vm/arm64/cgencpu.h               |  13
-rw-r--r--  src/coreclr/vm/arm64/stubs.cpp               |  10
-rw-r--r--  src/coreclr/vm/ceemain.cpp                   |   9
-rw-r--r--  src/coreclr/vm/class.cpp                     |   5
-rw-r--r--  src/coreclr/vm/codeman.cpp                   |  27
-rw-r--r--  src/coreclr/vm/comcallablewrapper.cpp        |  18
-rw-r--r--  src/coreclr/vm/comcallablewrapper.h          |   4
-rw-r--r--  src/coreclr/vm/comdelegate.cpp               |   2
-rw-r--r--  src/coreclr/vm/dllimportcallback.cpp         |   2
-rw-r--r--  src/coreclr/vm/dynamicmethod.cpp             |   7
-rw-r--r--  src/coreclr/vm/excep.cpp                     |   2
-rw-r--r--  src/coreclr/vm/exceptionhandling.cpp         |   6
-rw-r--r--  src/coreclr/vm/gccover.cpp                   |   4
-rw-r--r--  src/coreclr/vm/i386/jithelp.S                |  30
-rw-r--r--  src/coreclr/vm/i386/jithelp.asm              |  35
-rw-r--r--  src/coreclr/vm/i386/jitinterfacex86.cpp      |  84
-rw-r--r--  src/coreclr/vm/i386/stublinkerx86.cpp        |   2
-rw-r--r--  src/coreclr/vm/i386/stublinkerx86.h          |  10
-rw-r--r--  src/coreclr/vm/jitinterface.cpp              |   2
-rw-r--r--  src/coreclr/vm/jitinterface.h                |  42
-rw-r--r--  src/coreclr/vm/loaderallocator.cpp           |  17
-rw-r--r--  src/coreclr/vm/loaderallocator.inl           |   6
-rw-r--r--  src/coreclr/vm/method.cpp                    |  40
-rw-r--r--  src/coreclr/vm/precode.cpp                   |   4
-rw-r--r--  src/coreclr/vm/stackwalk.cpp                 |   2
-rw-r--r--  src/coreclr/vm/stublink.cpp                  |  14
-rw-r--r--  src/coreclr/vm/stublink.h                    |   2
-rw-r--r--  src/coreclr/vm/threads.cpp                   | 123
-rw-r--r--  src/coreclr/vm/threads.h                     |  19
-rw-r--r--  src/coreclr/vm/virtualcallstub.cpp           |  14
42 files changed, 484 insertions, 345 deletions
diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt
index 1d682d2a428..9c2cb3df0b7 100644
--- a/src/coreclr/vm/CMakeLists.txt
+++ b/src/coreclr/vm/CMakeLists.txt
@@ -833,7 +833,6 @@ elseif(CLR_CMAKE_TARGET_ARCH_ARM)
set(VM_SOURCES_DAC_AND_WKS_ARCH
${ARCH_SOURCES_DIR}/exceparm.cpp
${ARCH_SOURCES_DIR}/stubs.cpp
- ${ARCH_SOURCES_DIR}/armsinglestepper.cpp
)
set(VM_HEADERS_DAC_AND_WKS_ARCH
@@ -844,6 +843,7 @@ elseif(CLR_CMAKE_TARGET_ARCH_ARM)
set(VM_SOURCES_WKS_ARCH
${ARCH_SOURCES_DIR}/profiler.cpp
+ ${ARCH_SOURCES_DIR}/armsinglestepper.cpp
exceptionhandling.cpp
gcinfodecoder.cpp
)
@@ -868,7 +868,7 @@ elseif(CLR_CMAKE_TARGET_ARCH_ARM64)
)
if(CLR_CMAKE_HOST_UNIX)
- list(APPEND VM_SOURCES_DAC_AND_WKS_ARCH
+ list(APPEND VM_SOURCES_WKS_ARCH
${ARCH_SOURCES_DIR}/arm64singlestepper.cpp
)
endif(CLR_CMAKE_HOST_UNIX)
diff --git a/src/coreclr/vm/amd64/JitHelpers_Fast.asm b/src/coreclr/vm/amd64/JitHelpers_Fast.asm
index 82a301bb0cb..219597eb350 100644
--- a/src/coreclr/vm/amd64/JitHelpers_Fast.asm
+++ b/src/coreclr/vm/amd64/JitHelpers_Fast.asm
@@ -51,37 +51,6 @@ endif
extern JIT_InternalThrow:proc
-; There is an even more optimized version of these helpers possible which takes
-; advantage of knowledge of which way the ephemeral heap is growing to only do 1/2
-; that check (this is more significant in the JIT_WriteBarrier case).
-;
-; Additionally we can look into providing helpers which will take the src/dest from
-; specific registers (like x86) which _could_ (??) make for easier register allocation
-; for the JIT64, however it might lead to having to have some nasty code that treats
-; these guys really special like... :(.
-;
-; Version that does the move, checks whether or not it's in the GC and whether or not
-; it needs to have it's card updated
-;
-; void JIT_CheckedWriteBarrier(Object** dst, Object* src)
-LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
-
- ; When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
- ; but if it isn't then it will just return.
- ;
- ; See if this is in GCHeap
- cmp rcx, [g_lowest_address]
- jb NotInHeap
- cmp rcx, [g_highest_address]
- jnb NotInHeap
-
- jmp JIT_WriteBarrier
-
- NotInHeap:
- ; See comment above about possible AV
- mov [rcx], rdx
- ret
-LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT
; Mark start of the code region that we patch at runtime
LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
@@ -99,7 +68,8 @@ LEAF_ENTRY JIT_WriteBarrier, _TEXT
ifdef _DEBUG
; In debug builds, this just contains jump to the debug version of the write barrier by default
- jmp JIT_WriteBarrier_Debug
+ mov rax, JIT_WriteBarrier_Debug
+ jmp rax
endif
ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -388,6 +358,51 @@ endif
ret
LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT
+Section segment para 'DATA'
+
+ align 16
+
+ public JIT_WriteBarrier_Loc
+JIT_WriteBarrier_Loc:
+ dq 0
+
+LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
+ ; JIT_WriteBarrier(Object** dst, Object* src)
+ jmp QWORD PTR [JIT_WriteBarrier_Loc]
+LEAF_END JIT_WriteBarrier_Callable, _TEXT
+
+; There is an even more optimized version of these helpers possible which takes
+; advantage of knowledge of which way the ephemeral heap is growing to only do 1/2
+; that check (this is more significant in the JIT_WriteBarrier case).
+;
+; Additionally we can look into providing helpers which will take the src/dest from
+; specific registers (like x86) which _could_ (??) make for easier register allocation
+; for the JIT64, however it might lead to having to have some nasty code that treats
+; these guys really special like... :(.
+;
+; Version that does the move, checks whether or not it's in the GC and whether or not
+; it needs to have it's card updated
+;
+; void JIT_CheckedWriteBarrier(Object** dst, Object* src)
+LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
+
+ ; When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
+ ; but if it isn't then it will just return.
+ ;
+ ; See if this is in GCHeap
+ cmp rcx, [g_lowest_address]
+ jb NotInHeap
+ cmp rcx, [g_highest_address]
+ jnb NotInHeap
+
+ jmp QWORD PTR [JIT_WriteBarrier_Loc]
+
+ NotInHeap:
+ ; See comment above about possible AV
+ mov [rcx], rdx
+ ret
+LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT
+
; The following helper will access ("probe") a word on each page of the stack
; starting with the page right beneath rsp down to the one pointed to by r11.
; The procedure is needed to make sure that the "guard" page is pushed down below the allocated stack frame.
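The hunks above replace direct jumps to JIT_WriteBarrier with an indirect jmp through the new JIT_WriteBarrier_Loc data slot: the barrier now lives in a copied, double-mapped page that can land anywhere in the address space, beyond rel32 range of statically compiled code (the same reason the debug path switches from a direct jmp to a mov rax / jmp rax absolute jump). A sketch of how the slot would get its value; PublishWriteBarrierCopy is a hypothetical name, and per the comment in jithelpers_fast.S the real assignment happens in InitThreadManager:

extern "C" void* JIT_WriteBarrier_Loc;  // data slot defined in the asm above
extern "C" void  JIT_WriteBarrier();    // original barrier entry point

void PublishWriteBarrierCopy()  // hypothetical; see InitThreadManager
{
    // Store the executable address of the barrier copy (or of the original
    // barrier when copying is disabled); the Callable/Checked thunks then
    // reach it via jmp [JIT_WriteBarrier_Loc].
    JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier);
}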
diff --git a/src/coreclr/vm/amd64/jithelpers_fast.S b/src/coreclr/vm/amd64/jithelpers_fast.S
index a13afb48785..8109886d0c9 100644
--- a/src/coreclr/vm/amd64/jithelpers_fast.S
+++ b/src/coreclr/vm/amd64/jithelpers_fast.S
@@ -32,26 +32,14 @@ LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
// See if this is in GCHeap
PREPARE_EXTERNAL_VAR g_lowest_address, rax
cmp rdi, [rax]
-#ifdef FEATURE_WRITEBARRIER_COPY
// jb NotInHeap
.byte 0x72, 0x12
-#else
- // jb NotInHeap
- .byte 0x72, 0x0e
-#endif
PREPARE_EXTERNAL_VAR g_highest_address, rax
cmp rdi, [rax]
-#ifdef FEATURE_WRITEBARRIER_COPY
// jnb NotInHeap
.byte 0x73, 0x06
jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)]
-#else
- // jnb NotInHeap
- .byte 0x73, 0x02
- // jmp C_FUNC(JIT_WriteBarrier)
- .byte 0xeb, 0x05
-#endif
NotInHeap:
// See comment above about possible AV
@@ -398,11 +386,17 @@ LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
ret
LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT
-#ifdef FEATURE_WRITEBARRIER_COPY
// When JIT_WriteBarrier is copied into an allocated page,
// helpers use this global variable to jump to it. This variable is set in InitThreadManager.
- .global _JIT_WriteBarrier_Loc
- .zerofill __DATA,__common,_JIT_WriteBarrier_Loc,8,3
+ .global C_FUNC(JIT_WriteBarrier_Loc)
+#ifdef TARGET_OSX
+ .zerofill __DATA,__common,C_FUNC(JIT_WriteBarrier_Loc),8,3
+#else
+ .data
+ C_FUNC(JIT_WriteBarrier_Loc):
+ .quad 0
+ .text
+#endif
// ------------------------------------------------------------------
// __declspec(naked) void F_CALL_CONV JIT_WriteBarrier_Callable(Object **dst, Object* val)
@@ -412,8 +406,6 @@ LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)]
LEAF_END JIT_WriteBarrier_Callable, _TEXT
-#endif // FEATURE_WRITEBARRIER_COPY
-
// The following helper will access ("probe") a word on each page of the stack
// starting with the page right beneath rsp down to the one pointed to by r11.
diff --git a/src/coreclr/vm/amd64/jitinterfaceamd64.cpp b/src/coreclr/vm/amd64/jitinterfaceamd64.cpp
index 38bff78a54c..02b023777b8 100644
--- a/src/coreclr/vm/amd64/jitinterfaceamd64.cpp
+++ b/src/coreclr/vm/amd64/jitinterfaceamd64.cpp
@@ -293,7 +293,10 @@ int WriteBarrierManager::ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier,
// the memcpy must come before the switch statment because the asserts inside the switch
// are actually looking into the JIT_WriteBarrier buffer
- memcpy(GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
+ {
+ ExecutableWriterHolder<void> writeBarrierWriterHolder(GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), GetCurrentWriteBarrierSize());
+ memcpy(writeBarrierWriterHolder.GetRW(), (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
+ }
switch (newWriteBarrier)
{
@@ -544,7 +547,8 @@ int WriteBarrierManager::UpdateEphemeralBounds(bool isRuntimeSuspended)
// Change immediate if different from new g_ephermeral_high.
if (*(UINT64*)m_pUpperBoundImmediate != (size_t)g_ephemeral_high)
{
- *(UINT64*)m_pUpperBoundImmediate = (size_t)g_ephemeral_high;
+ ExecutableWriterHolder<UINT64> upperBoundWriterHolder((UINT64*)m_pUpperBoundImmediate, sizeof(UINT64));
+ *upperBoundWriterHolder.GetRW() = (size_t)g_ephemeral_high;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
}
@@ -557,7 +561,8 @@ int WriteBarrierManager::UpdateEphemeralBounds(bool isRuntimeSuspended)
// Change immediate if different from new g_ephermeral_low.
if (*(UINT64*)m_pLowerBoundImmediate != (size_t)g_ephemeral_low)
{
- *(UINT64*)m_pLowerBoundImmediate = (size_t)g_ephemeral_low;
+ ExecutableWriterHolder<UINT64> lowerBoundImmediateWriterHolder((UINT64*)m_pLowerBoundImmediate, sizeof(UINT64));
+ *lowerBoundImmediateWriterHolder.GetRW() = (size_t)g_ephemeral_low;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
break;
@@ -609,7 +614,8 @@ int WriteBarrierManager::UpdateWriteWatchAndCardTableLocations(bool isRuntimeSus
#endif // FEATURE_SVR_GC
if (*(UINT64*)m_pWriteWatchTableImmediate != (size_t)g_sw_ww_table)
{
- *(UINT64*)m_pWriteWatchTableImmediate = (size_t)g_sw_ww_table;
+ ExecutableWriterHolder<UINT64> writeWatchTableImmediateWriterHolder((UINT64*)m_pWriteWatchTableImmediate, sizeof(UINT64));
+ *writeWatchTableImmediateWriterHolder.GetRW() = (size_t)g_sw_ww_table;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
break;
@@ -621,14 +627,16 @@ int WriteBarrierManager::UpdateWriteWatchAndCardTableLocations(bool isRuntimeSus
if (*(UINT64*)m_pCardTableImmediate != (size_t)g_card_table)
{
- *(UINT64*)m_pCardTableImmediate = (size_t)g_card_table;
+ ExecutableWriterHolder<UINT64> cardTableImmediateWriterHolder((UINT64*)m_pCardTableImmediate, sizeof(UINT64));
+ *cardTableImmediateWriterHolder.GetRW() = (size_t)g_card_table;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
if (*(UINT64*)m_pCardBundleTableImmediate != (size_t)g_card_bundle_table)
{
- *(UINT64*)m_pCardBundleTableImmediate = (size_t)g_card_bundle_table;
+ ExecutableWriterHolder<UINT64> cardBundleTableImmediateWriterHolder((UINT64*)m_pCardBundleTableImmediate, sizeof(UINT64));
+ *cardBundleTableImmediateWriterHolder.GetRW() = (size_t)g_card_bundle_table;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
#endif
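Each of these hunks follows the same read-RX/write-RW asymmetry: the current immediate is compared through the executable mapping, the new value is written through a scoped RW alias, and an instruction-cache flush is queued. A condensed sketch of the pattern, where UpdateImmediate is a hypothetical wrapper (UINT64, ExecutableWriterHolder, and SWB_ICACHE_FLUSH come from the hunks above):

// Sketch only; UpdateImmediate is hypothetical.
int UpdateImmediate(UINT64* pImmediateRX, UINT64 newValue, int stompWBCompleteActions)
{
    if (*pImmediateRX != newValue)  // read through the RX mapping
    {
        ExecutableWriterHolder<UINT64> immediateWriterHolder(pImmediateRX, sizeof(UINT64));
        *immediateWriterHolder.GetRW() = newValue;   // write through the RW alias
        stompWBCompleteActions |= SWB_ICACHE_FLUSH;  // flush before the code runs again
    }
    return stompWBCompleteActions;
}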
diff --git a/src/coreclr/vm/arm/armsinglestepper.cpp b/src/coreclr/vm/arm/armsinglestepper.cpp
index 79317263b22..f9e718ae542 100644
--- a/src/coreclr/vm/arm/armsinglestepper.cpp
+++ b/src/coreclr/vm/arm/armsinglestepper.cpp
@@ -97,11 +97,7 @@ ArmSingleStepper::ArmSingleStepper()
ArmSingleStepper::~ArmSingleStepper()
{
#if !defined(DACCESS_COMPILE)
-#ifdef TARGET_UNIX
SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->BackoutMem(m_rgCode, kMaxCodeBuffer * sizeof(WORD));
-#else
- DeleteExecutable(m_rgCode);
-#endif
#endif
}
@@ -110,11 +106,7 @@ void ArmSingleStepper::Init()
#if !defined(DACCESS_COMPILE)
if (m_rgCode == NULL)
{
-#ifdef TARGET_UNIX
m_rgCode = (WORD *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(kMaxCodeBuffer * sizeof(WORD)));
-#else
- m_rgCode = new (executable) WORD[kMaxCodeBuffer];
-#endif
}
#endif
}
@@ -287,6 +279,8 @@ void ArmSingleStepper::Apply(T_CONTEXT *pCtx)
DWORD idxNextInstruction = 0;
+ ExecutableWriterHolder<WORD> codeWriterHolder(m_rgCode, kMaxCodeBuffer * sizeof(m_rgCode[0]));
+
if (m_originalITState.InITBlock() && !ConditionHolds(pCtx, m_originalITState.CurrentCondition()))
{
LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper: Case 1: ITState::Clear;\n"));
@@ -295,7 +289,7 @@ void ArmSingleStepper::Apply(T_CONTEXT *pCtx)
// to execute. We'll put the correct value back during fixup.
ITState::Clear(pCtx);
m_fSkipIT = true;
- m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = kBreakpointOp;
}
else if (TryEmulate(pCtx, opcode1, opcode2, false))
{
@@ -308,8 +302,8 @@ void ArmSingleStepper::Apply(T_CONTEXT *pCtx)
m_fEmulate = true;
// Set breakpoints to stop the execution. This will get us right back here.
- m_rgCode[idxNextInstruction++] = kBreakpointOp;
- m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = kBreakpointOp;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = kBreakpointOp;
}
else
{
@@ -323,24 +317,24 @@ void ArmSingleStepper::Apply(T_CONTEXT *pCtx)
// guarantee one of them will be hit (we don't care which one -- the fixup code will update
// the PC and IT state to make it look as though the CPU just executed the current
// instruction).
- m_rgCode[idxNextInstruction++] = opcode1;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = opcode1;
if (Is32BitInstruction(opcode1))
- m_rgCode[idxNextInstruction++] = opcode2;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = opcode2;
- m_rgCode[idxNextInstruction++] = kBreakpointOp;
- m_rgCode[idxNextInstruction++] = kBreakpointOp;
- m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = kBreakpointOp;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = kBreakpointOp;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = kBreakpointOp;
}
// Always terminate the redirection buffer with a breakpoint.
- m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ codeWriterHolder.GetRW()[idxNextInstruction++] = kBreakpointOp;
_ASSERTE(idxNextInstruction <= kMaxCodeBuffer);
// Set the thread up so it will redirect to our buffer when execution resumes.
pCtx->Pc = ((DWORD)(DWORD_PTR)m_rgCode) | THUMB_CODE;
// Make sure the CPU sees the updated contents of the buffer.
- FlushInstructionCache(GetCurrentProcess(), m_rgCode, sizeof(m_rgCode));
+ FlushInstructionCache(GetCurrentProcess(), m_rgCode, kMaxCodeBuffer * sizeof(m_rgCode[0]));
// Done, set the state.
m_state = Applied;
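The FlushInstructionCache change above fixes a classic sizeof bug: m_rgCode is a pointer, so sizeof(m_rgCode) yields the pointer size (4 or 8 bytes), not the size of the redirection buffer it points to. A standalone sketch of the bug class, with hypothetical names, runnable as-is:

#include <cstddef>
#include <cstdio>

int main()
{
    const size_t kMaxCodeBuffer = 8;  // element count, as in the stepper
    unsigned short* rgCode = new unsigned short[kMaxCodeBuffer];

    printf("sizeof(rgCode)            = %zu\n", sizeof(rgCode));                     // pointer size only
    printf("kMaxCodeBuffer * elemsize = %zu\n", kMaxCodeBuffer * sizeof(rgCode[0])); // full buffer: 16

    delete[] rgCode;
    return 0;
}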
diff --git a/src/coreclr/vm/arm/asmhelpers.S b/src/coreclr/vm/arm/asmhelpers.S
index 930395b56dc..3faa8fe3684 100644
--- a/src/coreclr/vm/arm/asmhelpers.S
+++ b/src/coreclr/vm/arm/asmhelpers.S
@@ -978,6 +978,16 @@ g_rgWriteBarrierDescriptors:
.global g_rgWriteBarrierDescriptors
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_WriteBarrier_Callable(Object **dst, Object* val)
+ LEAF_ENTRY JIT_WriteBarrier_Callable
+
+ // Branch to the write barrier
+ ldr r2, =JIT_WriteBarrier_Loc // or R3? See targetarm.h
+ ldr pc, [r2]
+
+ LEAF_END JIT_WriteBarrier_Callable
+
#ifdef FEATURE_READYTORUN
NESTED_ENTRY DelayLoad_MethodCall_FakeProlog, _TEXT, NoHandler
diff --git a/src/coreclr/vm/arm/asmhelpers.asm b/src/coreclr/vm/arm/asmhelpers.asm
index d20540e6209..82596e66693 100644
--- a/src/coreclr/vm/arm/asmhelpers.asm
+++ b/src/coreclr/vm/arm/asmhelpers.asm
@@ -1724,6 +1724,18 @@ tempReg SETS "$tmpReg"
END_WRITE_BARRIERS
+ IMPORT JIT_WriteBarrier_Loc
+
+; ------------------------------------------------------------------
+; __declspec(naked) void F_CALL_CONV JIT_WriteBarrier_Callable(Object **dst, Object* val)
+ LEAF_ENTRY JIT_WriteBarrier_Callable
+
+ ; Branch to the write barrier
+ ldr r2, =JIT_WriteBarrier_Loc ; or R3? See targetarm.h
+ ldr pc, [r2]
+
+ LEAF_END
+
#ifdef FEATURE_READYTORUN
NESTED_ENTRY DelayLoad_MethodCall_FakeProlog
diff --git a/src/coreclr/vm/arm/cgencpu.h b/src/coreclr/vm/arm/cgencpu.h
index 88d0c6802b6..425c2865584 100644
--- a/src/coreclr/vm/arm/cgencpu.h
+++ b/src/coreclr/vm/arm/cgencpu.h
@@ -1069,6 +1069,7 @@ struct StubPrecode {
return m_pTarget;
}
+#ifndef DACCESS_COMPILE
void ResetTargetInterlocked()
{
CONTRACTL
@@ -1095,6 +1096,7 @@ struct StubPrecode {
return (TADDR)InterlockedCompareExchange(
(LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == expected;
}
+#endif // !DACCESS_COMPILE
#ifdef FEATURE_PREJIT
void Fixup(DataImage *image);
@@ -1167,6 +1169,13 @@ struct FixupPrecode {
return dac_cast<TADDR>(this) + (m_PrecodeChunkIndex + 1) * sizeof(FixupPrecode);
}
+ size_t GetSizeRW()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return GetBase() + sizeof(void*) - dac_cast<TADDR>(this);
+ }
+
TADDR GetMethodDesc();
PCODE GetTarget()
@@ -1175,6 +1184,7 @@ struct FixupPrecode {
return m_pTarget;
}
+#ifndef DACCESS_COMPILE
void ResetTargetInterlocked()
{
CONTRACTL
@@ -1201,6 +1211,7 @@ struct FixupPrecode {
return (TADDR)InterlockedCompareExchange(
(LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == expected;
}
+#endif // !DACCESS_COMPILE
static BOOL IsFixupPrecodeByASM(PCODE addr)
{
@@ -1256,6 +1267,7 @@ struct ThisPtrRetBufPrecode {
return m_pTarget;
}
+#ifndef DACCESS_COMPILE
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
{
CONTRACTL
@@ -1268,6 +1280,7 @@ struct ThisPtrRetBufPrecode {
ExecutableWriterHolder<ThisPtrRetBufPrecode> precodeWriterHolder(this, sizeof(ThisPtrRetBufPrecode));
return FastInterlockCompareExchange((LONG*)&precodeWriterHolder.GetRW()->m_pTarget, (LONG)target, (LONG)expected) == (LONG)expected;
}
+#endif // !DACCESS_COMPILE
};
typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
diff --git a/src/coreclr/vm/arm/stubs.cpp b/src/coreclr/vm/arm/stubs.cpp
index aac3e25b181..6e62df23703 100644
--- a/src/coreclr/vm/arm/stubs.cpp
+++ b/src/coreclr/vm/arm/stubs.cpp
@@ -329,16 +329,28 @@ void ComputeWriteBarrierRange(BYTE ** ppbStart, DWORD * pcbLength)
{
DWORD size = (PBYTE)JIT_PatchedWriteBarrierLast - (PBYTE)JIT_PatchedWriteBarrierStart;
*ppbStart = (PBYTE)JIT_PatchedWriteBarrierStart;
+ if (IsWriteBarrierCopyEnabled())
+ {
+ *ppbStart = GetWriteBarrierCodeLocation(*ppbStart);
+ }
*pcbLength = size;
}
void CopyWriteBarrier(PCODE dstCode, PCODE srcCode, PCODE endCode)
{
- TADDR dst = PCODEToPINSTR(dstCode);
+ TADDR dst = (TADDR)PCODEToPINSTR((PCODE)GetWriteBarrierCodeLocation((void*)dstCode));
TADDR src = PCODEToPINSTR(srcCode);
TADDR end = PCODEToPINSTR(endCode);
size_t size = (PBYTE)end - (PBYTE)src;
+
+ ExecutableWriterHolder<void> writeBarrierWriterHolder;
+ if (IsWriteBarrierCopyEnabled())
+ {
+ writeBarrierWriterHolder = ExecutableWriterHolder<void>((void*)dst, size);
+ dst = (TADDR)writeBarrierWriterHolder.GetRW();
+ }
+
memcpy((PVOID)dst, (PVOID)src, size);
}
@@ -419,7 +431,7 @@ void UpdateGCWriteBarriers(bool postGrow = false)
}
#define GWB_PATCH_OFFSET(_global) \
if (pDesc->m_dw_##_global##_offset != 0xffff) \
- PutThumb2Mov32((UINT16*)(to + pDesc->m_dw_##_global##_offset - 1), (UINT32)(dac_cast<TADDR>(_global)));
+ PutThumb2Mov32((UINT16*)(to + pDesc->m_dw_##_global##_offset), (UINT32)(dac_cast<TADDR>(_global)));
// Iterate through the write barrier patch table created in the .clrwb section
// (see write barrier asm code)
@@ -431,6 +443,13 @@ void UpdateGCWriteBarriers(bool postGrow = false)
PBYTE to = FindWBMapping(pDesc->m_pFuncStart);
if(to)
{
+ to = (PBYTE)PCODEToPINSTR((PCODE)GetWriteBarrierCodeLocation(to));
+ ExecutableWriterHolder<BYTE> barrierWriterHolder;
+ if (IsWriteBarrierCopyEnabled())
+ {
+ barrierWriterHolder = ExecutableWriterHolder<BYTE>(to, pDesc->m_pFuncEnd - pDesc->m_pFuncStart);
+ to = barrierWriterHolder.GetRW();
+ }
GWB_PATCH_OFFSET(g_lowest_address);
GWB_PATCH_OFFSET(g_highest_address);
GWB_PATCH_OFFSET(g_ephemeral_low);
diff --git a/src/coreclr/vm/arm64/arm64singlestepper.cpp b/src/coreclr/vm/arm64/arm64singlestepper.cpp
index d45925311a3..6c1764647c9 100644
--- a/src/coreclr/vm/arm64/arm64singlestepper.cpp
+++ b/src/coreclr/vm/arm64/arm64singlestepper.cpp
@@ -46,11 +46,7 @@ Arm64SingleStepper::Arm64SingleStepper()
Arm64SingleStepper::~Arm64SingleStepper()
{
#if !defined(DACCESS_COMPILE)
-#ifdef TARGET_UNIX
SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->BackoutMem(m_rgCode, kMaxCodeBuffer * sizeof(uint32_t));
-#else
- DeleteExecutable(m_rgCode);
-#endif
#endif
}
@@ -59,11 +55,7 @@ void Arm64SingleStepper::Init()
#if !defined(DACCESS_COMPILE)
if (m_rgCode == NULL)
{
-#ifdef TARGET_UNIX
m_rgCode = (uint32_t *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(kMaxCodeBuffer * sizeof(uint32_t)));
-#else
- m_rgCode = new (executable) uint32_t[kMaxCodeBuffer];
-#endif
}
#endif
}
@@ -207,7 +199,7 @@ void Arm64SingleStepper::Apply(T_CONTEXT *pCtx)
unsigned int idxNextInstruction = 0;
- ExecutableWriterHolder<DWORD> codeWriterHolder(m_rgCode, sizeof(m_rgCode));
+ ExecutableWriterHolder<DWORD> codeWriterHolder(m_rgCode, kMaxCodeBuffer * sizeof(m_rgCode[0]));
if (TryEmulate(pCtx, opcode, false))
{
@@ -230,7 +222,7 @@ void Arm64SingleStepper::Apply(T_CONTEXT *pCtx)
pCtx->Pc = (uint64_t)m_rgCode;
// Make sure the CPU sees the updated contents of the buffer.
- FlushInstructionCache(GetCurrentProcess(), m_rgCode, sizeof(m_rgCode));
+ FlushInstructionCache(GetCurrentProcess(), m_rgCode, kMaxCodeBuffer * sizeof(m_rgCode[0]));
// Done, set the state.
m_state = Applied;
diff --git a/src/coreclr/vm/arm64/asmhelpers.S b/src/coreclr/vm/arm64/asmhelpers.S
index e6b47d07b2b..8ef66586cd2 100644
--- a/src/coreclr/vm/arm64/asmhelpers.S
+++ b/src/coreclr/vm/arm64/asmhelpers.S
@@ -270,13 +270,9 @@ LOCAL_LABEL(EphemeralCheckEnabled):
ldr x7, [x12]
// Update wbs state
-#ifdef FEATURE_WRITEBARRIER_COPY
PREPARE_EXTERNAL_VAR JIT_WriteBarrier_Table_Loc, x12
ldr x12, [x12]
add x12, x12, x9
-#else // FEATURE_WRITEBARRIER_COPY
- adr x12, LOCAL_LABEL(wbs_begin)
-#endif // FEATURE_WRITEBARRIER_COPY
stp x0, x1, [x12], 16
stp x2, x3, [x12], 16
@@ -295,16 +291,10 @@ LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
mov x14, x0 // x14 = dst
mov x15, x1 // x15 = val
-#ifdef FEATURE_WRITEBARRIER_COPY
-LOCAL_LABEL(Branch_JIT_WriteBarrier_Copy):
// Branch to the write barrier
PREPARE_EXTERNAL_VAR JIT_WriteBarrier_Loc, x17
ldr x17, [x17]
br x17
-#else // FEATURE_WRITEBARRIER_COPY
- // Branch to the write barrier
- b C_FUNC(JIT_WriteBarrier)
-#endif // FEATURE_WRITEBARRIER_COPY
LEAF_END JIT_WriteBarrier_Callable, _TEXT
.balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line
diff --git a/src/coreclr/vm/arm64/asmhelpers.asm b/src/coreclr/vm/arm64/asmhelpers.asm
index ffbeb9fd1ac..17d3a676940 100644
--- a/src/coreclr/vm/arm64/asmhelpers.asm
+++ b/src/coreclr/vm/arm64/asmhelpers.asm
@@ -61,6 +61,10 @@
#ifdef FEATURE_COMINTEROP
IMPORT CLRToCOMWorker
#endif // FEATURE_COMINTEROP
+
+ IMPORT JIT_WriteBarrier_Table_Loc
+ IMPORT JIT_WriteBarrier_Loc
+
TEXTAREA
;; LPVOID __stdcall GetCurrentIP(void);
@@ -308,6 +312,7 @@ ThePreStubPatchLabel
; x12 will be used for pointers
mov x8, x0
+ mov x9, x1
adrp x12, g_card_table
ldr x0, [x12, g_card_table]
@@ -346,7 +351,9 @@ EphemeralCheckEnabled
ldr x7, [x12, g_highest_address]
; Update wbs state
- adr x12, wbs_begin
+ adrp x12, JIT_WriteBarrier_Table_Loc
+ ldr x12, [x12, JIT_WriteBarrier_Table_Loc]
+ add x12, x12, x9
stp x0, x1, [x12], 16
stp x2, x3, [x12], 16
stp x4, x5, [x12], 16
@@ -355,9 +362,11 @@ EphemeralCheckEnabled
EPILOG_RESTORE_REG_PAIR fp, lr, #16!
EPILOG_RETURN
+ WRITE_BARRIER_END JIT_UpdateWriteBarrierState
+
; Begin patchable literal pool
ALIGN 64 ; Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line
-
+ WRITE_BARRIER_ENTRY JIT_WriteBarrier_Table
wbs_begin
wbs_card_table
DCQ 0
@@ -375,14 +384,7 @@ wbs_lowest_address
DCQ 0
wbs_highest_address
DCQ 0
-
- WRITE_BARRIER_END JIT_UpdateWriteBarrierState
-
-; ------------------------------------------------------------------
-; End of the writeable code region
- LEAF_ENTRY JIT_PatchedCodeLast
- ret lr
- LEAF_END
+ WRITE_BARRIER_END JIT_WriteBarrier_Table
; void JIT_ByRefWriteBarrier
; On entry:
@@ -546,6 +548,12 @@ Exit
ret lr
WRITE_BARRIER_END JIT_WriteBarrier
+; ------------------------------------------------------------------
+; End of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeLast
+ ret lr
+ LEAF_END
+
#ifdef FEATURE_PREJIT
;------------------------------------------------
; VirtualMethodFixupStub
@@ -1417,9 +1425,10 @@ CallHelper2
mov x14, x0 ; x14 = dst
mov x15, x1 ; x15 = val
- ; Branch to the write barrier (which is already correctly overwritten with
- ; single or multi-proc code based on the current CPU
- b JIT_WriteBarrier
+ ; Branch to the write barrier
+ adrp x17, JIT_WriteBarrier_Loc
+ ldr x17, [x17, JIT_WriteBarrier_Loc]
+ br x17
LEAF_END
diff --git a/src/coreclr/vm/arm64/cgencpu.h b/src/coreclr/vm/arm64/cgencpu.h
index 83e56cfb9f9..0641d89ff1a 100644
--- a/src/coreclr/vm/arm64/cgencpu.h
+++ b/src/coreclr/vm/arm64/cgencpu.h
@@ -597,6 +597,7 @@ struct StubPrecode {
return m_pTarget;
}
+#ifndef DACCESS_COMPILE
void ResetTargetInterlocked()
{
CONTRACTL
@@ -623,6 +624,7 @@ struct StubPrecode {
return (TADDR)InterlockedCompareExchange64(
(LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)target, (TADDR)expected) == expected;
}
+#endif // !DACCESS_COMPILE
#ifdef FEATURE_PREJIT
void Fixup(DataImage *image);
@@ -715,6 +717,13 @@ struct FixupPrecode {
return dac_cast<TADDR>(this) + (m_PrecodeChunkIndex + 1) * sizeof(FixupPrecode);
}
+ size_t GetSizeRW()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return GetBase() + sizeof(void*) - dac_cast<TADDR>(this);
+ }
+
TADDR GetMethodDesc();
PCODE GetTarget()
@@ -723,6 +732,7 @@ struct FixupPrecode {
return m_pTarget;
}
+#ifndef DACCESS_COMPILE
void ResetTargetInterlocked()
{
CONTRACTL
@@ -749,6 +759,7 @@ struct FixupPrecode {
return (TADDR)InterlockedCompareExchange64(
(LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)target, (TADDR)expected) == expected;
}
+#endif // !DACCESS_COMPILE
static BOOL IsFixupPrecodeByASM(PCODE addr)
{
@@ -797,6 +808,7 @@ struct ThisPtrRetBufPrecode {
return m_pTarget;
}
+#ifndef DACCESS_COMPILE
BOOL SetTargetInterlocked(TADDR target, TADDR expected)
{
CONTRACTL
@@ -810,6 +822,7 @@ struct ThisPtrRetBufPrecode {
return (TADDR)InterlockedCompareExchange64(
(LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)target, (TADDR)expected) == expected;
}
+#endif // !DACCESS_COMPILE
};
typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
diff --git a/src/coreclr/vm/arm64/stubs.cpp b/src/coreclr/vm/arm64/stubs.cpp
index 54cf1c49275..12d56ddb986 100644
--- a/src/coreclr/vm/arm64/stubs.cpp
+++ b/src/coreclr/vm/arm64/stubs.cpp
@@ -1067,8 +1067,14 @@ extern "C" void STDCALL JIT_PatchedCodeLast();
static void UpdateWriteBarrierState(bool skipEphemeralCheck)
{
BYTE *writeBarrierCodeStart = GetWriteBarrierCodeLocation((void*)JIT_PatchedCodeStart);
- ExecutableWriterHolder<BYTE> writeBarrierWriterHolder(writeBarrierCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart);
- JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap(), writeBarrierWriterHolder.GetRW() - writeBarrierCodeStart);
+ BYTE *writeBarrierCodeStartRW = writeBarrierCodeStart;
+ ExecutableWriterHolder<BYTE> writeBarrierWriterHolder;
+ if (IsWriteBarrierCopyEnabled())
+ {
+ writeBarrierWriterHolder = ExecutableWriterHolder<BYTE>(writeBarrierCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart);
+ writeBarrierCodeStartRW = writeBarrierWriterHolder.GetRW();
+ }
+ JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap(), writeBarrierCodeStartRW - writeBarrierCodeStart);
}
void InitJITHelpers1()
diff --git a/src/coreclr/vm/ceemain.cpp b/src/coreclr/vm/ceemain.cpp
index cdc5925234a..b60aac924d2 100644
--- a/src/coreclr/vm/ceemain.cpp
+++ b/src/coreclr/vm/ceemain.cpp
@@ -607,6 +607,11 @@ void EESocketCleanupHelper(bool isExecutingOnAltStack)
#endif // TARGET_UNIX
#endif // CROSSGEN_COMPILE
+void FatalErrorHandler(UINT errorCode, LPCWSTR pszMessage)
+{
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(errorCode, pszMessage);
+}
+
void EEStartupHelper()
{
CONTRACTL
@@ -670,6 +675,8 @@ void EEStartupHelper()
// This needs to be done before the EE has started
InitializeStartupFlags();
+ IfFailGo(ExecutableAllocator::StaticInitialize(FatalErrorHandler));
+
ThreadpoolMgr::StaticInitialize();
MethodDescBackpatchInfoTracker::StaticInitialize();
@@ -824,7 +831,7 @@ void EEStartupHelper()
g_runtimeLoadedBaseAddress = (SIZE_T)pe.GetBase();
g_runtimeVirtualSize = (SIZE_T)pe.GetVirtualSize();
- InitCodeAllocHint(g_runtimeLoadedBaseAddress, g_runtimeVirtualSize, GetRandomInt(64));
+ ExecutableAllocator::InitCodeAllocHint(g_runtimeLoadedBaseAddress, g_runtimeVirtualSize, GetRandomInt(64));
}
#endif // !TARGET_UNIX
diff --git a/src/coreclr/vm/class.cpp b/src/coreclr/vm/class.cpp
index 02feec829a7..5c5004f5686 100644
--- a/src/coreclr/vm/class.cpp
+++ b/src/coreclr/vm/class.cpp
@@ -153,7 +153,9 @@ void EEClass::Destruct(MethodTable * pOwningMT)
if (pDelegateEEClass->m_pStaticCallStub)
{
- BOOL fStubDeleted = pDelegateEEClass->m_pStaticCallStub->DecRef();
+ ExecutableWriterHolder<Stub> stubWriterHolder(pDelegateEEClass->m_pStaticCallStub, sizeof(Stub));
+ BOOL fStubDeleted = stubWriterHolder.GetRW()->DecRef();
+
if (fStubDeleted)
{
DelegateInvokeStubManager::g_pManager->RemoveStub(pDelegateEEClass->m_pStaticCallStub);
@@ -167,7 +169,6 @@ void EEClass::Destruct(MethodTable * pOwningMT)
// it is owned by the m_pMulticastStubCache, not by the class
// - it is shared across classes. So we don't decrement
// its ref count here
- delete pDelegateEEClass->m_pUMThunkMarshInfo;
}
#ifdef FEATURE_COMINTEROP
diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp
index 37220786fed..78721292a3e 100644
--- a/src/coreclr/vm/codeman.cpp
+++ b/src/coreclr/vm/codeman.cpp
@@ -2139,8 +2139,7 @@ VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SI
return; // Unable to allocate the reserve - give up
}
- pNewReserve->m_ptr = ClrVirtualAllocWithinRange(loAddrCurrent, hiAddrCurrent,
- allocChunk, MEM_RESERVE, PAGE_NOACCESS);
+ pNewReserve->m_ptr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(allocChunk, loAddrCurrent, hiAddrCurrent);
if (pNewReserve->m_ptr != NULL)
break;
@@ -2231,8 +2230,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
if (!pInfo->getThrowOnOutOfMemoryWithinRange() && PEDecoder::GetForceRelocs())
RETURN NULL;
#endif
- pBaseAddr = ClrVirtualAllocWithinRange(loAddr, hiAddr,
- reserveSize, MEM_RESERVE, PAGE_NOACCESS);
+ pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(reserveSize, loAddr, hiAddr);
if (!pBaseAddr)
{
@@ -2251,7 +2249,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
}
else
{
- pBaseAddr = ClrVirtualAllocExecutable(reserveSize, MEM_RESERVE, PAGE_NOACCESS);
+ pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->Reserve(reserveSize);
if (!pBaseAddr)
ThrowOutOfMemory();
}
@@ -2686,15 +2684,14 @@ void EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t reserveFo
*pAllocatedSize = sizeof(CodeHeader) + totalSize;
-#if defined(HOST_OSX) && defined(HOST_ARM64)
-#define FEATURE_WXORX
-#endif
-
-#ifdef FEATURE_WXORX
- pCodeHdrRW = (CodeHeader *)new BYTE[*pAllocatedSize];
-#else
- pCodeHdrRW = pCodeHdr;
-#endif
+ if (ExecutableAllocator::IsWXORXEnabled())
+ {
+ pCodeHdrRW = (CodeHeader *)new BYTE[*pAllocatedSize];
+ }
+ else
+ {
+ pCodeHdrRW = pCodeHdr;
+ }
#ifdef USE_INDIRECT_CODEHEADER
if (requestInfo.IsDynamicDomain())
@@ -3347,7 +3344,7 @@ void EEJitManager::Unload(LoaderAllocator *pAllocator)
}
}
- ResetCodeAllocHint();
+ ExecutableAllocator::ResetCodeAllocHint();
}
EEJitManager::DomainCodeHeapList::DomainCodeHeapList()
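These hunks route all reservations of executable memory through the new allocator instead of raw ClrVirtualAlloc calls, so every code region is known to the double-mapping machinery. A sketch of the call shapes as they appear above and again in dynamicmethod.cpp below; ReserveCodeChunk is a hypothetical wrapper, with the allocator signatures taken from the call sites:

// Sketch only; ReserveCodeChunk is hypothetical.
BYTE* ReserveCodeChunk(size_t size, BYTE* loAddr, BYTE* hiAddr)
{
    BYTE* pBase;
    if (loAddr != NULL || hiAddr != NULL)
    {
        // Keep the reservation inside [loAddr, hiAddr] so rel32 jump stubs stay reachable.
        pBase = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(size, loAddr, hiAddr);
    }
    else
    {
        pBase = (BYTE*)ExecutableAllocator::Instance()->Reserve(size);
    }
    // Pages are later committed on demand, e.g.:
    // ExecutableAllocator::Instance()->Commit(pBase, sizeToCommit, true /* isExecutable */);
    return pBase;
}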
diff --git a/src/coreclr/vm/comcallablewrapper.cpp b/src/coreclr/vm/comcallablewrapper.cpp
index 8b95dac8cdd..499880dc16d 100644
--- a/src/coreclr/vm/comcallablewrapper.cpp
+++ b/src/coreclr/vm/comcallablewrapper.cpp
@@ -3183,12 +3183,11 @@ void ComMethodTable::Cleanup()
if (m_pDispatchInfo)
delete m_pDispatchInfo;
- if (m_pMDescr)
- DeleteExecutable(m_pMDescr);
if (m_pITypeInfo && !g_fProcessDetach)
SafeRelease(m_pITypeInfo);
- DeleteExecutable(this);
+ // The m_pMDescr and the current instance is allocated from the related LoaderAllocator
+ // so no cleanup is needed here.
}
@@ -3214,7 +3213,7 @@ void ComMethodTable::LayOutClassMethodTable()
SLOT *pComVtable;
unsigned cbPrevSlots = 0;
unsigned cbAlloc = 0;
- NewExecutableHolder<BYTE> pMDMemoryPtr = NULL;
+ AllocMemHolder<BYTE> pMDMemoryPtr;
BYTE* pMethodDescMemory = NULL;
size_t writeableOffset = 0;
unsigned cbNumParentVirtualMethods = 0;
@@ -3321,7 +3320,7 @@ void ComMethodTable::LayOutClassMethodTable()
cbAlloc = cbMethodDescs;
if (cbAlloc > 0)
{
- pMDMemoryPtr = (BYTE*) new (executable) BYTE[cbAlloc + sizeof(UINT_PTR)];
+ pMDMemoryPtr = m_pMT->GetLoaderAllocator()->GetStubHeap()->AllocMem(S_SIZE_T(cbAlloc + sizeof(UINT_PTR)));
pMethodDescMemory = pMDMemoryPtr;
methodDescMemoryWriteableHolder = ExecutableWriterHolder<BYTE>(pMethodDescMemory, cbAlloc + sizeof(UINT_PTR));
@@ -3703,7 +3702,6 @@ BOOL ComMethodTable::LayOutInterfaceMethodTable(MethodTable* pClsMT)
// Method descs are at the end of the vtable
// m_cbSlots interfaces methods + IUnk methods
pMethodDescMemory = (BYTE *)&pComVtable[m_cbSlots];
-
for (i = 0; i < cbSlots; i++)
{
ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
@@ -4495,13 +4493,12 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForClass(MethodTable
if (cbToAlloc.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
- NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc.Value()];
+ AllocMemHolder<ComMethodTable> pComMT(pClassMT->GetLoaderAllocator()->GetStubHeap()->AllocMem(S_SIZE_T(cbToAlloc.Value())));
_ASSERTE(!cbNewSlots.IsOverflow() && !cbTotalSlots.IsOverflow() && !cbVtable.IsOverflow());
ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(pComMT, cbToAlloc.Value());
ComMethodTable* pComMTRW = comMTWriterHolder.GetRW();
-
// set up the header
pComMTRW->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
pComMTRW->m_pMT = pClassMT; // pointer to the class method table
@@ -4573,7 +4570,7 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForInterface(MethodT
if (cbToAlloc.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
- NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc.Value()];
+ AllocMemHolder<ComMethodTable> pComMT(pInterfaceMT->GetLoaderAllocator()->GetStubHeap()->AllocMem(S_SIZE_T(cbToAlloc.Value())));
_ASSERTE(!cbVtable.IsOverflow() && !cbMethDescs.IsOverflow());
@@ -4639,7 +4636,8 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForBasic(MethodTable
unsigned cbVtable = cbExtraSlots * sizeof(SLOT);
unsigned cbToAlloc = sizeof(ComMethodTable) + cbVtable;
- NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc];
+ AllocMemHolder<ComMethodTable> pComMT(pMT->GetLoaderAllocator()->GetStubHeap()->AllocMem(S_SIZE_T(cbToAlloc)));
+
ExecutableWriterHolder<ComMethodTable> comMTWriterHolder(pComMT, cbToAlloc);
ComMethodTable* pComMTRW = comMTWriterHolder.GetRW();
diff --git a/src/coreclr/vm/comcallablewrapper.h b/src/coreclr/vm/comcallablewrapper.h
index 2581ddf832f..0f1e4b878e4 100644
--- a/src/coreclr/vm/comcallablewrapper.h
+++ b/src/coreclr/vm/comcallablewrapper.h
@@ -499,6 +499,7 @@ struct ComMethodTable
// Accessor for the IDispatch information.
DispatchInfo* GetDispatchInfo();
+#ifndef DACCESS_COMPILE
LONG AddRef()
{
LIMITED_METHOD_CONTRACT;
@@ -527,6 +528,7 @@ struct ComMethodTable
return cbRef;
}
+#endif // DACCESS_COMPILE
CorIfaceAttr GetInterfaceType()
{
@@ -746,6 +748,7 @@ struct ComMethodTable
}
+#ifndef DACCESS_COMPILE
inline REFIID GetIID()
{
// Cannot use a normal CONTRACT since the return type is ref type which
@@ -768,6 +771,7 @@ struct ComMethodTable
return m_IID;
}
+#endif // DACCESS_COMPILE
void CheckParentComVisibility(BOOL fForIDispatch)
{
diff --git a/src/coreclr/vm/comdelegate.cpp b/src/coreclr/vm/comdelegate.cpp
index b6c17260a13..1b61e16dec5 100644
--- a/src/coreclr/vm/comdelegate.cpp
+++ b/src/coreclr/vm/comdelegate.cpp
@@ -1253,7 +1253,7 @@ LPVOID COMDelegate::ConvertToCallback(OBJECTREF pDelegateObj)
{
GCX_PREEMP();
- pUMThunkMarshInfo = new UMThunkMarshInfo();
+ pUMThunkMarshInfo = (UMThunkMarshInfo*)(void*)pMT->GetLoaderAllocator()->GetStubHeap()->AllocMem(S_SIZE_T(sizeof(UMThunkMarshInfo)));
ExecutableWriterHolder<UMThunkMarshInfo> uMThunkMarshInfoWriterHolder(pUMThunkMarshInfo, sizeof(UMThunkMarshInfo));
uMThunkMarshInfoWriterHolder.GetRW()->LoadTimeInit(pInvokeMeth);
diff --git a/src/coreclr/vm/dllimportcallback.cpp b/src/coreclr/vm/dllimportcallback.cpp
index 4a88f81df52..4f3cf879d10 100644
--- a/src/coreclr/vm/dllimportcallback.cpp
+++ b/src/coreclr/vm/dllimportcallback.cpp
@@ -41,7 +41,7 @@ public:
{
WRAPPER_NO_CONTRACT;
- m_crst.Init(CrstLeafLock, CRST_UNSAFE_ANYMODE);
+ m_crst.Init(CrstUMEntryThunkFreeListLock, CRST_UNSAFE_ANYMODE);
}
UMEntryThunk *GetUMEntryThunk()
diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp
index 9dae86aca93..541d88dc168 100644
--- a/src/coreclr/vm/dynamicmethod.cpp
+++ b/src/coreclr/vm/dynamicmethod.cpp
@@ -403,8 +403,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
if (pInfo->m_loAddr != NULL || pInfo->m_hiAddr != NULL)
{
- m_pBaseAddr = ClrVirtualAllocWithinRange(pInfo->m_loAddr, pInfo->m_hiAddr,
- ReserveBlockSize, MEM_RESERVE, PAGE_NOACCESS);
+ m_pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(ReserveBlockSize, pInfo->m_loAddr, pInfo->m_hiAddr);
if (!m_pBaseAddr)
{
if (pInfo->getThrowOnOutOfMemoryWithinRange())
@@ -417,7 +416,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
// top up the ReserveBlockSize to suggested minimum
ReserveBlockSize = max(ReserveBlockSize, pInfo->getReserveSize());
- m_pBaseAddr = ClrVirtualAllocExecutable(ReserveBlockSize, MEM_RESERVE, PAGE_NOACCESS);
+ m_pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->Reserve(ReserveBlockSize);
if (!m_pBaseAddr)
ThrowOutOfMemory();
}
@@ -749,7 +748,7 @@ HostCodeHeap::TrackAllocation* HostCodeHeap::AllocMemory_NoThrow(size_t header,
if (m_pLastAvailableCommittedAddr + sizeToCommit <= m_pBaseAddr + m_TotalBytesAvailable)
{
- if (NULL == ClrVirtualAlloc(m_pLastAvailableCommittedAddr, sizeToCommit, MEM_COMMIT, PAGE_EXECUTE_READWRITE))
+ if (NULL == ExecutableAllocator::Instance()->Commit(m_pLastAvailableCommittedAddr, sizeToCommit, true /* isExecutable */))
{
LOG((LF_BCL, LL_ERROR, "CodeHeap [0x%p] - VirtualAlloc failed\n", this));
return NULL;
diff --git a/src/coreclr/vm/excep.cpp b/src/coreclr/vm/excep.cpp
index a1fdf255a5c..6bf5efcc802 100644
--- a/src/coreclr/vm/excep.cpp
+++ b/src/coreclr/vm/excep.cpp
@@ -6699,14 +6699,12 @@ AdjustContextForJITHelpers(
PCODE ip = GetIP(pContext);
-#ifdef FEATURE_WRITEBARRIER_COPY
if (IsIPInWriteBarrierCodeCopy(ip))
{
// Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
ip = AdjustWriteBarrierIP(ip);
SetIP(pContext, ip);
}
-#endif // FEATURE_WRITEBARRIER_COPY
#ifdef FEATURE_DATABREAKPOINT
diff --git a/src/coreclr/vm/exceptionhandling.cpp b/src/coreclr/vm/exceptionhandling.cpp
index 7fff234ca85..4af702fab14 100644
--- a/src/coreclr/vm/exceptionhandling.cpp
+++ b/src/coreclr/vm/exceptionhandling.cpp
@@ -4694,14 +4694,12 @@ VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex, CONTEXT
break;
}
-#ifdef FEATURE_WRITEBARRIER_COPY
if (IsIPInWriteBarrierCodeCopy(controlPc))
{
// Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
controlPc = AdjustWriteBarrierIP(controlPc);
SetIP(frameContext, controlPc);
}
-#endif // FEATURE_WRITEBARRIER_COPY
UINT_PTR sp = GetSP(frameContext);
@@ -5174,13 +5172,11 @@ BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD e
{
PCODE controlPc = GetIP(contextRecord);
-#ifdef FEATURE_WRITEBARRIER_COPY
if (IsIPInWriteBarrierCodeCopy(controlPc))
{
// Pretend we were executing the barrier function at its original location
controlPc = AdjustWriteBarrierIP(controlPc);
}
-#endif // FEATURE_WRITEBARRIER_COPY
return g_fEEStarted && (
exceptionRecord->ExceptionCode == STATUS_BREAKPOINT ||
@@ -5259,14 +5255,12 @@ BOOL HandleHardwareException(PAL_SEHException* ex)
{
GCX_COOP(); // Must be cooperative to modify frame chain.
-#ifdef FEATURE_WRITEBARRIER_COPY
if (IsIPInWriteBarrierCodeCopy(controlPc))
{
// Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
controlPc = AdjustWriteBarrierIP(controlPc);
SetIP(ex->GetContextRecord(), controlPc);
}
-#endif // FEATURE_WRITEBARRIER_COPY
if (IsIPInMarkedJitHelper(controlPc))
{
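The same three-line adjustment now appears unconditionally at every spot where the runtime inspects a faulting or unwinding instruction pointer: if the IP lies inside the relocated barrier copy, it is translated back to the original barrier's address so the unwinder operates on the statically known code range. The repeated pattern, condensed, with AdjustContextIP as a hypothetical name for the shared shape:

// Sketch of the recurring fix-up; AdjustContextIP is hypothetical.
void AdjustContextIP(CONTEXT* pContext)
{
    PCODE controlPc = GetIP(pContext);
    if (IsIPInWriteBarrierCodeCopy(controlPc))
    {
        // Pretend we were executing the barrier at its original location.
        controlPc = AdjustWriteBarrierIP(controlPc);
        SetIP(pContext, controlPc);
    }
}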
diff --git a/src/coreclr/vm/gccover.cpp b/src/coreclr/vm/gccover.cpp
index be856dbe1a6..9ce0cc676f7 100644
--- a/src/coreclr/vm/gccover.cpp
+++ b/src/coreclr/vm/gccover.cpp
@@ -1258,9 +1258,9 @@ void RemoveGcCoverageInterrupt(TADDR instrPtr, BYTE * savedInstrPtr, GCCoverageI
{
ExecutableWriterHolder<void> instrPtrWriterHolder((void*)instrPtr, 4);
#ifdef TARGET_ARM
- if (GetARMInstructionLength(savedInstrPtr) == 2)
+ if (GetARMInstructionLength(savedInstrPtr) == 2)
*(WORD *)instrPtrWriterHolder.GetRW() = *(WORD *)savedInstrPtr;
- else
+ else
*(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr;
#elif defined(TARGET_ARM64)
*(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr;
diff --git a/src/coreclr/vm/i386/jithelp.S b/src/coreclr/vm/i386/jithelp.S
index facce7cacd3..dc56da1d177 100644
--- a/src/coreclr/vm/i386/jithelp.S
+++ b/src/coreclr/vm/i386/jithelp.S
@@ -377,10 +377,27 @@ LEAF_ENTRY JIT_WriteBarrierGroup, _TEXT
ret
LEAF_END JIT_WriteBarrierGroup, _TEXT
-#ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
-// *******************************************************************************
-// Write barrier wrappers with fcall calling convention
-//
+ .data
+ .align 4
+ .global C_FUNC(JIT_WriteBarrierEAX_Loc)
+C_FUNC(JIT_WriteBarrierEAX_Loc):
+ .word 0
+ .text
+
+LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
+ mov eax, edx
+ mov edx, ecx
+ push eax
+ call 1f
+1:
+ pop eax
+2:
+ add eax, offset _GLOBAL_OFFSET_TABLE_+1 // (2b - 1b)
+ mov eax, dword ptr [eax + C_FUNC(JIT_WriteBarrierEAX_Loc)@GOT]
+ xchg eax, dword ptr [esp]
+ ret
+LEAF_END JIT_WriteBarrier_Callable, _TEXT
+
.macro UniversalWriteBarrierHelper name
.align 4
@@ -392,6 +409,11 @@ LEAF_END JIT_\name, _TEXT
.endm
+#ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
+// *******************************************************************************
+// Write barrier wrappers with fcall calling convention
+//
+
// Only define these if we're using the ASM GC write barriers; if this flag is not defined,
// we'll use C++ versions of these write barriers.
UniversalWriteBarrierHelper CheckedWriteBarrier
diff --git a/src/coreclr/vm/i386/jithelp.asm b/src/coreclr/vm/i386/jithelp.asm
index 3743ac3cbe0..3650b3f2afd 100644
--- a/src/coreclr/vm/i386/jithelp.asm
+++ b/src/coreclr/vm/i386/jithelp.asm
@@ -411,15 +411,13 @@ ENDM
;*******************************************************************************
; Write barrier wrappers with fcall calling convention
;
-UniversalWriteBarrierHelper MACRO name
+
+ .data
ALIGN 4
-PUBLIC @JIT_&name&@8
-@JIT_&name&@8 PROC
- mov eax,edx
- mov edx,ecx
- jmp _JIT_&name&EAX@0
-@JIT_&name&@8 ENDP
-ENDM
+ public _JIT_WriteBarrierEAX_Loc
+_JIT_WriteBarrierEAX_Loc dd 0
+
+ .code
; WriteBarrierStart and WriteBarrierEnd are used to determine bounds of
; WriteBarrier functions so can determine if got AV in them.
@@ -429,6 +427,25 @@ _JIT_WriteBarrierGroup@0 PROC
ret
_JIT_WriteBarrierGroup@0 ENDP
+ ALIGN 4
+PUBLIC @JIT_WriteBarrier_Callable@8
+@JIT_WriteBarrier_Callable@8 PROC
+ mov eax,edx
+ mov edx,ecx
+ jmp DWORD PTR [_JIT_WriteBarrierEAX_Loc]
+
+@JIT_WriteBarrier_Callable@8 ENDP
+
+UniversalWriteBarrierHelper MACRO name
+ ALIGN 4
+PUBLIC @JIT_&name&@8
+@JIT_&name&@8 PROC
+ mov eax,edx
+ mov edx,ecx
+ jmp _JIT_&name&EAX@0
+@JIT_&name&@8 ENDP
+ENDM
+
ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
; Only define these if we're using the ASM GC write barriers; if this flag is not defined,
; we'll use C++ versions of these write barriers.
@@ -1233,6 +1250,8 @@ fremloopd:
; PatchedCodeStart and PatchedCodeEnd are used to determine bounds of patched code.
;
+ ALIGN 4
+
_JIT_PatchedCodeStart@0 proc public
ret
_JIT_PatchedCodeStart@0 endp
diff --git a/src/coreclr/vm/i386/jitinterfacex86.cpp b/src/coreclr/vm/i386/jitinterfacex86.cpp
index 0e366bdbd1a..0467f347aaa 100644
--- a/src/coreclr/vm/i386/jitinterfacex86.cpp
+++ b/src/coreclr/vm/i386/jitinterfacex86.cpp
@@ -1050,10 +1050,18 @@ void InitJITHelpers1()
{
BYTE * pfunc = (BYTE *) JIT_WriteBarrierReg_PreGrow;
- BYTE * pBuf = (BYTE *)c_rgWriteBarriers[iBarrier];
+ BYTE * pBuf = GetWriteBarrierCodeLocation((BYTE *)c_rgWriteBarriers[iBarrier]);
int reg = c_rgWriteBarrierRegs[iBarrier];
- memcpy(pBuf, pfunc, 34);
+ BYTE * pBufRW = pBuf;
+ ExecutableWriterHolder<BYTE> barrierWriterHolder;
+ if (IsWriteBarrierCopyEnabled())
+ {
+ barrierWriterHolder = ExecutableWriterHolder<BYTE>(pBuf, 34);
+ pBufRW = barrierWriterHolder.GetRW();
+ }
+
+ memcpy(pBufRW, pfunc, 34);
// assert the copied code ends in a ret to make sure we got the right length
_ASSERTE(pBuf[33] == 0xC3);
@@ -1069,24 +1077,24 @@ void InitJITHelpers1()
_ASSERTE(pBuf[0] == 0x89);
// Update the reg field (bits 3..5) of the ModR/M byte of this instruction
- pBuf[1] &= 0xc7;
- pBuf[1] |= reg << 3;
+ pBufRW[1] &= 0xc7;
+ pBufRW[1] |= reg << 3;
// Second instruction to patch is cmp reg, imm32 (low bound)
_ASSERTE(pBuf[2] == 0x81);
// Here the lowest three bits in ModR/M field are the register
- pBuf[3] &= 0xf8;
- pBuf[3] |= reg;
+ pBufRW[3] &= 0xf8;
+ pBufRW[3] |= reg;
#ifdef WRITE_BARRIER_CHECK
// Don't do the fancy optimization just jump to the old one
// Use the slow one from time to time in a debug build because
// there are some good asserts in the unoptimized one
if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK) || DEBUG_RANDOM_BARRIER_CHECK) {
- pfunc = &pBuf[0];
+ pfunc = &pBufRW[0];
*pfunc++ = 0xE9; // JMP c_rgDebugWriteBarriers[iBarrier]
- *((DWORD*) pfunc) = (BYTE*) c_rgDebugWriteBarriers[iBarrier] - (pfunc + sizeof(DWORD));
+ *((DWORD*) pfunc) = (BYTE*) c_rgDebugWriteBarriers[iBarrier] - (&pBuf[1] + sizeof(DWORD));
}
#endif // WRITE_BARRIER_CHECK
}
@@ -1132,7 +1140,7 @@ void ValidateWriteBarrierHelpers()
#endif // WRITE_BARRIER_CHECK
// first validate the PreGrow helper
- BYTE* pWriteBarrierFunc = reinterpret_cast<BYTE*>(JIT_WriteBarrierEAX);
+ BYTE* pWriteBarrierFunc = GetWriteBarrierCodeLocation(reinterpret_cast<BYTE*>(JIT_WriteBarrierEAX));
// ephemeral region
DWORD* pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[AnyGrow_EphemeralLowerBound]);
@@ -1170,7 +1178,7 @@ void ValidateWriteBarrierHelpers()
#endif //CODECOVERAGE
/*********************************************************************/
-#define WriteBarrierIsPreGrow() (((BYTE *)JIT_WriteBarrierEAX)[10] == 0xc1)
+#define WriteBarrierIsPreGrow() ((GetWriteBarrierCodeLocation((BYTE *)JIT_WriteBarrierEAX))[10] == 0xc1)
/*********************************************************************/
@@ -1188,20 +1196,28 @@ int StompWriteBarrierEphemeral(bool /* isRuntimeSuspended */)
#ifdef WRITE_BARRIER_CHECK
// Don't do the fancy optimization if we are checking write barrier
- if (((BYTE *)JIT_WriteBarrierEAX)[0] == 0xE9) // we are using slow write barrier
+ if ((GetWriteBarrierCodeLocation((BYTE *)JIT_WriteBarrierEAX))[0] == 0xE9) // we are using slow write barrier
return stompWBCompleteActions;
#endif // WRITE_BARRIER_CHECK
// Update the lower bound.
for (int iBarrier = 0; iBarrier < NUM_WRITE_BARRIERS; iBarrier++)
{
- BYTE * pBuf = (BYTE *)c_rgWriteBarriers[iBarrier];
+ BYTE * pBuf = GetWriteBarrierCodeLocation((BYTE *)c_rgWriteBarriers[iBarrier]);
+
+ BYTE * pBufRW = pBuf;
+ ExecutableWriterHolder<BYTE> barrierWriterHolder;
+ if (IsWriteBarrierCopyEnabled())
+ {
+ barrierWriterHolder = ExecutableWriterHolder<BYTE>(pBuf, 42);
+ pBufRW = barrierWriterHolder.GetRW();
+ }
// assert there is in fact a cmp r/m32, imm32 there
_ASSERTE(pBuf[2] == 0x81);
// Update the immediate which is the lower bound of the ephemeral generation
- size_t *pfunc = (size_t *) &pBuf[AnyGrow_EphemeralLowerBound];
+ size_t *pfunc = (size_t *) &pBufRW[AnyGrow_EphemeralLowerBound];
//avoid trivial self modifying code
if (*pfunc != (size_t) g_ephemeral_low)
{
@@ -1214,7 +1230,7 @@ int StompWriteBarrierEphemeral(bool /* isRuntimeSuspended */)
_ASSERTE(pBuf[10] == 0x81);
// Update the upper bound if we are using the PostGrow thunk.
- pfunc = (size_t *) &pBuf[PostGrow_EphemeralUpperBound];
+ pfunc = (size_t *) &pBufRW[PostGrow_EphemeralUpperBound];
//avoid trivial self modifying code
if (*pfunc != (size_t) g_ephemeral_high)
{
@@ -1244,7 +1260,7 @@ int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
#ifdef WRITE_BARRIER_CHECK
// Don't do the fancy optimization if we are checking write barrier
- if (((BYTE *)JIT_WriteBarrierEAX)[0] == 0xE9) // we are using slow write barrier
+ if ((GetWriteBarrierCodeLocation((BYTE *)JIT_WriteBarrierEAX))[0] == 0xE9) // we are using slow write barrier
return stompWBCompleteActions;
#endif // WRITE_BARRIER_CHECK
@@ -1253,12 +1269,20 @@ int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
for (int iBarrier = 0; iBarrier < NUM_WRITE_BARRIERS; iBarrier++)
{
- BYTE * pBuf = (BYTE *)c_rgWriteBarriers[iBarrier];
+ BYTE * pBuf = GetWriteBarrierCodeLocation((BYTE *)c_rgWriteBarriers[iBarrier]);
int reg = c_rgWriteBarrierRegs[iBarrier];
size_t *pfunc;
- // Check if we are still using the pre-grow version of the write barrier.
+ BYTE * pBufRW = pBuf;
+ ExecutableWriterHolder<BYTE> barrierWriterHolder;
+ if (IsWriteBarrierCopyEnabled())
+ {
+ barrierWriterHolder = ExecutableWriterHolder<BYTE>(pBuf, 42);
+ pBufRW = barrierWriterHolder.GetRW();
+ }
+
+ // Check if we are still using the pre-grow version of the write barrier.
if (bWriteBarrierIsPreGrow)
{
// Check if we need to use the upper bounds checking barrier stub.
@@ -1271,7 +1295,7 @@ int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
}
pfunc = (size_t *) JIT_WriteBarrierReg_PostGrow;
- memcpy(pBuf, pfunc, 42);
+ memcpy(pBufRW, pfunc, 42);
// assert the copied code ends in a ret to make sure we got the right length
_ASSERTE(pBuf[41] == 0xC3);
@@ -1287,35 +1311,35 @@ int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
_ASSERTE(pBuf[0] == 0x89);
// Update the reg field (bits 3..5) of the ModR/M byte of this instruction
- pBuf[1] &= 0xc7;
- pBuf[1] |= reg << 3;
+ pBufRW[1] &= 0xc7;
+ pBufRW[1] |= reg << 3;
// Second instruction to patch is cmp reg, imm32 (low bound)
_ASSERTE(pBuf[2] == 0x81);
// Here the lowest three bits in ModR/M field are the register
- pBuf[3] &= 0xf8;
- pBuf[3] |= reg;
+ pBufRW[3] &= 0xf8;
+ pBufRW[3] |= reg;
// Third instruction to patch is another cmp reg, imm32 (high bound)
_ASSERTE(pBuf[10] == 0x81);
// Here the lowest three bits in ModR/M field are the register
- pBuf[11] &= 0xf8;
- pBuf[11] |= reg;
+ pBufRW[11] &= 0xf8;
+ pBufRW[11] |= reg;
bStompWriteBarrierEphemeral = true;
// What we're trying to update is the offset field of a
// cmp offset[edx], 0ffh instruction
_ASSERTE(pBuf[22] == 0x80);
- pfunc = (size_t *) &pBuf[PostGrow_CardTableFirstLocation];
+ pfunc = (size_t *) &pBufRW[PostGrow_CardTableFirstLocation];
*pfunc = (size_t) g_card_table;
// What we're trying to update is the offset field of a
// mov offset[edx], 0ffh instruction
_ASSERTE(pBuf[34] == 0xC6);
- pfunc = (size_t *) &pBuf[PostGrow_CardTableSecondLocation];
+ pfunc = (size_t *) &pBufRW[PostGrow_CardTableSecondLocation];
}
else
@@ -1324,14 +1348,14 @@ int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
// cmp offset[edx], 0ffh instruction
_ASSERTE(pBuf[14] == 0x80);
- pfunc = (size_t *) &pBuf[PreGrow_CardTableFirstLocation];
+ pfunc = (size_t *) &pBufRW[PreGrow_CardTableFirstLocation];
*pfunc = (size_t) g_card_table;
// What we're trying to update is the offset field of a
// mov offset[edx], 0ffh instruction
_ASSERTE(pBuf[26] == 0xC6);
- pfunc = (size_t *) &pBuf[PreGrow_CardTableSecondLocation];
+ pfunc = (size_t *) &pBufRW[PreGrow_CardTableSecondLocation];
}
}
else
@@ -1340,13 +1364,13 @@ int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
// cmp offset[edx], 0ffh instruction
_ASSERTE(pBuf[22] == 0x80);
- pfunc = (size_t *) &pBuf[PostGrow_CardTableFirstLocation];
+ pfunc = (size_t *) &pBufRW[PostGrow_CardTableFirstLocation];
*pfunc = (size_t) g_card_table;
// What we're trying to update is the offset field of a
// mov offset[edx], 0ffh instruction
_ASSERTE(pBuf[34] == 0xC6);
- pfunc = (size_t *) &pBuf[PostGrow_CardTableSecondLocation];
+ pfunc = (size_t *) &pBufRW[PostGrow_CardTableSecondLocation];
}
// Stick in the adjustment value.
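Note the displacement fix inside the WRITE_BARRIER_CHECK block above: a rel32 jmp is encoded relative to the instruction's executable address, so when the bytes are written through the RW alias (pBufRW) the displacement must still be computed against the RX address (pBuf) where the jump will actually execute. A condensed sketch of that rule; EmitJmpRel32 is a hypothetical helper:

// Sketch only; EmitJmpRel32 is hypothetical.
void EmitJmpRel32(BYTE* pBufRX, BYTE* pBufRW, BYTE* pTarget)
{
    pBufRW[0] = 0xE9;  // jmp rel32 opcode, written through the RW alias
    // rel32 is relative to the end of the 5-byte instruction at its RX home.
    *(INT32*)&pBufRW[1] = (INT32)(pTarget - (pBufRX + 5));
}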
diff --git a/src/coreclr/vm/i386/stublinkerx86.cpp b/src/coreclr/vm/i386/stublinkerx86.cpp
index 61c5dfd90cb..564363053fc 100644
--- a/src/coreclr/vm/i386/stublinkerx86.cpp
+++ b/src/coreclr/vm/i386/stublinkerx86.cpp
@@ -4829,7 +4829,7 @@ COPY_VALUE_CLASS:
X86EmitOp(0x8d, kEDX, elemBaseReg, elemOfs, elemScaledReg, elemScale);
// call JIT_Writeable_Thunks_Buf.WriteBarrierReg[0] (== EAX)
- X86EmitCall(NewExternalCodeLabel((LPVOID) &JIT_WriteBarrierEAX), 0);
+ X86EmitCall(NewExternalCodeLabel((LPVOID) GetWriteBarrierCodeLocation(&JIT_WriteBarrierEAX)), 0);
}
else
#else // TARGET_AMD64
diff --git a/src/coreclr/vm/i386/stublinkerx86.h b/src/coreclr/vm/i386/stublinkerx86.h
index af5244d0771..564c999975e 100644
--- a/src/coreclr/vm/i386/stublinkerx86.h
+++ b/src/coreclr/vm/i386/stublinkerx86.h
@@ -536,7 +536,7 @@ struct StubPrecode {
return rel32Decode(PTR_HOST_MEMBER_TADDR(StubPrecode, this, m_rel32));
}
-
+#ifndef DACCESS_COMPILE
void ResetTargetInterlocked()
{
CONTRACTL
@@ -562,6 +562,7 @@ struct StubPrecode {
ExecutableWriterHolder<void> rel32Holder(&m_rel32, 4);
return rel32SetInterlocked(&m_rel32, rel32Holder.GetRW(), target, expected, (MethodDesc*)GetMethodDesc());
}
+#endif // !DACCESS_COMPILE
};
IN_TARGET_64BIT(static_assert_no_msg(offsetof(StubPrecode, m_movR10) == OFFSETOF_PRECODE_TYPE);)
IN_TARGET_64BIT(static_assert_no_msg(offsetof(StubPrecode, m_type) == OFFSETOF_PRECODE_TYPE_MOV_R10);)
@@ -646,6 +647,13 @@ struct FixupPrecode {
return dac_cast<TADDR>(this) + (m_PrecodeChunkIndex + 1) * sizeof(FixupPrecode);
}
+ size_t GetSizeRW()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return GetBase() + sizeof(void*) - dac_cast<TADDR>(this);
+ }
+
TADDR GetMethodDesc();
#else // HAS_FIXUP_PRECODE_CHUNKS
TADDR GetMethodDesc()
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index a1e4d93d881..882e2c29cef 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -11875,7 +11875,7 @@ WORD CEEJitInfo::getRelocTypeHint(void * target)
if (m_fAllowRel32)
{
// The JIT calls this method for data addresses only. It always uses REL32s for direct code targets.
- if (IsPreferredExecutableRange(target))
+ if (ExecutableAllocator::IsPreferredExecutableRange(target))
return IMAGE_REL_BASED_REL32;
}
#endif // TARGET_AMD64
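The getRelocTypeHint hunk swaps a loose global check for the allocator's own notion of the preferred range: REL32 is only offered when the target is reachable with a signed 32-bit displacement from where executable memory will be placed. A hedged sketch of the resulting decision, assuming the standard reloc constants (IMAGE_REL_BASED_DIR64 as the wide fallback):

    // Sketch: pick a relocation kind for an AMD64 data address.
    WORD ChooseRelocTypeHint(void* target, bool fAllowRel32)
    {
        if (fAllowRel32 && ExecutableAllocator::IsPreferredExecutableRange(target))
            return IMAGE_REL_BASED_REL32;   // within +/-2 GB of preferred executable range
        return IMAGE_REL_BASED_DIR64;       // otherwise a full 64-bit address is required
    }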
diff --git a/src/coreclr/vm/jitinterface.h b/src/coreclr/vm/jitinterface.h
index ca9d03c2141..e071d0717d1 100644
--- a/src/coreclr/vm/jitinterface.h
+++ b/src/coreclr/vm/jitinterface.h
@@ -238,15 +238,10 @@ extern "C" FCDECL2(Object*, ChkCastAny_NoCacheLookup, CORINFO_CLASS_HANDLE type,
extern "C" FCDECL2(Object*, IsInstanceOfAny_NoCacheLookup, CORINFO_CLASS_HANDLE type, Object* obj);
extern "C" FCDECL2(LPVOID, Unbox_Helper, CORINFO_CLASS_HANDLE type, Object* obj);
-#if defined(TARGET_ARM64) || defined(FEATURE_WRITEBARRIER_COPY)
// ARM64 JIT_WriteBarrier uses a special ABI and thus is not callable directly

// Copied write barriers must be called at a different location
extern "C" FCDECL2(VOID, JIT_WriteBarrier_Callable, Object **dst, Object *ref);
#define WriteBarrier_Helper JIT_WriteBarrier_Callable
-#else
-// in other cases the regular JIT helper is callable.
-#define WriteBarrier_Helper JIT_WriteBarrier
-#endif
extern "C" FCDECL1(void, JIT_InternalThrow, unsigned exceptNum);
extern "C" FCDECL1(void*, JIT_InternalThrowFromHelper, unsigned exceptNum);
@@ -344,28 +339,25 @@ EXTERN_C FCDECL2_VV(UINT64, JIT_LRsz, UINT64 num, int shift);
#ifdef TARGET_X86
+#define ENUM_X86_WRITE_BARRIER_REGISTERS() \
+ X86_WRITE_BARRIER_REGISTER(EAX) \
+ X86_WRITE_BARRIER_REGISTER(ECX) \
+ X86_WRITE_BARRIER_REGISTER(EBX) \
+ X86_WRITE_BARRIER_REGISTER(ESI) \
+ X86_WRITE_BARRIER_REGISTER(EDI) \
+ X86_WRITE_BARRIER_REGISTER(EBP)
+
extern "C"
{
- void STDCALL JIT_CheckedWriteBarrierEAX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_CheckedWriteBarrierEBX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_CheckedWriteBarrierECX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_CheckedWriteBarrierESI(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_CheckedWriteBarrierEDI(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_CheckedWriteBarrierEBP(); // JIThelp.asm/JIThelp.s
-
- void STDCALL JIT_DebugWriteBarrierEAX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_DebugWriteBarrierEBX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_DebugWriteBarrierECX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_DebugWriteBarrierESI(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_DebugWriteBarrierEDI(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_DebugWriteBarrierEBP(); // JIThelp.asm/JIThelp.s
-
- void STDCALL JIT_WriteBarrierEAX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_WriteBarrierEBX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_WriteBarrierECX(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_WriteBarrierESI(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_WriteBarrierEDI(); // JIThelp.asm/JIThelp.s
- void STDCALL JIT_WriteBarrierEBP(); // JIThelp.asm/JIThelp.s
+
+// JIThelp.asm/JIThelp.s
+#define X86_WRITE_BARRIER_REGISTER(reg) \
+ void STDCALL JIT_CheckedWriteBarrier##reg(); \
+ void STDCALL JIT_DebugWriteBarrier##reg(); \
+ void STDCALL JIT_WriteBarrier##reg();
+
+ ENUM_X86_WRITE_BARRIER_REGISTERS()
+#undef X86_WRITE_BARRIER_REGISTER
void STDCALL JIT_WriteBarrierGroup();
void STDCALL JIT_WriteBarrierGroup_End();
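The replacement above is a classic X-macro: ENUM_X86_WRITE_BARRIER_REGISTERS lists the registers once, and each consumer defines X86_WRITE_BARRIER_REGISTER to stamp out what it needs. Expanded by hand for the first register, the declaration block reduces to exactly what the deleted lines spelled out:

    // What the preprocessor produces for the EAX entry (the other five
    // registers expand identically):
    void STDCALL JIT_CheckedWriteBarrierEAX();
    void STDCALL JIT_DebugWriteBarrierEAX();
    void STDCALL JIT_WriteBarrierEAX();

threads.cpp reuses the same list later in this diff to register every helper with SetJitHelperFunction, so adding a register becomes a one-line change.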
diff --git a/src/coreclr/vm/loaderallocator.cpp b/src/coreclr/vm/loaderallocator.cpp
index 4f222be4a2c..0a77e4445f0 100644
--- a/src/coreclr/vm/loaderallocator.cpp
+++ b/src/coreclr/vm/loaderallocator.cpp
@@ -1137,7 +1137,7 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory)
_ASSERTE(dwTotalReserveMemSize <= VIRTUAL_ALLOC_RESERVE_GRANULARITY);
#endif
- BYTE * initReservedMem = ClrVirtualAllocExecutable(dwTotalReserveMemSize, MEM_RESERVE, PAGE_NOACCESS);
+ BYTE * initReservedMem = (BYTE*)ExecutableAllocator::Instance()->Reserve(dwTotalReserveMemSize);
m_InitialReservedMemForLoaderHeaps = initReservedMem;
@@ -1672,18 +1672,25 @@ void AssemblyLoaderAllocator::SetCollectible()
{
CONTRACTL
{
- THROWS;
+ NOTHROW;
}
CONTRACTL_END;
m_IsCollectible = true;
-#ifndef DACCESS_COMPILE
- m_pShuffleThunkCache = new ShuffleThunkCache(m_pStubHeap);
-#endif
}
#ifndef DACCESS_COMPILE
+void AssemblyLoaderAllocator::Init(AppDomain* pAppDomain)
+{
+ m_Id.Init();
+ LoaderAllocator::Init((BaseDomain *)pAppDomain);
+ if (IsCollectible())
+ {
+ m_pShuffleThunkCache = new ShuffleThunkCache(m_pStubHeap);
+ }
+}
+
#ifndef CROSSGEN_COMPILE
AssemblyLoaderAllocator::~AssemblyLoaderAllocator()
diff --git a/src/coreclr/vm/loaderallocator.inl b/src/coreclr/vm/loaderallocator.inl
index a826675ccc9..993732d4010 100644
--- a/src/coreclr/vm/loaderallocator.inl
+++ b/src/coreclr/vm/loaderallocator.inl
@@ -21,12 +21,6 @@ inline void GlobalLoaderAllocator::Init(BaseDomain *pDomain)
LoaderAllocator::Init(pDomain, m_ExecutableHeapInstance);
}
-inline void AssemblyLoaderAllocator::Init(AppDomain* pAppDomain)
-{
- m_Id.Init();
- LoaderAllocator::Init((BaseDomain *)pAppDomain);
-}
-
inline BOOL LoaderAllocatorID::Equals(LoaderAllocatorID *pId)
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp
index bd3984d8697..db308ab208a 100644
--- a/src/coreclr/vm/method.cpp
+++ b/src/coreclr/vm/method.cpp
@@ -4188,46 +4188,6 @@ c_CentralJumpCode = {
};
#include <poppack.h>
-#elif defined(TARGET_AMD64)
-
-#include <pshpack1.h>
-static const struct CentralJumpCode {
- BYTE m_movzxRAX[4];
- BYTE m_shlEAX[4];
- BYTE m_movRAX[2];
- MethodDesc* m_pBaseMD;
- BYTE m_addR10RAX[3];
- BYTE m_jmp[1];
- INT32 m_rel32;
-
- inline void Setup(CentralJumpCode* pCodeRX, MethodDesc* pMD, PCODE target, LoaderAllocator *pLoaderAllocator) {
- WRAPPER_NO_CONTRACT;
- m_pBaseMD = pMD;
- m_rel32 = rel32UsingJumpStub(&pCodeRX->m_rel32, target, pMD, pLoaderAllocator);
- }
-
- inline BOOL CheckTarget(TADDR target) {
- WRAPPER_NO_CONTRACT;
- TADDR addr = rel32Decode(PTR_HOST_MEMBER_TADDR(CentralJumpCode, this, m_rel32));
- if (*PTR_BYTE(addr) == 0x48 &&
- *PTR_BYTE(addr+1) == 0xB8 &&
- *PTR_BYTE(addr+10) == 0xFF &&
- *PTR_BYTE(addr+11) == 0xE0)
- {
- addr = *PTR_TADDR(addr+2);
- }
- return (addr == target);
- }
-}
-c_CentralJumpCode = {
- { 0x48, 0x0F, 0xB6, 0xC0 }, // movzx rax,al
- { 0x48, 0xC1, 0xE0, MethodDesc::ALIGNMENT_SHIFT }, // shl rax, MethodDesc::ALIGNMENT_SHIFT
- { 0x49, 0xBA }, NULL, // mov r10, pBaseMD
- { 0x4C, 0x03, 0xD0 }, // add r10,rax
- { 0xE9 }, 0 // jmp PreStub
-};
-#include <poppack.h>
-
#elif defined(TARGET_ARM)
#include <pshpack1.h>
diff --git a/src/coreclr/vm/precode.cpp b/src/coreclr/vm/precode.cpp
index 80731c191e7..0bd2bd657f9 100644
--- a/src/coreclr/vm/precode.cpp
+++ b/src/coreclr/vm/precode.cpp
@@ -480,7 +480,9 @@ void Precode::Reset()
#ifdef HAS_FIXUP_PRECODE_CHUNKS
if (t == PRECODE_FIXUP)
{
- size = sizeof(FixupPrecode) + sizeof(PTR_MethodDesc);
+ // The writeable size the Init method accesses is dynamic; it depends on
+ // the FixupPrecode members.
+ size = ((FixupPrecode*)this)->GetSizeRW();
}
else
#endif
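Why the dynamic size matters: with HAS_FIXUP_PRECODE_CHUNKS, a FixupPrecode's writeable footprint runs from the precode itself to just past the chunk's shared MethodDesc pointer, so a fixed sizeof-based span could under-map the RW alias. A sketch of the arithmetic behind GetSizeRW, with the layout as implied by the stublinkerx86.h hunk earlier in this diff:

    // this                           GetBase()       GetBase() + sizeof(void*)
    //  |--- precodes in the chunk -----|--- chunk's MethodDesc* ---|
    //
    // GetSizeRW() == GetBase() + sizeof(void*) - (TADDR)this
    //
    // i.e. every byte Init() may touch, measured from the precode being reset:
    size_t size = ((FixupPrecode*)this)->GetSizeRW();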
diff --git a/src/coreclr/vm/stackwalk.cpp b/src/coreclr/vm/stackwalk.cpp
index 0971334af4d..e61802b9849 100644
--- a/src/coreclr/vm/stackwalk.cpp
+++ b/src/coreclr/vm/stackwalk.cpp
@@ -713,14 +713,12 @@ UINT_PTR Thread::VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext)
// get our caller's PSP, or our caller's caller's SP.
while (!ExecutionManager::IsManagedCode(uControlPc))
{
-#ifdef FEATURE_WRITEBARRIER_COPY
if (IsIPInWriteBarrierCodeCopy(uControlPc))
{
// Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
uControlPc = AdjustWriteBarrierIP(uControlPc);
SetIP(pContext, uControlPc);
}
-#endif // FEATURE_WRITEBARRIER_COPY
#ifndef TARGET_UNIX
uControlPc = VirtualUnwindCallFrame(pContext);
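The #ifdefs disappear because the barrier-copy helpers are now compiled everywhere and degrade gracefully: IsIPInWriteBarrierCodeCopy simply returns FALSE when the copy is disabled. The translation itself, reconstructed from the threads.cpp hunk later in this diff (the leading assert is an assumption), is plain pointer arithmetic between the copy page and the original patched-code range:

    PCODE AdjustWriteBarrierIP(PCODE controlPc)
    {
        _ASSERTE(IsIPInWriteBarrierCodeCopy(controlPc));

        // Rebase the IP from the runtime-allocated copy back onto the
        // original barrier code so the unwinder finds its unwind info.
        return (PCODE)JIT_PatchedCodeStart + (controlPc - (PCODE)s_barrierCopy);
    }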
diff --git a/src/coreclr/vm/stublink.cpp b/src/coreclr/vm/stublink.cpp
index 04a33e39826..304cb4fb35b 100644
--- a/src/coreclr/vm/stublink.cpp
+++ b/src/coreclr/vm/stublink.cpp
@@ -846,7 +846,7 @@ Stub *StubLinker::Link(LoaderHeap *pHeap, DWORD flags)
);
ASSERT(pStub != NULL);
- bool fSuccess = EmitStub(pStub, globalsize, pHeap);
+ bool fSuccess = EmitStub(pStub, globalsize, size, pHeap);
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
if (fSuccess)
@@ -1007,13 +1007,13 @@ int StubLinker::CalculateSize(int* pGlobalSize)
return globalsize + datasize;
}
-bool StubLinker::EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap)
+bool StubLinker::EmitStub(Stub* pStub, int globalsize, int totalSize, LoaderHeap* pHeap)
{
STANDARD_VM_CONTRACT;
BYTE *pCode = (BYTE*)(pStub->GetBlob());
- ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub));
+ ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub) + totalSize);
Stub *pStubRW = stubWriterHolder.GetRW();
BYTE *pCodeRW = (BYTE*)(pStubRW->GetBlob());
@@ -2013,11 +2013,7 @@ VOID Stub::DeleteStub()
FillMemory(this+1, m_numCodeBytes, 0xcc);
#endif
-#ifndef TARGET_UNIX
- DeleteExecutable((BYTE*)GetAllocationBase());
-#else
delete [] (BYTE*)GetAllocationBase();
-#endif
}
}
@@ -2124,11 +2120,7 @@ Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
BYTE *pBlock;
if (pHeap == NULL)
{
-#ifndef TARGET_UNIX
- pBlock = new (executable) BYTE[totalSize];
-#else
pBlock = new BYTE[totalSize];
-#endif
}
else
{
diff --git a/src/coreclr/vm/stublink.h b/src/coreclr/vm/stublink.h
index 94326f9962e..9613fd48f68 100644
--- a/src/coreclr/vm/stublink.h
+++ b/src/coreclr/vm/stublink.h
@@ -395,7 +395,7 @@ private:
// Writes out the code element into memory following the
// stub object.
- bool EmitStub(Stub* pStub, int globalsize, LoaderHeap* pHeap);
+ bool EmitStub(Stub* pStub, int globalsize, int totalSize, LoaderHeap* pHeap);
CodeRun *GetLastCodeRunIfAny();
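The new totalSize parameter exists because the RW mapping in EmitStub must cover the code that follows the Stub header, not just the header itself; sizeof(Stub) alone would leave pCodeRW pointing past the mapped writeable span. The layout assumption, sketched from the stublink.cpp hunk above:

    // A Stub allocation is the header immediately followed by the emitted code:
    //
    //   pStub            pStub + sizeof(Stub)             ... + totalSize
    //    |-- Stub header --|------ code and data written by EmitStub ------|
    //
    ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub) + totalSize);
    BYTE* pCodeRW = (BYTE*)stubWriterHolder.GetRW()->GetBlob();  // RW cursor for emission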
diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp
index fa93110399d..2c55f8770b0 100644
--- a/src/coreclr/vm/threads.cpp
+++ b/src/coreclr/vm/threads.cpp
@@ -1078,18 +1078,30 @@ DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE;
extern "C" void STDCALL JIT_PatchedCodeStart();
extern "C" void STDCALL JIT_PatchedCodeLast();
-#ifdef FEATURE_WRITEBARRIER_COPY
-
static void* s_barrierCopy = NULL;
BYTE* GetWriteBarrierCodeLocation(VOID* barrier)
{
- return (BYTE*)s_barrierCopy + ((BYTE*)barrier - (BYTE*)JIT_PatchedCodeStart);
+ if (IsWriteBarrierCopyEnabled())
+ {
+ return (BYTE*)PINSTRToPCODE((TADDR)s_barrierCopy + ((TADDR)barrier - (TADDR)JIT_PatchedCodeStart));
+ }
+ else
+ {
+ return (BYTE*)barrier;
+ }
}
BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc)
{
- return (s_barrierCopy <= (void*)controlPc && (void*)controlPc < ((BYTE*)s_barrierCopy + ((BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart)));
+ if (IsWriteBarrierCopyEnabled())
+ {
+ return (s_barrierCopy <= (void*)controlPc && (void*)controlPc < ((BYTE*)s_barrierCopy + ((BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart)));
+ }
+ else
+ {
+ return FALSE;
+ }
}
PCODE AdjustWriteBarrierIP(PCODE controlPc)
@@ -1100,14 +1112,21 @@ PCODE AdjustWriteBarrierIP(PCODE controlPc)
return (PCODE)JIT_PatchedCodeStart + (controlPc - (PCODE)s_barrierCopy);
}
+#ifdef TARGET_X86
+extern "C" void *JIT_WriteBarrierEAX_Loc;
+#else
extern "C" void *JIT_WriteBarrier_Loc;
+#endif
+
#ifdef TARGET_ARM64
extern "C" void (*JIT_WriteBarrier_Table)();
extern "C" void *JIT_WriteBarrier_Loc = 0;
extern "C" void *JIT_WriteBarrier_Table_Loc = 0;
#endif // TARGET_ARM64
-#endif // FEATURE_WRITEBARRIER_COPY
+#ifdef TARGET_ARM
+extern "C" void *JIT_WriteBarrier_Loc = 0;
+#endif // TARGET_ARM
#ifndef TARGET_UNIX
// g_TlsIndex is only used by the DAC. Disable optimizations around it to prevent it from getting optimized out.
@@ -1138,50 +1157,80 @@ void InitThreadManager()
_ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart > (ptrdiff_t)0);
_ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
-#ifdef FEATURE_WRITEBARRIER_COPY
- s_barrierCopy = ClrVirtualAlloc(NULL, g_SystemInfo.dwAllocationGranularity, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
- if (s_barrierCopy == NULL)
+ if (IsWriteBarrierCopyEnabled())
{
- _ASSERTE(!"ClrVirtualAlloc of GC barrier code page failed");
- COMPlusThrowWin32();
- }
+ s_barrierCopy = ExecutableAllocator::Instance()->Reserve(g_SystemInfo.dwAllocationGranularity);
+ ExecutableAllocator::Instance()->Commit(s_barrierCopy, g_SystemInfo.dwAllocationGranularity, true);
+ if (s_barrierCopy == NULL)
+ {
+ _ASSERTE(!"Allocation of GC barrier code page failed");
+ COMPlusThrowWin32();
+ }
- {
- size_t writeBarrierSize = (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart;
- ExecutableWriterHolder<void> barrierWriterHolder(s_barrierCopy, writeBarrierSize);
- memcpy(barrierWriterHolder.GetRW(), (BYTE*)JIT_PatchedCodeStart, writeBarrierSize);
- }
+ {
+ size_t writeBarrierSize = (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart;
+ ExecutableWriterHolder<void> barrierWriterHolder(s_barrierCopy, writeBarrierSize);
+ memcpy(barrierWriterHolder.GetRW(), (BYTE*)JIT_PatchedCodeStart, writeBarrierSize);
+ }
- // Store the JIT_WriteBarrier copy location to a global variable so that helpers
- // can jump to it.
- JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier);
+ // Store the JIT_WriteBarrier copy location to a global variable so that helpers
+ // can jump to it.
+#ifdef TARGET_X86
+ JIT_WriteBarrierEAX_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrierEAX);
- SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier));
+#define X86_WRITE_BARRIER_REGISTER(reg) \
+ SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF_##reg, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier##reg)); \
+ ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier##reg), W("@WriteBarrier" #reg));
-#ifdef TARGET_ARM64
- // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated.
- JIT_WriteBarrier_Table_Loc = GetWriteBarrierCodeLocation((void*)&JIT_WriteBarrier_Table);
+ ENUM_X86_WRITE_BARRIER_REGISTERS()
- SetJitHelperFunction(CORINFO_HELP_CHECKED_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_CheckedWriteBarrier));
- SetJitHelperFunction(CORINFO_HELP_ASSIGN_BYREF, GetWriteBarrierCodeLocation((void*)JIT_ByRefWriteBarrier));
-#endif // TARGET_ARM64
+#undef X86_WRITE_BARRIER_REGISTER
-#else // FEATURE_WRITEBARRIER_COPY
+#else // TARGET_X86
+ JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier);
+#endif // TARGET_X86
+ SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier));
+ ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), W("@WriteBarrier"));
- // I am using virtual protect to cover the entire range that this code falls in.
- //
+#ifdef TARGET_ARM64
+ // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated.
+ JIT_WriteBarrier_Table_Loc = GetWriteBarrierCodeLocation((void*)&JIT_WriteBarrier_Table);
+#endif // TARGET_ARM64
- // We could reset it to non-writeable inbetween GCs and such, but then we'd have to keep on re-writing back and forth,
- // so instead we'll leave it writable from here forward.
+#if defined(TARGET_ARM64) || defined(TARGET_ARM)
+ SetJitHelperFunction(CORINFO_HELP_CHECKED_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_CheckedWriteBarrier));
+ ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_CheckedWriteBarrier), W("@CheckedWriteBarrier"));
+ SetJitHelperFunction(CORINFO_HELP_ASSIGN_BYREF, GetWriteBarrierCodeLocation((void*)JIT_ByRefWriteBarrier));
+ ETW::MethodLog::StubInitialized((ULONGLONG)GetWriteBarrierCodeLocation((void*)JIT_ByRefWriteBarrier), W("@ByRefWriteBarrier"));
+#endif // TARGET_ARM64 || TARGET_ARM
- DWORD oldProt;
- if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart,
- PAGE_EXECUTE_READWRITE, &oldProt))
+ }
+ else
{
- _ASSERTE(!"ClrVirtualProtect of code page failed");
- COMPlusThrowWin32();
+ // We use VirtualProtect to cover the entire range that this code falls in.
+ //
+
+ // We could reset it to non-writeable in between GCs and such, but then we'd have to keep on re-writing back and forth,
+ // so instead we'll leave it writable from here forward.
+
+ DWORD oldProt;
+ if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart,
+ PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ _ASSERTE(!"ClrVirtualProtect of code page failed");
+ COMPlusThrowWin32();
+ }
+
+#ifdef TARGET_X86
+ JIT_WriteBarrierEAX_Loc = (void*)JIT_WriteBarrierEAX;
+#else
+ JIT_WriteBarrier_Loc = (void*)JIT_WriteBarrier;
+#endif
+#ifdef TARGET_ARM64
+ // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated.
+ JIT_WriteBarrier_Table_Loc = (void*)&JIT_WriteBarrier_Table;
+#endif // TARGET_ARM64
}
-#endif // FEATURE_WRITEBARRIER_COPY
#ifndef TARGET_UNIX
_ASSERTE(GetThreadNULLOk() == NULL);
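Taken together, the InitThreadManager hunk replaces a single RWX page from ClrVirtualAlloc with the allocator's reserve-then-commit flow. A condensed sketch of the enabled path, using only calls that appear in the hunk above (error handling abbreviated; the third Commit argument marks the region executable):

    // W^X on: allocate the barrier page, copy the patched code via an RW
    // alias, then publish the copy's address for helpers to jump through.
    s_barrierCopy = ExecutableAllocator::Instance()->Reserve(g_SystemInfo.dwAllocationGranularity);
    ExecutableAllocator::Instance()->Commit(s_barrierCopy, g_SystemInfo.dwAllocationGranularity, true /* executable */);
    if (s_barrierCopy == NULL)
        COMPlusThrowWin32();

    size_t writeBarrierSize = (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart;
    {
        ExecutableWriterHolder<void> barrierWriterHolder(s_barrierCopy, writeBarrierSize);
        memcpy(barrierWriterHolder.GetRW(), (BYTE*)JIT_PatchedCodeStart, writeBarrierSize);
    }

    JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier);
    SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier));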
diff --git a/src/coreclr/vm/threads.h b/src/coreclr/vm/threads.h
index d18b21d58f9..7d600dab5ed 100644
--- a/src/coreclr/vm/threads.h
+++ b/src/coreclr/vm/threads.h
@@ -6271,18 +6271,23 @@ private:
BOOL Debug_IsLockedViaThreadSuspension();
-#ifdef FEATURE_WRITEBARRIER_COPY
+inline BOOL IsWriteBarrierCopyEnabled()
+{
+#ifdef DACCESS_COMPILE
+ return FALSE;
+#else // DACCESS_COMPILE
+#ifdef HOST_OSX
+ return TRUE;
+#else
+ return ExecutableAllocator::IsWXORXEnabled();
+#endif
+#endif // DACCESS_COMPILE
+}
BYTE* GetWriteBarrierCodeLocation(VOID* barrier);
BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc);
PCODE AdjustWriteBarrierIP(PCODE controlPc);
-#else // FEATURE_WRITEBARRIER_COPY
-
-#define GetWriteBarrierCodeLocation(barrier) ((BYTE*)(barrier))
-
-#endif // FEATURE_WRITEBARRIER_COPY
-
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
extern thread_local Thread* t_pStackWalkerWalkingThread;
#define SET_THREAD_TYPE_STACKWALKER(pThread) t_pStackWalkerWalkingThread = pThread
diff --git a/src/coreclr/vm/virtualcallstub.cpp b/src/coreclr/vm/virtualcallstub.cpp
index 95d568d641c..3af4c52afc9 100644
--- a/src/coreclr/vm/virtualcallstub.cpp
+++ b/src/coreclr/vm/virtualcallstub.cpp
@@ -641,7 +641,7 @@ void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderA
dwTotalReserveMemSize);
}
- initReservedMem = ClrVirtualAllocExecutable (dwTotalReserveMemSize, MEM_RESERVE, PAGE_NOACCESS);
+ initReservedMem = (BYTE*)ExecutableAllocator::Instance()->Reserve(dwTotalReserveMemSize);
m_initialReservedMemForHeaps = (BYTE *) initReservedMem;
@@ -2766,11 +2766,7 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad
}
#endif
- ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, sizeof(DispatchHolder)
-#ifdef TARGET_AMD64
- + sizeof(DispatchStubShort)
-#endif
- );
+ ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, dispatchHolderSize);
dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
addrOfFail,
(size_t)pMTExpected
@@ -2833,9 +2829,9 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE
} CONTRACT_END;
//allocate from the requisite heap and copy the template over it.
- DispatchHolder * holder = (DispatchHolder*) (void*)
- dispatch_heap->AllocAlignedMem(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG), CODE_SIZE_ALIGN);
- ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, sizeof(DispatchHolder) + sizeof(DispatchStubLong));
+ size_t dispatchHolderSize = DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG);
+ DispatchHolder * holder = (DispatchHolder*) (void*)dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN);
+ ExecutableWriterHolder<DispatchHolder> dispatchWriterHolder(holder, dispatchHolderSize);
dispatchWriterHolder.GetRW()->Initialize(holder, addrOfCode,
addrOfFail,