Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/dotnet/runtime.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJan Vorlicek <jan.vorlicek@volny.cz>2021-07-11 17:46:53 +0300
committerGitHub <noreply@github.com>2021-07-11 17:46:53 +0300
commit6a47ecf4a8ee670356f5e554f08afb0b32cdac9a (patch)
treeeee15291b1ce2ad0c90c2e13b646d6e83fdf472b /src/coreclr/vm/amd64
parent83a4d3cc02fb04fce17b24fc09b3cdf77a12ba51 (diff)
W^X support (#54954)
* W^X support This change is the last part of enabling the W^X support. It adds the actual executable allocator that handles all double mapped memory allocations and creating the writeable mappings. The platform specific functionality is placed in a new minipal that is going to be a basis for future removal of Windows APIs usage from the native runtime. The last state of the change was tested on all the platforms that we support using coreclr pri 1 tests with both the W^X enabled and disabled using the COMPlus_EnableWriteXorExecute variable. The debugger changes were tested using the managed debugger testing suite on Windows x64, x86 and on Apple Silicon so far. Further testing on other platforms is in progress. * Replace LeafLock in UMEntryThunkFreeList by a new lock * Also allocate LoaderHeapFreeBlock from regular heap. * Set the W^X default to disabled
Diffstat (limited to 'src/coreclr/vm/amd64')
-rw-r--r--src/coreclr/vm/amd64/JitHelpers_Fast.asm79
-rw-r--r--src/coreclr/vm/amd64/jithelpers_fast.S26
-rw-r--r--src/coreclr/vm/amd64/jitinterfaceamd64.cpp20
3 files changed, 70 insertions(+), 55 deletions(-)
diff --git a/src/coreclr/vm/amd64/JitHelpers_Fast.asm b/src/coreclr/vm/amd64/JitHelpers_Fast.asm
index 82a301bb0cb..219597eb350 100644
--- a/src/coreclr/vm/amd64/JitHelpers_Fast.asm
+++ b/src/coreclr/vm/amd64/JitHelpers_Fast.asm
@@ -51,37 +51,6 @@ endif
extern JIT_InternalThrow:proc
-; There is an even more optimized version of these helpers possible which takes
-; advantage of knowledge of which way the ephemeral heap is growing to only do 1/2
-; that check (this is more significant in the JIT_WriteBarrier case).
-;
-; Additionally we can look into providing helpers which will take the src/dest from
-; specific registers (like x86) which _could_ (??) make for easier register allocation
-; for the JIT64, however it might lead to having to have some nasty code that treats
-; these guys really special like... :(.
-;
-; Version that does the move, checks whether or not it's in the GC and whether or not
-; it needs to have it's card updated
-;
-; void JIT_CheckedWriteBarrier(Object** dst, Object* src)
-LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
-
- ; When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
- ; but if it isn't then it will just return.
- ;
- ; See if this is in GCHeap
- cmp rcx, [g_lowest_address]
- jb NotInHeap
- cmp rcx, [g_highest_address]
- jnb NotInHeap
-
- jmp JIT_WriteBarrier
-
- NotInHeap:
- ; See comment above about possible AV
- mov [rcx], rdx
- ret
-LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT
; Mark start of the code region that we patch at runtime
LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
@@ -99,7 +68,8 @@ LEAF_ENTRY JIT_WriteBarrier, _TEXT
ifdef _DEBUG
; In debug builds, this just contains jump to the debug version of the write barrier by default
- jmp JIT_WriteBarrier_Debug
+ mov rax, JIT_WriteBarrier_Debug
+ jmp rax
endif
ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -388,6 +358,51 @@ endif
ret
LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT
+Section segment para 'DATA'
+
+ align 16
+
+ public JIT_WriteBarrier_Loc
+JIT_WriteBarrier_Loc:
+ dq 0
+
+LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
+ ; JIT_WriteBarrier(Object** dst, Object* src)
+ jmp QWORD PTR [JIT_WriteBarrier_Loc]
+LEAF_END JIT_WriteBarrier_Callable, _TEXT
+
+; There is an even more optimized version of these helpers possible which takes
+; advantage of knowledge of which way the ephemeral heap is growing to only do 1/2
+; that check (this is more significant in the JIT_WriteBarrier case).
+;
+; Additionally we can look into providing helpers which will take the src/dest from
+; specific registers (like x86) which _could_ (??) make for easier register allocation
+; for the JIT64, however it might lead to having to have some nasty code that treats
+; these guys really special like... :(.
+;
+; Version that does the move, checks whether or not it's in the GC and whether or not
+; it needs to have it's card updated
+;
+; void JIT_CheckedWriteBarrier(Object** dst, Object* src)
+LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
+
+ ; When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
+ ; but if it isn't then it will just return.
+ ;
+ ; See if this is in GCHeap
+ cmp rcx, [g_lowest_address]
+ jb NotInHeap
+ cmp rcx, [g_highest_address]
+ jnb NotInHeap
+
+ jmp QWORD PTR [JIT_WriteBarrier_Loc]
+
+ NotInHeap:
+ ; See comment above about possible AV
+ mov [rcx], rdx
+ ret
+LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT
+
; The following helper will access ("probe") a word on each page of the stack
; starting with the page right beneath rsp down to the one pointed to by r11.
; The procedure is needed to make sure that the "guard" page is pushed down below the allocated stack frame.
diff --git a/src/coreclr/vm/amd64/jithelpers_fast.S b/src/coreclr/vm/amd64/jithelpers_fast.S
index a13afb48785..8109886d0c9 100644
--- a/src/coreclr/vm/amd64/jithelpers_fast.S
+++ b/src/coreclr/vm/amd64/jithelpers_fast.S
@@ -32,26 +32,14 @@ LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
// See if this is in GCHeap
PREPARE_EXTERNAL_VAR g_lowest_address, rax
cmp rdi, [rax]
-#ifdef FEATURE_WRITEBARRIER_COPY
// jb NotInHeap
.byte 0x72, 0x12
-#else
- // jb NotInHeap
- .byte 0x72, 0x0e
-#endif
PREPARE_EXTERNAL_VAR g_highest_address, rax
cmp rdi, [rax]
-#ifdef FEATURE_WRITEBARRIER_COPY
// jnb NotInHeap
.byte 0x73, 0x06
jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)]
-#else
- // jnb NotInHeap
- .byte 0x73, 0x02
- // jmp C_FUNC(JIT_WriteBarrier)
- .byte 0xeb, 0x05
-#endif
NotInHeap:
// See comment above about possible AV
@@ -398,11 +386,17 @@ LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
ret
LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT
-#ifdef FEATURE_WRITEBARRIER_COPY
// When JIT_WriteBarrier is copied into an allocated page,
// helpers use this global variable to jump to it. This variable is set in InitThreadManager.
- .global _JIT_WriteBarrier_Loc
- .zerofill __DATA,__common,_JIT_WriteBarrier_Loc,8,3
+ .global C_FUNC(JIT_WriteBarrier_Loc)
+#ifdef TARGET_OSX
+ .zerofill __DATA,__common,C_FUNC(JIT_WriteBarrier_Loc),8,3
+#else
+ .data
+ C_FUNC(JIT_WriteBarrier_Loc):
+ .quad 0
+ .text
+#endif
// ------------------------------------------------------------------
// __declspec(naked) void F_CALL_CONV JIT_WriteBarrier_Callable(Object **dst, Object* val)
@@ -412,8 +406,6 @@ LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)]
LEAF_END JIT_WriteBarrier_Callable, _TEXT
-#endif // FEATURE_WRITEBARRIER_COPY
-
// The following helper will access ("probe") a word on each page of the stack
// starting with the page right beneath rsp down to the one pointed to by r11.
diff --git a/src/coreclr/vm/amd64/jitinterfaceamd64.cpp b/src/coreclr/vm/amd64/jitinterfaceamd64.cpp
index 38bff78a54c..02b023777b8 100644
--- a/src/coreclr/vm/amd64/jitinterfaceamd64.cpp
+++ b/src/coreclr/vm/amd64/jitinterfaceamd64.cpp
@@ -293,7 +293,10 @@ int WriteBarrierManager::ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier,
// the memcpy must come before the switch statment because the asserts inside the switch
// are actually looking into the JIT_WriteBarrier buffer
- memcpy(GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
+ {
+ ExecutableWriterHolder<void> writeBarrierWriterHolder(GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), GetCurrentWriteBarrierSize());
+ memcpy(writeBarrierWriterHolder.GetRW(), (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
+ }
switch (newWriteBarrier)
{
@@ -544,7 +547,8 @@ int WriteBarrierManager::UpdateEphemeralBounds(bool isRuntimeSuspended)
// Change immediate if different from new g_ephermeral_high.
if (*(UINT64*)m_pUpperBoundImmediate != (size_t)g_ephemeral_high)
{
- *(UINT64*)m_pUpperBoundImmediate = (size_t)g_ephemeral_high;
+ ExecutableWriterHolder<UINT64> upperBoundWriterHolder((UINT64*)m_pUpperBoundImmediate, sizeof(UINT64));
+ *upperBoundWriterHolder.GetRW() = (size_t)g_ephemeral_high;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
}
@@ -557,7 +561,8 @@ int WriteBarrierManager::UpdateEphemeralBounds(bool isRuntimeSuspended)
// Change immediate if different from new g_ephermeral_low.
if (*(UINT64*)m_pLowerBoundImmediate != (size_t)g_ephemeral_low)
{
- *(UINT64*)m_pLowerBoundImmediate = (size_t)g_ephemeral_low;
+ ExecutableWriterHolder<UINT64> lowerBoundImmediateWriterHolder((UINT64*)m_pLowerBoundImmediate, sizeof(UINT64));
+ *lowerBoundImmediateWriterHolder.GetRW() = (size_t)g_ephemeral_low;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
break;
@@ -609,7 +614,8 @@ int WriteBarrierManager::UpdateWriteWatchAndCardTableLocations(bool isRuntimeSus
#endif // FEATURE_SVR_GC
if (*(UINT64*)m_pWriteWatchTableImmediate != (size_t)g_sw_ww_table)
{
- *(UINT64*)m_pWriteWatchTableImmediate = (size_t)g_sw_ww_table;
+ ExecutableWriterHolder<UINT64> writeWatchTableImmediateWriterHolder((UINT64*)m_pWriteWatchTableImmediate, sizeof(UINT64));
+ *writeWatchTableImmediateWriterHolder.GetRW() = (size_t)g_sw_ww_table;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
break;
@@ -621,14 +627,16 @@ int WriteBarrierManager::UpdateWriteWatchAndCardTableLocations(bool isRuntimeSus
if (*(UINT64*)m_pCardTableImmediate != (size_t)g_card_table)
{
- *(UINT64*)m_pCardTableImmediate = (size_t)g_card_table;
+ ExecutableWriterHolder<UINT64> cardTableImmediateWriterHolder((UINT64*)m_pCardTableImmediate, sizeof(UINT64));
+ *cardTableImmediateWriterHolder.GetRW() = (size_t)g_card_table;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
if (*(UINT64*)m_pCardBundleTableImmediate != (size_t)g_card_bundle_table)
{
- *(UINT64*)m_pCardBundleTableImmediate = (size_t)g_card_bundle_table;
+ ExecutableWriterHolder<UINT64> cardBundleTableImmediateWriterHolder((UINT64*)m_pCardBundleTableImmediate, sizeof(UINT64));
+ *cardBundleTableImmediateWriterHolder.GetRW() = (size_t)g_card_bundle_table;
stompWBCompleteActions |= SWB_ICACHE_FLUSH;
}
#endif