Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/mono/corert.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authordotnet bot <dotnet-bot@microsoft.com>2016-04-16 03:28:38 +0300
committerJan Kotas <jkotas@microsoft.com>2016-04-16 03:28:38 +0300
commita4dc55611322fcf8d5d2c1f274155f8a08f3508c (patch)
treeab0e6b983e0d09834dd57e9da720a2b0cd42e5cf /src/Native/Runtime/i386
parentd7c196e5d7b7e4aa01fe0a9afdf27f4ca5890791 (diff)
MRT changes for portability (#1152)
- Replace asm wrappers for GC helpers with regular PInvoke for better portability. There is an extra GC mode switch now but it should not matter because of these helpers are either expensive or rarely used. - Remove asm code for RhpBulkWriteBarrier by switching to C++ implementation unconditionally (remove CORERT ifdef) - Move error handling for RhHandleAlloc* to System.Private.CoreLib (remove CORERT ifdef) - Add missing error handling for RhReRegisterForFinalize - A few other minor fixes and cleanup [tfs-changeset: 1596653]
Diffstat (limited to 'src/Native/Runtime/i386')
-rw-r--r--src/Native/Runtime/i386/AsmMacros.inc6
-rw-r--r--src/Native/Runtime/i386/GC.asm103
-rw-r--r--src/Native/Runtime/i386/WriteBarriers.asm147
3 files changed, 2 insertions, 254 deletions
diff --git a/src/Native/Runtime/i386/AsmMacros.inc b/src/Native/Runtime/i386/AsmMacros.inc
index 66200ee42..1bec2c7af 100644
--- a/src/Native/Runtime/i386/AsmMacros.inc
+++ b/src/Native/Runtime/i386/AsmMacros.inc
@@ -111,8 +111,8 @@ endm
;;
POP_COOP_PINVOKE_FRAME macro
;; We do not need to clear m_pHackPInvokeTunnel here because it is 'on the side' information.
- ;; The actual transition to/from preemptive mode is done elsewhere (HackEnablePreemptiveMode,
- ;; HackDisablePreemptiveMode) and m_pHackPInvokeTunnel need only be valid when that happens,
+ ;; The actual transition to/from preemptive mode is done elsewhere (EnablePreemptiveMode,
+ ;; DisablePreemptiveMode) and m_pHackPInvokeTunnel need only be valid when that happens,
;; so as long as we always set it on the way into a "cooperative pinvoke" method, we're fine
;; because it is only looked at inside these "cooperative pinvoke" methods.
add esp, 4*4
@@ -169,7 +169,6 @@ OFFSETOF__Thread__m_alloc_context__alloc_limit equ OFFSETOF__Thread__m_rgbA
PALDEBUGBREAK equ ?PalDebugBreak@@YGXXZ
REDHAWKGCINTERFACE__ALLOC equ ?Alloc@RedhawkGCInterface@@SGPAXPAVThread@@IIPAVEEType@@@Z
-REDHAWKGCINTERFACE__GARBAGECOLLECT equ ?GarbageCollect@RedhawkGCInterface@@SGXII@Z
G_LOWEST_ADDRESS equ _g_lowest_address
G_HIGHEST_ADDRESS equ _g_highest_address
G_EPHEMERAL_LOW equ _g_ephemeral_low
@@ -190,7 +189,6 @@ endif ;; FEATURE_GC_STRESS
;; IMPORTS
;;
EXTERN REDHAWKGCINTERFACE__ALLOC : PROC
-EXTERN REDHAWKGCINTERFACE__GARBAGECOLLECT : PROC
EXTERN THREADSTORE__ATTACHCURRENTTHREAD : PROC
EXTERN PALDEBUGBREAK : PROC
EXTERN RhpPInvokeWaitEx : PROC
diff --git a/src/Native/Runtime/i386/GC.asm b/src/Native/Runtime/i386/GC.asm
index db5ce3339..3fc0eb513 100644
--- a/src/Native/Runtime/i386/GC.asm
+++ b/src/Native/Runtime/i386/GC.asm
@@ -13,49 +13,6 @@
include AsmMacros.inc
-;; Force a collection.
-;; On entry:
-;; ECX = generation to collect (-1 for all)
-;; EDX = mode (default, forced or optimized)
-;;
-;; This helper is special because it's not called via a p/invoke that transitions to pre-emptive mode. We do
-;; this because the GC wants to be called in co-operative mode. But we are going to cause a GC, so we need to
-;; make this stack crawlable. As a result we use the same trick as the allocation helpers and build an
-;; explicit transition frame based on the entry state so the GC knows where to start crawling this thread's
-;; stack.
-FASTCALL_FUNC RhCollect, 8
-
- ;; Prolog, build an EBP frame
- push ebp
- mov ebp, esp
-
- ;; Save EDX (mode argument) since we need a register to stash thread pointer
- push edx
-
- ;; edx = GetThread(), TRASHES eax
- INLINE_GETTHREAD edx, eax
-
- ;; Save managed state in a frame and update the thread so it can find this frame once we transition to
- ;; pre-emptive mode in the garbage collection.
- PUSH_COOP_PINVOKE_FRAME edx
-
- ;; Initiate the collection.
- push [ebp - 4] ;; Push mode
- push ecx ;; Push generation number
- call REDHAWKGCINTERFACE__GARBAGECOLLECT
-
- ;; Restore register state.
- POP_COOP_PINVOKE_FRAME
-
- ;; Discard saved EDX
- add esp, 4
-
- ;; Epilog, tear down EBP frame and return.
- pop ebp
- ret
-
-FASTCALL_ENDFUNC
-
;; DWORD getcpuid(DWORD arg, unsigned char result[16])
FASTCALL_FUNC getcpuid, 8
@@ -104,64 +61,4 @@ FASTCALL_FUNC getextcpuid, 12
FASTCALL_ENDFUNC
-;; Re-register an object of a finalizable type for finalization.
-;; ecx : object
-;;
-FASTCALL_FUNC RhReRegisterForFinalize, 4
-
- EXTERN @RhReRegisterForFinalizeHelper@4 : PROC
-
- ;; Prolog, build an EBP frame
- push ebp
- mov ebp, esp
-
- ;; edx = GetThread(), TRASHES eax
- INLINE_GETTHREAD edx, eax
-
- ;; Save managed state in a frame and update the thread so it can find this frame if we transition to
- ;; pre-emptive mode in the helper below.
- PUSH_COOP_PINVOKE_FRAME edx
-
- ;; Call to the C++ helper that does most of the work.
- call @RhReRegisterForFinalizeHelper@4
-
- ;; Restore register state.
- POP_COOP_PINVOKE_FRAME
-
- ;; Epilog, tear down EBP frame and return.
- pop ebp
- ret
-
-FASTCALL_ENDFUNC
-
-;; RhGetGcTotalMemory
-;; No inputs, returns total GC memory as 64-bit value in eax/edx.
-;;
-FASTCALL_FUNC RhGetGcTotalMemory, 0
-
- EXTERN @RhGetGcTotalMemoryHelper@0 : PROC
-
- ;; Prolog, build an EBP frame
- push ebp
- mov ebp, esp
-
- ;; edx = GetThread(), TRASHES eax
- INLINE_GETTHREAD edx, eax
-
- ;; Save managed state in a frame and update the thread so it can find this frame if we transition to
- ;; pre-emptive mode in the helper below.
- PUSH_COOP_PINVOKE_FRAME edx
-
- ;; Call to the C++ helper that does most of the work.
- call @RhGetGcTotalMemoryHelper@0
-
- ;; Restore register state.
- POP_COOP_PINVOKE_FRAME
-
- ;; Epilog, tear down EBP frame and return.
- pop ebp
- ret
-
-FASTCALL_ENDFUNC
-
end
diff --git a/src/Native/Runtime/i386/WriteBarriers.asm b/src/Native/Runtime/i386/WriteBarriers.asm
index 796468fd1..d22a422d6 100644
--- a/src/Native/Runtime/i386/WriteBarriers.asm
+++ b/src/Native/Runtime/i386/WriteBarriers.asm
@@ -262,151 +262,4 @@ ALTERNATE_ENTRY RhpCheckedXchgAVLocation
FASTCALL_ENDFUNC
-ifndef CORERT
-
-;;
-;; Write barrier used when a large number of bytes possibly containing GC references have been updated. For
-;; speed we don't try to determine GC series information for the value or array of values. Instead we just
-;; mark all the cards covered by the memory range given to us. Additionally, at least for now, we don't try to
-;; mark card bits individually, which incurs the cost of an interlocked operation. Instead, like the single
-;; write barrier case, we mark 8 cards at a time by writing byte values of 0xff.
-;;
-;; On entry:
-;; ecx : Start of memory region that was written
-;; edx : Length of memory region written
-;;
-;; On exit:
-;; ecx/edx : Trashed
-;;
-FASTCALL_FUNC RhpBulkWriteBarrier, 8
-
- ;; For the following range checks we assume it is sufficient to test just the start address. No valid
- ;; write region should span a GC heap or generation boundary.
-
- ;; Check whether the writes were even into the heap. If not there's no card update required.
- cmp ecx, [G_LOWEST_ADDRESS]
- jb NoBarrierRequired
- cmp ecx, [G_HIGHEST_ADDRESS]
- jae NoBarrierRequired
-
- ;; If the size is smaller than a pointer, no write barrier is required
- ;; This case can occur with universal shared generic code where the size
- ;; is not known at compile time
- cmp edx, 4
- jb NoBarrierRequired
-
-ifdef WRITE_BARRIER_CHECK
-
- ;; Perform shadow heap updates corresponding to the gc heap updates that immediately preceded this helper
- ;; call. See the comment for UPDATE_GC_SHADOW above for a more detailed explanation of why we do this and
- ;; the synchronization implications.
-
- ;; If g_GCShadow is 0, don't perform the check.
- cmp g_GCShadow, 0
- je BulkWriteBarrier_UpdateShadowHeap_Done
-
- ;; We need some scratch registers and to preserve eax\ecx.
- push eax
- push ebx
- push esi
- push ecx
-
- ;; Compute the shadow heap address corresponding to the beginning of the range of heap addresses modified
- ;; and in the process range check it to make sure we have the shadow version allocated.
- mov ebx, ecx
- sub ebx, G_LOWEST_ADDRESS
- jb BulkWriteBarrier_UpdateShadowHeap_PopThenDone
- add ebx, [g_GCShadow]
- cmp ebx, [g_GCShadowEnd]
- ja BulkWriteBarrier_UpdateShadowHeap_PopThenDone
-
- ;; Initialize esi to the length of data to copy.
- mov esi, edx
-
- ;; Iterate over every pointer sized slot in the range, copying data from the real heap to the shadow heap.
- ;; As we perform each copy we need to recheck the real heap contents with an ordered read to ensure we're
- ;; not racing with another heap updater. If we discover a race we invalidate the corresponding shadow heap
- ;; slot using a special well-known value so that this location will not be tested during the next shadow
- ;; heap validation.
-BulkWriteBarrier_UpdateShadowHeap_CopyLoop:
- ;; Decrement the copy count.
- sub esi, 4
- jb BulkWriteBarrier_UpdateShadowHeap_PopThenDone
-
- ;; Ecx == current real heap slot
- ;; Ebx == current shadow heap slot
-
- ;; Update shadow slot from real slot.
- mov eax, [ecx]
- mov [ebx], eax
-
- ;; Memory barrier to ensure the next read is ordered wrt to the shadow heap write we just made.
- mfence
-
- ;; Read the real slot contents again. If they don't agree with what we just wrote then someone just raced
- ;; with us and updated the heap again. In such cases we invalidate the shadow slot.
- cmp [ecx], eax
- jne BulkWriteBarrier_UpdateShadowHeap_LostRace
-
-BulkWriteBarrier_UpdateShadowHeap_NextIteration:
- ;; Advance the heap pointers and loop again.
- add ecx, 4
- add ebx, 4
- jmp BulkWriteBarrier_UpdateShadowHeap_CopyLoop
-
-BulkWriteBarrier_UpdateShadowHeap_LostRace:
- mov dword ptr [ebx], INVALIDGCVALUE
- jmp BulkWriteBarrier_UpdateShadowHeap_NextIteration
-
-BulkWriteBarrier_UpdateShadowHeap_PopThenDone:
- pop ecx
- pop esi
- pop ebx
- pop eax
-
-BulkWriteBarrier_UpdateShadowHeap_Done:
-
-endif ; WRITE_BARRIER_CHECK
-
- ;; Compute the starting card address and the number of bytes to write (groups of 8 cards). We could try
- ;; for further optimization here using aligned 32-bit writes but there's some overhead in setup required
- ;; and additional complexity. It's not clear this is warranted given that a single byte of card table
- ;; update already covers 1K of object space (2K on 64-bit platforms). It's also not worth probing that
- ;; 1K/2K range to see if any of the pointers appear to be non-ephemeral GC references. Given the size of
-;; the area the chances are high that at least one interesting GC reference is present.
-
- add edx, ecx ; edx <- end address
- shr ecx, LOG2_CLUMP_SIZE ; ecx <- starting clump
- add edx, CLUMP_SIZE-1 ; edx <- end address + round up
- shr edx, LOG2_CLUMP_SIZE ; edx <- ending clump index (rounded up)
-
- ;; calculate the number of clumps to mark (round_up(end) - start)
- sub edx, ecx
-
- ;; Starting card address.
- add ecx, [G_CARD_TABLE]
-
- ; ecx: pointer to starting byte in card table
- ; edx: number of bytes to set
-
- ;; Fill the cards. To avoid cache line thrashing we check whether the cards have already been set before
- ;; writing.
-CardUpdateLoop:
- cmp byte ptr [ecx], 0FFh
- jz SkipCardUpdate
-
- mov byte ptr [ecx], 0FFh
-
-SkipCardUpdate:
- inc ecx
- dec edx
- jnz CardUpdateLoop
-
-NoBarrierRequired:
- ret
-
-FASTCALL_ENDFUNC
-
-endif ; CORERT
-
end