Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/mono/corert.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJan Kotas <jkotas@microsoft.com>2016-06-05 07:51:43 +0300
committerJan Kotas <jkotas@microsoft.com>2016-06-05 07:51:43 +0300
commit19cd48aa811dfc432d429d3ddc8d13a0da607ecb (patch)
tree91bb57d62d67905c99281bb839413eac9254277a /src/Native/Runtime/amd64
parent6a5ce31a85db741159ef06df9eb2e8c4fe0064e7 (diff)
Implement alloc helpers for Unix (#1360)
These helpers have to be implemented in assembly for non-portable runtime flavor to make stack crawling work.
Diffstat (limited to 'src/Native/Runtime/amd64')
-rw-r--r--src/Native/Runtime/amd64/AllocFast.S265
-rw-r--r--src/Native/Runtime/amd64/PInvoke.S4
2 files changed, 267 insertions, 2 deletions
diff --git a/src/Native/Runtime/amd64/AllocFast.S b/src/Native/Runtime/amd64/AllocFast.S
new file mode 100644
index 000000000..a0dedc4cc
--- /dev/null
+++ b/src/Native/Runtime/amd64/AllocFast.S
@@ -0,0 +1,265 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+.intel_syntax noprefix
+#include <unixasmmacros.inc>
+#include <AsmOffsets.inc> // generated by the build from AsmOffsets.cpp
+
+// Allocate non-array, non-finalizable object. If the allocation doesn't fit into the current thread's
+// allocation context then automatically fallback to the slow allocation path.
+// RDI == EEType
+NESTED_ENTRY RhpNewFast, _TEXT, NoHandler
+ push_nonvol_reg rbx
+ mov rbx, rdi
+
+ // rax = GetThread()
+ INLINE_GETTHREAD
+
+ //
+ // rbx contains EEType pointer
+ //
+ mov edx, [rbx + OFFSETOF__EEType__m_uBaseSize]
+
+ //
+ // rax: Thread pointer
+ // rbx: EEType pointer
+ // rdx: base size
+ //
+
+ mov rsi, [rax + OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+ add rdx, rsi
+ cmp rdx, [rax + OFFSETOF__Thread__m_alloc_context__alloc_limit]
+ ja LOCAL_LABEL(RhpNewFast_RarePath)
+
+ // set the new alloc pointer
+ mov [rax + OFFSETOF__Thread__m_alloc_context__alloc_ptr], rdx
+
+ mov rax, rsi
+
+ // set the new object's EEType pointer
+ mov [rsi], rbx
+
+ .cfi_remember_state
+ pop_nonvol_reg rbx
+ ret
+
+ .cfi_restore_state
+ .cfi_def_cfa_offset 16 // workaround cfi_restore_state bug
+LOCAL_LABEL(RhpNewFast_RarePath):
+ mov rdi, rbx // restore EEType
+ xor esi, esi
+ pop_nonvol_reg rbx
+ jmp C_FUNC(RhpNewObject)
+
+NESTED_END RhpNewFast, _TEXT
+
+
+
+// Allocate non-array object with finalizer
+// RDI == EEType
+LEAF_ENTRY RhpNewFinalizable, _TEXT
+ mov esi, GC_ALLOC_FINALIZE
+ jmp C_FUNC(RhpNewObject)
+LEAF_END RhpNewFinalizable, _TEXT
+
+
+
+// Allocate non-array object
+// RDI == EEType
+// ESI == alloc flags
+NESTED_ENTRY RhpNewObject, _TEXT, NoHandler
+
+ PUSH_COOP_PINVOKE_FRAME rcx
+ END_PROLOGUE
+
+ // RCX: transition frame
+
+ // Preserve the EEType in RBX
+ mov rbx, rdi
+
+ mov edx, [rdi + OFFSETOF__EEType__m_uBaseSize] // cbSize
+
+ // Call the rest of the allocation helper.
+ // void* RhpGcAlloc(EEType *pEEType, UInt32 uFlags, UIntNative cbSize, void * pTransitionFrame)
+ call C_FUNC(RhpGcAlloc)
+
+ // Set the new object's EEType pointer on success.
+ test rax, rax
+ jz LOCAL_LABEL(NewOutOfMemory)
+ mov [rax + OFFSETOF__Object__m_pEEType], rbx
+
+ // If the object is bigger than RH_LARGE_OBJECT_SIZE, we must publish it to the BGC
+ mov esi, [rbx + OFFSETOF__EEType__m_uBaseSize]
+.att_syntax
+ cmp $RH_LARGE_OBJECT_SIZE, %rsi
+.intel_syntax noprefix
+ jb LOCAL_LABEL(New_SkipPublish)
+ mov rdi, rax // rdi: object
+ // rsi: already contains object size
+ call C_FUNC(RhpPublishObject) // rax: this function returns the object that was passed-in
+LOCAL_LABEL(New_SkipPublish):
+
+ .cfi_remember_state
+ POP_COOP_PINVOKE_FRAME
+ ret
+
+ .cfi_restore_state
+ .cfi_def_cfa_offset 96 // workaround cfi_restore_state bug
+LOCAL_LABEL(NewOutOfMemory):
+ // This is the OOM failure path. We're going to tail-call to a managed helper that will throw
+ // an out of memory exception that the caller of this allocator understands.
+
+ mov rdi, rbx // EEType pointer
+ xor esi, esi // Indicate that we should throw OOM.
+
+ POP_COOP_PINVOKE_FRAME
+
+ jmp C_FUNC(RhExceptionHandling_FailedAllocation)
+NESTED_END RhpNewObject, _TEXT
+
+
+// Allocate one dimensional, zero based array (SZARRAY).
+// RDI == EEType
+// ESI == element count
+NESTED_ENTRY RhpNewArray, _TEXT, NoHandler
+ // we want to limit the element count to the non-negative 32-bit int range
+ cmp rsi, 07fffffffh
+ ja LOCAL_LABEL(ArraySizeOverflow)
+
+ push_nonvol_reg rbx
+ push_nonvol_reg r12
+ push_register rcx // padding
+
+ mov rbx, rdi // save EEType
+ mov r12, rsi // save element count
+
+ // rax = GetThread()
+ INLINE_GETTHREAD
+
+ mov rcx, rax // rcx = Thread*
+
+ // Compute overall allocation size (align(base size + (element size * elements), 8)).
+ movzx eax, word ptr [rbx + OFFSETOF__EEType__m_usComponentSize]
+ mul r12
+ mov edx, [rdi + OFFSETOF__EEType__m_uBaseSize]
+ add rax, rdx
+ add rax, 7
+ and rax, -8
+
+ // rax == array size
+ // rbx == EEType
+ // rcx == Thread*
+ // r12 == element count
+
+ mov rdx, rax
+ add rax, [rcx + OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+ jc LOCAL_LABEL(RhpNewArray_RarePath)
+
+ // rax == new alloc ptr
+ // rbx == EEType
+ // rcx == Thread*
+ // rdx == array size
+ // r12 == element count
+ cmp rax, [rcx + OFFSETOF__Thread__m_alloc_context__alloc_limit]
+ ja LOCAL_LABEL(RhpNewArray_RarePath)
+
+ mov [rcx + OFFSETOF__Thread__m_alloc_context__alloc_ptr], rax
+
+ // calc the new object pointer
+ sub rax, rdx
+
+ mov [rax + OFFSETOF__Object__m_pEEType], rbx
+ mov [rax + OFFSETOF__Array__m_Length], r12d
+
+ .cfi_remember_state
+ pop_register rcx // padding
+ pop_nonvol_reg r12
+ pop_nonvol_reg rbx
+ ret
+
+ .cfi_restore_state
+ .cfi_def_cfa_offset 32 // workaround cfi_restore_state bug
+LOCAL_LABEL(RhpNewArray_RarePath):
+ mov rdi, rbx // restore EEType
+ mov rsi, r12 // restore element count
+ // passing array size in rdx
+
+ pop_register rcx // padding
+ pop_nonvol_reg r12
+ pop_nonvol_reg rbx
+ jmp C_FUNC(RhpNewArrayRare)
+
+LOCAL_LABEL(ArraySizeOverflow):
+ // We get here if the size of the final array object can't be represented as an unsigned
+ // 32-bit value. We're going to tail-call to a managed helper that will throw
+ // an overflow exception that the caller of this allocator understands.
+
+ // rdi holds EEType pointer already
+ mov esi, 1 // Indicate that we should throw OverflowException
+ jmp C_FUNC(RhExceptionHandling_FailedAllocation)
+
+NESTED_END RhpNewArray, _TEXT
+
+NESTED_ENTRY RhpNewArrayRare, _TEXT, NoHandler
+
+ // rdi == EEType
+ // rsi == element count
+ // rdx == array size
+
+ PUSH_COOP_PINVOKE_FRAME rcx
+ END_PROLOGUE
+
+ // rcx: transition frame
+
+ // Preserve the EEType in RBX
+ mov rbx, rdi
+ // Preserve the element count in R12
+ mov r12, rsi
+ // Preserve the size in R13
+ mov r13, rdx
+
+ // passing EEType in rdi
+ xor rsi, rsi // uFlags
+ // passing size in rdx
+ // passing pTransitionFrame in rcx
+
+ // Call the rest of the allocation helper.
+ // void* RhpGcAlloc(EEType *pEEType, UInt32 uFlags, UIntNative cbSize, void * pTransitionFrame)
+ call C_FUNC(RhpGcAlloc)
+
+ // Set the new object's EEType pointer and length on success.
+ test rax, rax
+ jz LOCAL_LABEL(ArrayOutOfMemory)
+ mov [rax + OFFSETOF__Object__m_pEEType], rbx
+ mov [rax + OFFSETOF__Array__m_Length], r12d
+
+ // If the object is bigger than RH_LARGE_OBJECT_SIZE, we must publish it to the BGC
+.att_syntax
+ cmp $RH_LARGE_OBJECT_SIZE, %r13
+.intel_syntax noprefix
+ jb LOCAL_LABEL(NewArray_SkipPublish)
+ mov rdi, rax // rdi: object
+ mov rsi, r13 // rsi: object size
+ call C_FUNC(RhpPublishObject) // rax: this function returns the object that was passed-in
+LOCAL_LABEL(NewArray_SkipPublish):
+
+ .cfi_remember_state
+ POP_COOP_PINVOKE_FRAME
+ ret
+
+ .cfi_restore_state
+ .cfi_def_cfa_offset 96 // workaround cfi_restore_state bug
+LOCAL_LABEL(ArrayOutOfMemory):
+ // This is the OOM failure path. We're going to tail-call to a managed helper that will throw
+ // an out of memory exception that the caller of this allocator understands.
+
+ mov rdi, rbx // EEType pointer
+ xor esi, esi // Indicate that we should throw OOM.
+
+ POP_COOP_PINVOKE_FRAME
+
+ jmp C_FUNC(RhExceptionHandling_FailedAllocation)
+
+NESTED_END RhpNewArrayRare, _TEXT
+
diff --git a/src/Native/Runtime/amd64/PInvoke.S b/src/Native/Runtime/amd64/PInvoke.S
index 96ca4e525..958da80f5 100644
--- a/src/Native/Runtime/amd64/PInvoke.S
+++ b/src/Native/Runtime/amd64/PInvoke.S
@@ -16,7 +16,7 @@
// Also, the codegenerator must ensure that there are no live GC references in callee saved registers.
//
NESTED_ENTRY RhpPInvoke, _TEXT, NoHandler
- push_register rbx
+ push_nonvol_reg rbx
mov rbx, rdi
// RAX = GetThread()
@@ -34,7 +34,7 @@ NESTED_ENTRY RhpPInvoke, _TEXT, NoHandler
mov qword ptr [rax + OFFSETOF__Thread__m_pTransitionFrame], rbx
cmp dword ptr [C_VAR(RhpTrapThreads)], 0
- pop_register rbx
+ pop_nonvol_reg rbx
jne 0f // forward branch - predicted not taken
ret
0: