Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/mono/corert.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJan Kotas <jkotas@microsoft.com>2015-12-18 21:11:13 +0300
committerJan Kotas <jkotas@microsoft.com>2015-12-18 22:17:18 +0300
commited0beaf63473dde0eeff93e1e5afe99011fea709 (patch)
tree7bf289b21af763bc3e7c7749bfb590afc9c6bd5f /src/Native
parent6372118f2f477a101cc2f74c6c94c8e0e652a897 (diff)
Refactor force-inline GC-safe memory
- Move force-inline GC-safe memory helpers to .inl files to fix link errors for some build flavors. - Fix a bug in RhUnbox - it was not using GC-safe zero memory.
Diffstat (limited to 'src/Native')
-rw-r--r--src/Native/Runtime/CommonMacros.h2
-rw-r--r--src/Native/Runtime/GCMemoryHelpers.cpp101
-rw-r--r--src/Native/Runtime/GCMemoryHelpers.h3
-rw-r--r--src/Native/Runtime/GCMemoryHelpers.inl117
-rw-r--r--src/Native/Runtime/MiscHelpers.cpp198
-rw-r--r--src/Native/Runtime/gcrhenv.cpp40
6 files changed, 229 insertions, 232 deletions
diff --git a/src/Native/Runtime/CommonMacros.h b/src/Native/Runtime/CommonMacros.h
index da619bd24..509742404 100644
--- a/src/Native/Runtime/CommonMacros.h
+++ b/src/Native/Runtime/CommonMacros.h
@@ -44,6 +44,7 @@ char (*COUNTOF_helper(_CountofType (&_Array)[_SizeOfArray]))[_SizeOfArray];
#ifndef GCENV_INCLUDED
#define FORCEINLINE __forceinline
+#endif // GCENV_INCLUDED
inline UIntNative ALIGN_UP(UIntNative val, UIntNative alignment);
template <typename T>
@@ -56,7 +57,6 @@ inline T* ALIGN_DOWN(T* val, UIntNative alignment);
inline bool IS_ALIGNED(UIntNative val, UIntNative alignment);
template <typename T>
inline bool IS_ALIGNED(T* val, UIntNative alignment);
-#endif // GCENV_INCLUDED
#ifndef DACCESS_COMPILE
//
diff --git a/src/Native/Runtime/GCMemoryHelpers.cpp b/src/Native/Runtime/GCMemoryHelpers.cpp
index f53a43ca6..64eefde01 100644
--- a/src/Native/Runtime/GCMemoryHelpers.cpp
+++ b/src/Native/Runtime/GCMemoryHelpers.cpp
@@ -10,10 +10,111 @@
#include "common.h"
#include "gcenv.h"
#include "PalRedhawkCommon.h"
+#include "CommonMacros.inl"
#include "GCMemoryHelpers.h"
#include "GCMemoryHelpers.inl"
+// This function clears a piece of memory in a GC safe way. It makes the guarantee that it will clear memory in at
+// least pointer sized chunks whenever possible. Unaligned memory at the beginning and remaining bytes at the end are
+// written bytewise. We must make this guarantee whenever we clear memory in the GC heap that could contain object
+// references. The GC or other user threads can read object references at any time, clearing them bytewise can result
+// in a read on another thread getting incorrect data.
+//
+// USAGE: The caller is responsible for hoisting any null reference exceptions to a place where the hardware exception
+// can be properly translated to a managed exception.
+COOP_PINVOKE_CDECL_HELPER(void *, RhpInitMultibyte, (void * mem, int c, size_t size))
+{
+ // The caller must do the null-check because we cannot take an AV in the runtime and translate it to managed.
+ ASSERT(mem != nullptr);
+
+ UIntNative bv = (UInt8)c;
+ UIntNative pv = 0;
+
+ if (bv != 0)
+ {
+ pv =
+#if (POINTER_SIZE == 8)
+ bv << 7*8 | bv << 6*8 | bv << 5*8 | bv << 4*8 |
+#endif
+ bv << 3*8 | bv << 2*8 | bv << 1*8 | bv;
+ }
+
+ InlineGCSafeFillMemory(mem, size, pv);
+
+ // memset returns the destination buffer
+ return mem;
+}
+
+
+// This is a GC-safe variant of memcpy. It guarantees that the object references in the GC heap are updated atomically.
+// This is required for type safety and proper operation of the background GC.
+//
+// USAGE: 1) The caller is responsible for performing the appropriate bulk write barrier.
+// 2) The caller is responsible for hoisting any null reference exceptions to a place where the hardware
+// exception can be properly translated to a managed exception. This is handled by RhpCopyMultibyte.
+// 3) The caller must ensure that all three parameters are pointer-size-aligned. This should be the case for
+// value types which contain GC refs anyway, so if you want to copy structs without GC refs which might be
+// unaligned, then you must use RhpCopyMultibyteNoGCRefs.
+COOP_PINVOKE_CDECL_HELPER(void *, memcpyGCRefs, (void * dest, const void *src, size_t len))
+{
+ // null pointers are not allowed (they are checked by RhpCopyMultibyte)
+ ASSERT(dest != nullptr);
+ ASSERT(src != nullptr);
+
+ InlineForwardGCSafeCopy(dest, src, len);
+
+ // memcpy returns the destination buffer
+ return dest;
+}
+
+// This is a GC-safe variant of memcpy. It guarantees that the object references in the GC heap are updated atomically.
+// This is required for type safety and proper operation of the background GC.
+// Write barrier is included.
+//
+// USAGE:
+// 1) The caller is responsible for hoisting any null reference exceptions to a place where the hardware
+// exception can be properly translated to a managed exception. This is handled by RhpCopyMultibyte.
+// 2) The caller must ensure that all three parameters are pointer-size-aligned. This should be the case for
+// value types which contain GC refs anyway, so if you want to copy structs without GC refs which might be
+// unaligned, then you must use RhpCopyMultibyteNoGCRefs.
+COOP_PINVOKE_CDECL_HELPER(void *, memcpyGCRefsWithWriteBarrier, (void * dest, const void *src, size_t len))
+{
+ // null pointers are not allowed (they are checked by RhpCopyMultibyteWithWriteBarrier)
+ ASSERT(dest != nullptr);
+ ASSERT(src != nullptr);
+
+ InlineForwardGCSafeCopy(dest, src, len);
+ InlinedBulkWriteBarrier(dest, (UInt32)len);
+
+ // memcpy returns the destination buffer
+ return dest;
+}
+
+// Move memory, in a way that is compatible with a move onto the heap, but
+// does not require the destination pointer to be on the heap.
+
+COOP_PINVOKE_HELPER(void, RhBulkMoveWithWriteBarrier, (uint8_t* pDest, uint8_t* pSrc, int cbDest))
+{
+ if (pDest <= pSrc || pSrc + cbDest <= pDest)
+ InlineForwardGCSafeCopy(pDest, pSrc, cbDest);
+ else
+ InlineBackwardGCSafeCopy(pDest, pSrc, cbDest);
+
+ InlinedBulkWriteBarrier(pDest, cbDest);
+}
+
+void GCSafeZeroMemory(void * dest, size_t len)
+{
+ InlineGCSafeFillMemory(dest, len, 0);
+}
+
+void GCSafeCopyMemoryWithWriteBarrier(void * dest, const void *src, size_t len)
+{
+ InlineForwardGCSafeCopy(dest, src, len);
+ InlinedBulkWriteBarrier(dest, (UInt32)len);
+}
+
#ifdef CORERT
void RhpBulkWriteBarrier(void* pMemStart, UInt32 cbMemSize)
{
diff --git a/src/Native/Runtime/GCMemoryHelpers.h b/src/Native/Runtime/GCMemoryHelpers.h
index 762abe1c9..0bbdad51a 100644
--- a/src/Native/Runtime/GCMemoryHelpers.h
+++ b/src/Native/Runtime/GCMemoryHelpers.h
@@ -7,4 +7,7 @@
// Unmanaged GC memory helpers
//
+void GCSafeZeroMemory(void * dest, size_t len);
+void GCSafeCopyMemoryWithWriteBarrier(void * dest, const void *src, size_t len);
+
EXTERN_C void REDHAWK_CALLCONV RhpBulkWriteBarrier(void* pMemStart, UInt32 cbMemSize);
diff --git a/src/Native/Runtime/GCMemoryHelpers.inl b/src/Native/Runtime/GCMemoryHelpers.inl
index 7ce242231..0bfd960f0 100644
--- a/src/Native/Runtime/GCMemoryHelpers.inl
+++ b/src/Native/Runtime/GCMemoryHelpers.inl
@@ -7,6 +7,123 @@
// Unmanaged GC memory helpers
//
+// This function fills a piece of memory in a GC safe way. It makes the guarantee
+// that it will fill memory in at least pointer sized chunks whenever possible.
+// Unaligned memory at the beginning and remaining bytes at the end are written bytewise.
+// We must make this guarantee whenever we clear memory in the GC heap that could contain
+// object references. The GC or other user threads can read object references at any time,
+// clearing them bytewise can result in a read on another thread getting incorrect data.
+FORCEINLINE void InlineGCSafeFillMemory(void * mem, size_t size, size_t pv)
+{
+ UInt8 * memBytes = (UInt8 *)mem;
+ UInt8 * endBytes = &memBytes[size];
+
+ // handle unaligned bytes at the beginning
+ while (!IS_ALIGNED(memBytes, sizeof(void *)) && (memBytes < endBytes))
+ *memBytes++ = (UInt8)pv;
+
+ // now write pointer sized pieces
+ size_t nPtrs = (endBytes - memBytes) / sizeof(void *);
+ UIntNative* memPtr = (UIntNative*)memBytes;
+ for (size_t i = 0; i < nPtrs; i++)
+ *memPtr++ = pv;
+
+ // handle remaining bytes at the end
+ memBytes = (UInt8*)memPtr;
+ while (memBytes < endBytes)
+ *memBytes++ = (UInt8)pv;
+}
+
+// These functions copy memory in a GC safe way. They make the guarantee
+// that the memory is copied in at least pointer sized chunks.
+
+FORCEINLINE void InlineForwardGCSafeCopy(void * dest, const void *src, size_t len)
+{
+ // All parameters must be pointer-size-aligned
+ ASSERT(IS_ALIGNED(dest, sizeof(size_t)));
+ ASSERT(IS_ALIGNED(src, sizeof(size_t)));
+ ASSERT(IS_ALIGNED(len, sizeof(size_t)));
+
+ size_t size = len;
+ UInt8 * dmem = (UInt8 *)dest;
+ UInt8 * smem = (UInt8 *)src;
+
+ // regions must be non-overlapping
+ ASSERT(dmem <= smem || smem + size <= dmem);
+
+ // copy 4 pointers at a time
+ while (size >= 4 * sizeof(size_t))
+ {
+ size -= 4 * sizeof(size_t);
+ ((size_t *)dmem)[0] = ((size_t *)smem)[0];
+ ((size_t *)dmem)[1] = ((size_t *)smem)[1];
+ ((size_t *)dmem)[2] = ((size_t *)smem)[2];
+ ((size_t *)dmem)[3] = ((size_t *)smem)[3];
+ smem += 4 * sizeof(size_t);
+ dmem += 4 * sizeof(size_t);
+ }
+
+ // copy 2 trailing pointers, if needed
+ if ((size & (2 * sizeof(size_t))) != 0)
+ {
+ ((size_t *)dmem)[0] = ((size_t *)smem)[0];
+ ((size_t *)dmem)[1] = ((size_t *)smem)[1];
+ smem += 2 * sizeof(size_t);
+ dmem += 2 * sizeof(size_t);
+ }
+
+ // finish with one pointer, if needed
+ if ((size & sizeof(size_t)) != 0)
+ {
+ ((size_t *)dmem)[0] = ((size_t *)smem)[0];
+ }
+}
+
+FORCEINLINE void InlineBackwardGCSafeCopy(void * dest, const void *src, size_t len)
+{
+ // All parameters must be pointer-size-aligned
+ ASSERT(IS_ALIGNED(dest, sizeof(size_t)));
+ ASSERT(IS_ALIGNED(src, sizeof(size_t)));
+ ASSERT(IS_ALIGNED(len, sizeof(size_t)));
+
+ size_t size = len;
+ UInt8 * dmem = (UInt8 *)dest + len;
+ UInt8 * smem = (UInt8 *)src + len;
+
+ // regions must be non-overlapping
+ ASSERT(smem <= dmem || dmem + size <= smem);
+
+ // copy 4 pointers at a time
+ while (size >= 4 * sizeof(size_t))
+ {
+ size -= 4 * sizeof(size_t);
+ smem -= 4 * sizeof(size_t);
+ dmem -= 4 * sizeof(size_t);
+ ((size_t *)dmem)[3] = ((size_t *)smem)[3];
+ ((size_t *)dmem)[2] = ((size_t *)smem)[2];
+ ((size_t *)dmem)[1] = ((size_t *)smem)[1];
+ ((size_t *)dmem)[0] = ((size_t *)smem)[0];
+ }
+
+ // copy 2 trailing pointers, if needed
+ if ((size & (2 * sizeof(size_t))) != 0)
+ {
+ smem -= 2 * sizeof(size_t);
+ dmem -= 2 * sizeof(size_t);
+ ((size_t *)dmem)[1] = ((size_t *)smem)[1];
+ ((size_t *)dmem)[0] = ((size_t *)smem)[0];
+ }
+
+ // finish with one pointer, if needed
+ if ((size & sizeof(size_t)) != 0)
+ {
+ smem -= sizeof(size_t);
+ dmem -= sizeof(size_t);
+ ((size_t *)dmem)[0] = ((size_t *)smem)[0];
+ }
+}
+
+
#ifndef DACCESS_COMPILE
#ifdef WRITE_BARRIER_CHECK
extern uint8_t* g_GCShadow;
diff --git a/src/Native/Runtime/MiscHelpers.cpp b/src/Native/Runtime/MiscHelpers.cpp
index 81475383d..be74876e6 100644
--- a/src/Native/Runtime/MiscHelpers.cpp
+++ b/src/Native/Runtime/MiscHelpers.cpp
@@ -467,196 +467,6 @@ COOP_PINVOKE_HELPER(UInt8 *, RhGetCodeTarget, (UInt8 * pCodeOrg))
return pCodeOrg;
}
-FORCEINLINE void ForwardGCSafeCopy(void * dest, const void *src, size_t len)
-{
- // All parameters must be pointer-size-aligned
- ASSERT(IS_ALIGNED(dest, sizeof(size_t)));
- ASSERT(IS_ALIGNED(src, sizeof(size_t)));
- ASSERT(IS_ALIGNED(len, sizeof(size_t)));
-
- size_t size = len;
- UInt8 * dmem = (UInt8 *)dest;
- UInt8 * smem = (UInt8 *)src;
-
- // regions must be non-overlapping
- ASSERT(dmem <= smem || smem + size <= dmem);
-
- // copy 4 pointers at a time
- while (size >= 4 * sizeof(size_t))
- {
- size -= 4 * sizeof(size_t);
- ((size_t *)dmem)[0] = ((size_t *)smem)[0];
- ((size_t *)dmem)[1] = ((size_t *)smem)[1];
- ((size_t *)dmem)[2] = ((size_t *)smem)[2];
- ((size_t *)dmem)[3] = ((size_t *)smem)[3];
- smem += 4 * sizeof(size_t);
- dmem += 4 * sizeof(size_t);
- }
-
- // copy 2 trailing pointers, if needed
- if ((size & (2 * sizeof(size_t))) != 0)
- {
- ((size_t *)dmem)[0] = ((size_t *)smem)[0];
- ((size_t *)dmem)[1] = ((size_t *)smem)[1];
- smem += 2 * sizeof(size_t);
- dmem += 2 * sizeof(size_t);
- }
-
- // finish with one pointer, if needed
- if ((size & sizeof(size_t)) != 0)
- {
- ((size_t *)dmem)[0] = ((size_t *)smem)[0];
- }
-}
-
-FORCEINLINE void BackwardGCSafeCopy(void * dest, const void *src, size_t len)
-{
- // All parameters must be pointer-size-aligned
- ASSERT(IS_ALIGNED(dest, sizeof(size_t)));
- ASSERT(IS_ALIGNED(src, sizeof(size_t)));
- ASSERT(IS_ALIGNED(len, sizeof(size_t)));
-
- size_t size = len;
- UInt8 * dmem = (UInt8 *)dest + len;
- UInt8 * smem = (UInt8 *)src + len;
-
- // regions must be non-overlapping
- ASSERT(smem <= dmem || dmem + size <= smem);
-
- // copy 4 pointers at a time
- while (size >= 4 * sizeof(size_t))
- {
- size -= 4 * sizeof(size_t);
- smem -= 4 * sizeof(size_t);
- dmem -= 4 * sizeof(size_t);
- ((size_t *)dmem)[3] = ((size_t *)smem)[3];
- ((size_t *)dmem)[2] = ((size_t *)smem)[2];
- ((size_t *)dmem)[1] = ((size_t *)smem)[1];
- ((size_t *)dmem)[0] = ((size_t *)smem)[0];
- }
-
- // copy 2 trailing pointers, if needed
- if ((size & (2 * sizeof(size_t))) != 0)
- {
- smem -= 2 * sizeof(size_t);
- dmem -= 2 * sizeof(size_t);
- ((size_t *)dmem)[1] = ((size_t *)smem)[1];
- ((size_t *)dmem)[0] = ((size_t *)smem)[0];
- }
-
- // finish with one pointer, if needed
- if ((size & sizeof(size_t)) != 0)
- {
- smem -= sizeof(size_t);
- dmem -= sizeof(size_t);
- ((size_t *)dmem)[0] = ((size_t *)smem)[0];
- }
-}
-
-// This function fills a piece of memory in a GC safe way. It makes the guarantee
-// that it will fill memory in at least pointer sized chunks whenever possible.
-// Unaligned memory at the beginning and remaining bytes at the end are written bytewise.
-// We must make this guarantee whenever we clear memory in the GC heap that could contain
-// object references. The GC or other user threads can read object references at any time,
-// clearing them bytewise can result in a read on another thread getting incorrect data.
-FORCEINLINE void GCSafeFillMemory(void * mem, size_t size, size_t pv)
-{
- UInt8 * memBytes = (UInt8 *)mem;
- UInt8 * endBytes = &memBytes[size];
-
- // handle unaligned bytes at the beginning
- while (!IS_ALIGNED(memBytes, sizeof(void *)) && (memBytes < endBytes))
- *memBytes++ = (UInt8)pv;
-
- // now write pointer sized pieces
- size_t nPtrs = (endBytes - memBytes) / sizeof(void *);
- UIntNative* memPtr = (UIntNative*)memBytes;
- for (size_t i = 0; i < nPtrs; i++)
- *memPtr++ = pv;
-
- // handle remaining bytes at the end
- memBytes = (UInt8*)memPtr;
- while (memBytes < endBytes)
- *memBytes++ = (UInt8)pv;
-}
-
-// This is a GC-safe variant of memcpy. It guarantees that the object references in the GC heap are updated atomically.
-// This is required for type safety and proper operation of the background GC.
-//
-// USAGE: 1) The caller is responsible for performing the appropriate bulk write barrier.
-// 2) The caller is responsible for hoisting any null reference exceptions to a place where the hardware
-// exception can be properly translated to a managed exception. This is handled by RhpCopyMultibyte.
-// 3) The caller must ensure that all three parameters are pointer-size-aligned. This should be the case for
-// value types which contain GC refs anyway, so if you want to copy structs without GC refs which might be
-// unaligned, then you must use RhpCopyMultibyteNoGCRefs.
-COOP_PINVOKE_CDECL_HELPER(void *, memcpyGCRefs, (void * dest, const void *src, size_t len))
-{
- // null pointers are not allowed (they are checked by RhpCopyMultibyte)
- ASSERT(dest != nullptr);
- ASSERT(src != nullptr);
-
- ForwardGCSafeCopy(dest, src, len);
-
- // memcpy returns the destination buffer
- return dest;
-}
-
-EXTERN_C void REDHAWK_CALLCONV RhpBulkWriteBarrier(void* pMemStart, UInt32 cbMemSize);
-
-// This is a GC-safe variant of memcpy. It guarantees that the object references in the GC heap are updated atomically.
-// This is required for type safety and proper operation of the background GC.
-// Writebarrier is included.
-//
-// USAGE:
-// 1) The caller is responsible for hoisting any null reference exceptions to a place where the hardware
-// exception can be properly translated to a managed exception. This is handled by RhpCopyMultibyte.
-// 2) The caller must ensure that all three parameters are pointer-size-aligned. This should be the case for
-// value types which contain GC refs anyway, so if you want to copy structs without GC refs which might be
-// unaligned, then you must use RhpCopyMultibyteNoGCRefs.
-COOP_PINVOKE_CDECL_HELPER(void *, memcpyGCRefsWithWriteBarrier, (void * dest, const void *src, size_t len))
-{
- // null pointers are not allowed (they are checked by RhpCopyMultibyteWithWriteBarrier)
- ASSERT(dest != nullptr);
- ASSERT(src != nullptr);
-
- ForwardGCSafeCopy(dest, src, len);
- RhpBulkWriteBarrier(dest, (UInt32)len);
-
- // memcpy returns the destination buffer
- return dest;
-}
-
-// This function clears a piece of memory in a GC safe way. It makes the guarantee that it will clear memory in at
-// least pointer sized chunks whenever possible. Unaligned memory at the beginning and remaining bytes at the end are
-// written bytewise. We must make this guarantee whenever we clear memory in the GC heap that could contain object
-// references. The GC or other user threads can read object references at any time, clearing them bytewise can result
-// in a read on another thread getting incorrect data.
-//
-// USAGE: The caller is responsible for hoisting any null reference exceptions to a place where the hardware exception
-// can be properly translated to a managed exception.
-COOP_PINVOKE_CDECL_HELPER(void *, RhpInitMultibyte, (void * mem, int c, size_t size))
-{
- // The caller must do the null-check because we cannot take an AV in the runtime and translate it to managed.
- ASSERT(mem != nullptr);
-
- UIntNative bv = (UInt8)c;
- UIntNative pv = 0;
-
- if (bv != 0)
- {
- pv =
-#if (POINTER_SIZE == 8)
- bv << 7*8 | bv << 6*8 | bv << 5*8 | bv << 4*8 |
-#endif
- bv << 3*8 | bv << 2*8 | bv << 1*8 | bv;
- }
-
- GCSafeFillMemory(mem, size, pv);
-
- // memset returns the destination buffer
- return mem;
-}
-
//
// Return true if the array slice is valid
//
@@ -669,8 +479,6 @@ FORCEINLINE bool CheckArraySlice(Array * pArray, Int32 index, Int32 length)
(length <= arrayLength - index);
}
-EXTERN_C void * __cdecl memmove(void *, const void *, size_t);
-
//
// This function handles all cases of Array.Copy that do not require conversions or casting. It returns false if the copy cannot be performed, leaving
// the handling of the complex cases or throwing appropriate exception to the higher level framework.
@@ -708,9 +516,9 @@ COOP_PINVOKE_HELPER(Boolean, RhpArrayCopy, (Array * pSourceArray, Int32 sourceIn
if (pArrayType->HasReferenceFields())
{
if (pDestinationData <= pSourceData || pSourceData + size <= pDestinationData)
- ForwardGCSafeCopy(pDestinationData, pSourceData, size);
+ InlineForwardGCSafeCopy(pDestinationData, pSourceData, size);
else
- BackwardGCSafeCopy(pDestinationData, pSourceData, size);
+ InlineBackwardGCSafeCopy(pDestinationData, pSourceData, size);
InlinedBulkWriteBarrier(pDestinationData, (UInt32)size);
}
@@ -744,7 +552,7 @@ COOP_PINVOKE_HELPER(Boolean, RhpArrayClear, (Array * pArray, Int32 index, Int32
if (length == 0)
return true;
- GCSafeFillMemory((UInt8 *)pArray->GetArrayData() + index * componentSize, length * componentSize, 0);
+ InlineGCSafeFillMemory((UInt8 *)pArray->GetArrayData() + index * componentSize, length * componentSize, 0);
return true;
}
diff --git a/src/Native/Runtime/gcrhenv.cpp b/src/Native/Runtime/gcrhenv.cpp
index db97a3245..46334038a 100644
--- a/src/Native/Runtime/gcrhenv.cpp
+++ b/src/Native/Runtime/gcrhenv.cpp
@@ -40,7 +40,6 @@
#include "daccess.h"
#include "GCMemoryHelpers.h"
-#include "GCMemoryHelpers.inl"
GPTR_IMPL(EEType, g_pFreeObjectEEType);
@@ -648,11 +647,6 @@ UInt32 RedhawkGCInterface::GetGCDescSize(void * pType)
return (UInt32)CGCDesc::GetCGCDescFromMT(pMT)->GetSize();
}
-void ForwardGCSafeCopy(void * dest, const void *src, size_t len);
-void BackwardGCSafeCopy(void * dest, const void *src, size_t len);
-EXTERN_C void REDHAWK_CALLCONV RhpBulkWriteBarrier(void* pMemStart, UInt32 cbMemSize);
-
-
COOP_PINVOKE_HELPER(void, RhpCopyObjectContents, (Object* pobjDest, Object* pobjSrc))
{
SIZE_T cbDest = pobjDest->GetSize() - sizeof(ObjHeader);
@@ -664,8 +658,7 @@ COOP_PINVOKE_HELPER(void, RhpCopyObjectContents, (Object* pobjDest, Object* pobj
if (pobjDest->get_EEType()->HasReferenceFields())
{
- ForwardGCSafeCopy(pobjDest, pobjSrc, cbDest);
- GCHeap::GetGCHeap()->SetCardsAfterBulkCopy((Object**)pobjDest, cbDest);
+ GCSafeCopyMemoryWithWriteBarrier(pobjDest, pobjSrc, cbDest);
}
else
{
@@ -673,21 +666,6 @@ COOP_PINVOKE_HELPER(void, RhpCopyObjectContents, (Object* pobjDest, Object* pobj
}
}
-// Move memory, in a way that is compatible with a move onto the heap, but
-// does not require the destination pointer to be on the heap.
-
-COOP_PINVOKE_HELPER(void, RhBulkMoveWithWriteBarrier, (uint8_t* pDest, uint8_t* pSrc, int cbDest))
-{
- if (pDest <= pSrc || pSrc + cbDest <= pDest)
- ForwardGCSafeCopy(pDest, pSrc, cbDest);
- else
- BackwardGCSafeCopy(pDest, pSrc, cbDest);
-
- // Use RhpBulkWriteBarrier here instead of SetCardsAfterBulkCopy as RhpBulkWriteBarrier
- // is both faster, and is compatible with a destination that isn't the GC heap.
- InlinedBulkWriteBarrier(pDest, cbDest);
-}
-
COOP_PINVOKE_HELPER(void, RhpBox, (Object * pObj, void * pData))
{
EEType * pEEType = pObj->get_EEType();
@@ -708,8 +686,7 @@ COOP_PINVOKE_HELPER(void, RhpBox, (Object * pObj, void * pData))
// Perform any write barriers necessary for embedded reference fields.
if (pEEType->HasReferenceFields())
{
- ForwardGCSafeCopy(pbFields, pData, cbFields);
- GCHeap::GetGCHeap()->SetCardsAfterBulkCopy((Object**)pbFields, cbFields);
+ GCSafeCopyMemoryWithWriteBarrier(pbFields, pData, cbFields);
}
else
{
@@ -732,7 +709,7 @@ COOP_PINVOKE_HELPER(void, RhUnbox, (Object * pObj, void * pData, EEType * pUnbox
EEType * pEEType = pUnboxToEEType->GetNullableType();
SIZE_T cbFieldPadding = pEEType->get_ValueTypeFieldPadding();
SIZE_T cbFields = pEEType->get_BaseSize() - (sizeof(ObjHeader) + sizeof(EEType*) + cbFieldPadding);
- memset((UInt8*)pData + pUnboxToEEType->GetNullableValueOffset(), 0, cbFields);
+ GCSafeZeroMemory((UInt8*)pData + pUnboxToEEType->GetNullableValueOffset(), cbFields);
return;
}
@@ -760,19 +737,10 @@ COOP_PINVOKE_HELPER(void, RhUnbox, (Object * pObj, void * pData, EEType * pUnbox
SIZE_T cbFields = pEEType->get_BaseSize() - (sizeof(ObjHeader) + sizeof(EEType*) + cbFieldPadding);
UInt8 * pbFields = (UInt8*)pObj + sizeof(EEType*);
- // Perform any write barriers necessary for embedded reference fields. SetCardsAfterBulkCopy doesn't range
- // check the address we pass it and in this case we don't know whether pData really points into the GC
- // heap or not. If we call it with an address outside of the GC range we could end up setting a card
- // outside of the allocated range of the card table, i.e. corrupt memory.
if (pEEType->HasReferenceFields())
{
// Copy the boxed fields into the new location in a GC safe manner
- ForwardGCSafeCopy(pData, pbFields, cbFields);
-
- if ((pData >= g_lowest_address) && (pData < g_highest_address))
- {
- GCHeap::GetGCHeap()->SetCardsAfterBulkCopy((Object**)pData, cbFields);
- }
+ GCSafeCopyMemoryWithWriteBarrier(pData, pbFields, cbFields);
}
else
{