github.com/mono/corert.git

author     Jan Kotas <jkotas@microsoft.com>            2017-01-13 23:29:13 +0300
committer  GitHub <noreply@github.com>                 2017-01-13 23:29:13 +0300
commit     90354c614dee967f002097d4e8780efcdc1a5559 (patch)
tree       cfc46a7bfbf59d834759026cb47601d94d6b82c5
parent     a483c9a061a1e0b2f0cdf4a436f9fa3e4652c768 (diff)
parent     e5678962aab23ce6bac2ba53185781c4467904be (diff)
Merge pull request #2503 from dotnet-bot/from-tfs
Merge changes from TFS
-rw-r--r--   src/Native/Runtime/eventtrace.h                     11
-rw-r--r--   src/Native/Runtime/gcheaputilities.cpp              12
-rw-r--r--   src/Native/Runtime/gcheaputilities.h                13
-rw-r--r--   src/Native/Runtime/gcrhenv.cpp                     209
-rw-r--r--   src/Native/Runtime/profheapwalkhelper.cpp            4
-rw-r--r--   src/Native/Runtime/unix/PalRedhawkUnix.cpp           5
-rw-r--r--   src/Native/Runtime/windows/PalRedhawkMinWin.cpp      2
-rw-r--r--   src/Native/gc/env/gcenv.base.h                       2
-rw-r--r--   src/Native/gc/env/gcenv.ee.h                        10
-rw-r--r--   src/Native/gc/env/gcenv.os.h                         3
-rw-r--r--   src/Native/gc/gc.cpp                               783
-rw-r--r--   src/Native/gc/gc.h                                  16
-rw-r--r--   src/Native/gc/gccommon.cpp                          14
-rw-r--r--   src/Native/gc/gcee.cpp                             207
-rw-r--r--   src/Native/gc/gcenv.ee.standalone.inl               48
-rw-r--r--   src/Native/gc/gcimpl.h                              19
-rw-r--r--   src/Native/gc/gcinterface.ee.h                      36
-rw-r--r--   src/Native/gc/gcinterface.h                        140
-rw-r--r--   src/Native/gc/gcpriv.h                              57
-rw-r--r--   src/Native/gc/gcrecord.h                             2
-rw-r--r--   src/Native/gc/gcscan.cpp                            17
-rw-r--r--   src/Native/gc/gcscan.h                               6
-rw-r--r--   src/Native/gc/gcsvr.cpp                              1
-rw-r--r--   src/Native/gc/gcwks.cpp                              1
-rw-r--r--   src/Native/gc/handletablecache.cpp                   6
-rw-r--r--   src/Native/gc/handletablecore.cpp                    2
-rw-r--r--   src/Native/gc/objecthandle.cpp                      99
-rw-r--r--   src/Native/gc/objecthandle.h                         7
-rw-r--r--   src/Native/gc/sample/CMakeLists.txt                  4
-rw-r--r--   src/Native/gc/sample/GCSample.cpp                   15
-rw-r--r--   src/Native/gc/sample/GCSample.vcxproj                7
-rw-r--r--   src/Native/gc/sample/GCSample.vcxproj.filters        2
-rw-r--r--   src/Native/gc/sample/gcenv.ee.cpp                   46
-rw-r--r--   src/Native/gc/softwarewritewatch.cpp                12
-rw-r--r--   src/Native/gc/softwarewritewatch.h                  40
35 files changed, 967 insertions, 891 deletions
diff --git a/src/Native/Runtime/eventtrace.h b/src/Native/Runtime/eventtrace.h
index 77ab912e9..ae88847ab 100644
--- a/src/Native/Runtime/eventtrace.h
+++ b/src/Native/Runtime/eventtrace.h
@@ -29,7 +29,18 @@
#define _VMEVENTTRACE_H_
#include "eventtracebase.h"
+#include "gcinterface.h"
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+struct ProfilingScanContext : ScanContext
+{
+ BOOL fProfilerPinned;
+ void * pvEtwContext;
+ void *pHeapId;
+
+ ProfilingScanContext(BOOL fProfilerPinnedParam);
+};
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
namespace ETW
{
diff --git a/src/Native/Runtime/gcheaputilities.cpp b/src/Native/Runtime/gcheaputilities.cpp
index 0ff2910fe..dd328fc46 100644
--- a/src/Native/Runtime/gcheaputilities.cpp
+++ b/src/Native/Runtime/gcheaputilities.cpp
@@ -7,4 +7,14 @@
#include "gcheaputilities.h"
// This is the global GC heap, maintained by the VM.
-GPTR_IMPL(IGCHeap, g_pGCHeap);
\ No newline at end of file
+GPTR_IMPL(IGCHeap, g_pGCHeap);
+
+// These globals are variables used within the GC and maintained
+// by the EE for use in write barriers. It is the responsibility
+// of the GC to communicate updates to these globals to the EE through
+// GCToEEInterface::StompWriteBarrier.
+GPTR_IMPL_INIT(uint32_t, g_card_table, nullptr);
+GPTR_IMPL_INIT(uint8_t, g_lowest_address, nullptr);
+GPTR_IMPL_INIT(uint8_t, g_highest_address, nullptr);
+uint8_t* g_ephemeral_low = (uint8_t*)1;
+uint8_t* g_ephemeral_high = (uint8_t*)~0;
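
These globals exist so that the EE's checked write barrier can be kept in sync without the GC knowing how the barrier is emitted. A minimal sketch of such a barrier, assuming a card covers 2048 bytes and a card word holds 32 cards; the helper name is hypothetical and not part of this change:

#include <cstdint>
#include <cstddef>

extern "C" uint32_t* g_card_table;
extern "C" uint8_t*  g_lowest_address;
extern "C" uint8_t*  g_highest_address;

// Hypothetical checked write barrier: store the reference, then mark the card
// covering 'dst' so the next GC rescans it for cross-generation pointers.
inline void CheckedWriteBarrierSketch(uint8_t** dst, uint8_t* ref)
{
    *dst = ref;
    // Stack and other non-heap locations fall outside [g_lowest_address, g_highest_address).
    if ((uint8_t*)dst < g_lowest_address || (uint8_t*)dst >= g_highest_address)
        return;
    size_t card = (size_t)dst >> 11;              // assumed card_size == 2048
    g_card_table[card >> 5] |= 1u << (card & 31); // assumed 32 cards per card word
}
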
diff --git a/src/Native/Runtime/gcheaputilities.h b/src/Native/Runtime/gcheaputilities.h
index 6e08472a2..5aec56bb2 100644
--- a/src/Native/Runtime/gcheaputilities.h
+++ b/src/Native/Runtime/gcheaputilities.h
@@ -10,6 +10,19 @@
// The singular heap instance.
GPTR_DECL(IGCHeap, g_pGCHeap);
+#ifndef DACCESS_COMPILE
+extern "C" {
+#endif // !DACCESS_COMPILE
+GPTR_DECL(uint8_t,g_lowest_address);
+GPTR_DECL(uint8_t,g_highest_address);
+GPTR_DECL(uint32_t,g_card_table);
+#ifndef DACCESS_COMPILE
+}
+#endif // !DACCESS_COMPILE
+
+extern "C" uint8_t* g_ephemeral_low;
+extern "C" uint8_t* g_ephemeral_high;
+
// GCHeapUtilities provides a number of static methods
// that operate on the global heap instance. It can't be
// instantiated.
diff --git a/src/Native/Runtime/gcrhenv.cpp b/src/Native/Runtime/gcrhenv.cpp
index 2ff736292..c60f09f0a 100644
--- a/src/Native/Runtime/gcrhenv.cpp
+++ b/src/Native/Runtime/gcrhenv.cpp
@@ -51,6 +51,25 @@
#include "holder.h"
+#ifdef FEATURE_ETW
+ #ifndef _INC_WINDOWS
+ typedef void* LPVOID;
+ typedef uint32_t UINT;
+ typedef void* PVOID;
+ typedef uint64_t ULONGLONG;
+ typedef uint32_t ULONG;
+ typedef int64_t LONGLONG;
+ typedef uint8_t BYTE;
+ typedef uint16_t UINT16;
+ #endif // _INC_WINDOWS
+
+ #include "etwevents.h"
+ #include "eventtrace.h"
+#else // FEATURE_ETW
+ #include "etmdummy.h"
+ #define ETW_EVENT_ENABLED(e,f) false
+#endif // FEATURE_ETW
+
GPTR_IMPL(EEType, g_pFreeObjectEEType);
#define USE_CLR_CACHE_SIZE_BEHAVIOR
@@ -119,7 +138,7 @@ UInt32 EtwCallback(UInt32 IsEnabled, RH_ETW_CONTEXT * pContext)
FireEtwGCSettings(GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(FALSE),
GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(TRUE),
GCHeapUtilities::IsServerHeap());
- GCHeapUtilities::GetGCHeap()->TraceGCSegments();
+ GCHeapUtilities::GetGCHeap()->DiagTraceGCSegments();
}
// Special check for the runtime provider's GCHeapCollectKeyword. Profilers
@@ -686,8 +705,8 @@ void RedhawkGCInterface::ScanHeap(GcScanObjectFunction pfnScanCallback, void *pC
// static
void RedhawkGCInterface::ScanObject(void *pObject, GcScanObjectFunction pfnScanCallback, void *pContext)
{
-#if !defined(DACCESS_COMPILE) && (defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE))
- GCHeapUtilities::GetGCHeap()->WalkObject((Object*)pObject, (walk_fn)pfnScanCallback, pContext);
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_EVENT_TRACE)
+ GCHeapUtilities::GetGCHeap()->DiagWalkObject((Object*)pObject, (walk_fn)pfnScanCallback, pContext);
#else
UNREFERENCED_PARAMETER(pObject);
UNREFERENCED_PARAMETER(pfnScanCallback);
@@ -759,7 +778,7 @@ void RedhawkGCInterface::ScanStaticRoots(GcScanRootFunction pfnScanCallback, voi
// static
void RedhawkGCInterface::ScanHandleTableRoots(GcScanRootFunction pfnScanCallback, void *pContext)
{
-#if !defined(DACCESS_COMPILE) && (defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE))
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_EVENT_TRACE)
ScanRootsContext sContext;
sContext.m_pfnCallback = pfnScanCallback;
sContext.m_pContext = pContext;
@@ -1148,6 +1167,166 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa
return threadStubArgs.m_pThread;
}
+void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+ UNREFERENCED_PARAMETER(gen);
+ UNREFERENCED_PARAMETER(isInduced);
+}
+
+void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+}
+
+void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+ UNREFERENCED_PARAMETER(gcContext);
+}
+
+void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+ UNREFERENCED_PARAMETER(index);
+ UNREFERENCED_PARAMETER(gen);
+ UNREFERENCED_PARAMETER(reason);
+ UNREFERENCED_PARAMETER(fConcurrent);
+}
+
+// Note on last parameter: when calling this for bgc, only ETW
+// should be sending these events so that existing profapi profilers
+// don't get confused.
+void WalkMovedReferences(uint8_t* begin, uint8_t* end,
+ ptrdiff_t reloc,
+ size_t context,
+ BOOL fCompacting,
+ BOOL fBGC)
+{
+ UNREFERENCED_PARAMETER(begin);
+ UNREFERENCED_PARAMETER(end);
+ UNREFERENCED_PARAMETER(reloc);
+ UNREFERENCED_PARAMETER(context);
+ UNREFERENCED_PARAMETER(fCompacting);
+ UNREFERENCED_PARAMETER(fBGC);
+}
+
+//
+// Diagnostics code
+//
+
+#ifdef FEATURE_EVENT_TRACE
+inline BOOL ShouldTrackMovementForProfilerOrEtw()
+{
+ if (ETW::GCLog::ShouldTrackMovementForEtw())
+ return true;
+
+ return false;
+}
+#endif // FEATURE_EVENT_TRACE
+
+void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+#ifdef FEATURE_EVENT_TRACE
+ if (ShouldTrackMovementForProfilerOrEtw())
+ {
+ size_t context = 0;
+ ETW::GCLog::BeginMovedReferences(&context);
+ GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_gc);
+ ETW::GCLog::EndMovedReferences(context);
+ }
+#else
+ UNREFERENCED_PARAMETER(gcContext);
+#endif // FEATURE_EVENT_TRACE
+}
+
+void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+#ifdef FEATURE_EVENT_TRACE
+ if (ShouldTrackMovementForProfilerOrEtw())
+ {
+ size_t context = 0;
+ ETW::GCLog::BeginMovedReferences(&context);
+ GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_loh);
+ ETW::GCLog::EndMovedReferences(context);
+ }
+#else
+ UNREFERENCED_PARAMETER(gcContext);
+#endif // FEATURE_EVENT_TRACE
+}
+
+void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+#ifdef FEATURE_EVENT_TRACE
+ if (ShouldTrackMovementForProfilerOrEtw())
+ {
+ size_t context = 0;
+ ETW::GCLog::BeginMovedReferences(&context);
+ GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_bgc);
+ ETW::GCLog::EndMovedReferences(context);
+ }
+#else
+ UNREFERENCED_PARAMETER(gcContext);
+#endif // FEATURE_EVENT_TRACE
+}
+
+void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
+{
+ // CoreRT doesn't patch the write barrier like CoreCLR does, but it
+ // still needs to record the changes in the GC heap.
+ assert(args != nullptr);
+ switch (args->operation)
+ {
+ case WriteBarrierOp::StompResize:
+ // StompResize requires a new card table, a new lowest address, and
+ // a new highest address
+ assert(args->card_table != nullptr);
+ assert(args->lowest_address != nullptr);
+ assert(args->highest_address != nullptr);
+ g_card_table = args->card_table;
+
+ // We need to make sure that other threads executing checked write barriers
+ // will see the g_card_table update before g_lowest/highest_address updates.
+ // Otherwise, the checked write barrier may AV accessing the old card table
+ // with address that it does not cover. Write barriers access card table
+ // without memory barriers for performance reasons, so we need to flush
+ // the store buffers here.
+ FlushProcessWriteBuffers();
+
+ g_lowest_address = args->lowest_address;
+ VolatileStore(&g_highest_address, args->highest_address);
+ return;
+ case WriteBarrierOp::StompEphemeral:
+ // StompEphemeral requires a new ephemeral low and a new ephemeral high
+ assert(args->ephemeral_low != nullptr);
+ assert(args->ephemeral_high != nullptr);
+ g_ephemeral_low = args->ephemeral_low;
+ g_ephemeral_high = args->ephemeral_high;
+ return;
+ case WriteBarrierOp::Initialize:
+ // This operation should only be invoked once, upon initialization.
+ assert(g_card_table == nullptr);
+ assert(g_lowest_address == nullptr);
+ assert(g_highest_address == nullptr);
+ assert(args->card_table != nullptr);
+ assert(args->lowest_address != nullptr);
+ assert(args->highest_address != nullptr);
+ assert(args->ephemeral_low != nullptr);
+ assert(args->ephemeral_high != nullptr);
+ assert(args->is_runtime_suspended && "the runtime must be suspended here!");
+
+ g_card_table = args->card_table;
+ g_lowest_address = args->lowest_address;
+ g_highest_address = args->highest_address;
+ g_ephemeral_low = args->ephemeral_low;
+ g_ephemeral_high = args->ephemeral_high;
+ return;
+ case WriteBarrierOp::SwitchToWriteWatch:
+ case WriteBarrierOp::SwitchToNonWriteWatch:
+ assert(!"CoreRT does not have an implementation of non-OS WriteWatch");
+ return;
+ default:
+ assert(!"Unknokwn WriteBarrierOp enum");
+ return;
+ }
+}
+
#endif // !DACCESS_COMPILE
// NOTE: this method is not in thread.cpp because it needs access to the layout of alloc_context for DAC to know the
@@ -1339,14 +1518,6 @@ MethodTable * g_pFreeObjectMethodTable;
int32_t g_TrapReturningThreads;
bool g_fFinalizerRunOnShutDown;
-void StompWriteBarrierEphemeral(bool /* isRuntimeSuspended */)
-{
-}
-
-void StompWriteBarrierResize(bool /* isRuntimeSuspended */, bool /*bReqUpperBoundsCheck*/)
-{
-}
-
bool IsGCThread()
{
return false;
@@ -1425,3 +1596,17 @@ void CPUGroupInfo::GetGroupForProcessor(uint16_t /*processor_number*/, uint16_t
{
ASSERT_UNCONDITIONALLY("NYI: CPUGroupInfo::GetGroupForProcessor");
}
+
+#ifdef FEATURE_EVENT_TRACE
+ProfilingScanContext::ProfilingScanContext(BOOL fProfilerPinnedParam)
+ : ScanContext()
+{
+ pHeapId = NULL;
+ fProfilerPinned = fProfilerPinnedParam;
+ pvEtwContext = NULL;
+#ifdef FEATURE_CONSERVATIVE_GC
+ // To not confuse GCScan::GcScanRoots
+ promotion = g_pConfig->GetGCConservative();
+#endif
+}
+#endif // FEATURE_EVENT_TRACE
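
The StompResize path above encodes an ordering requirement: mutator threads may run the checked barrier while the resize is in flight, so the new card table must become visible before the widened bounds do. A condensed sketch of that required order, using the same globals and helpers as the hunk; the function name is illustrative only:

// Condensed from GCToEEInterface::StompWriteBarrier's StompResize case above.
void StompResizeOrderingSketch(WriteBarrierParameters* args)
{
    g_card_table = args->card_table;       // 1. publish the larger card table
    FlushProcessWriteBuffers();            // 2. drain store buffers on all CPUs
    g_lowest_address = args->lowest_address;                   // 3. only now widen the
    VolatileStore(&g_highest_address, args->highest_address);  //    range the barrier checks
    // If step 3 became visible before step 1, a concurrently running checked
    // barrier could index the old, smaller card table with an address it does
    // not cover and fault.
}
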
diff --git a/src/Native/Runtime/profheapwalkhelper.cpp b/src/Native/Runtime/profheapwalkhelper.cpp
index b4dddca18..038e99ee0 100644
--- a/src/Native/Runtime/profheapwalkhelper.cpp
+++ b/src/Native/Runtime/profheapwalkhelper.cpp
@@ -141,7 +141,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext)
//if (pMT->ContainsPointersOrCollectible())
{
// First round through calculates the number of object refs for this class
- GCHeapUtilities::GetGCHeap()->WalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
+ GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
if (cNumRefs > 0)
{
@@ -166,7 +166,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext)
// Second round saves off all of the ref values
OBJECTREF * pCurObjRef = arrObjRef;
- GCHeapUtilities::GetGCHeap()->WalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
+ GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
}
}
diff --git a/src/Native/Runtime/unix/PalRedhawkUnix.cpp b/src/Native/Runtime/unix/PalRedhawkUnix.cpp
index 8b699c07c..91c5842cc 100644
--- a/src/Native/Runtime/unix/PalRedhawkUnix.cpp
+++ b/src/Native/Runtime/unix/PalRedhawkUnix.cpp
@@ -1457,13 +1457,12 @@ void GCToOSInterface::YieldThread(uint32_t switchCount)
// Reserve virtual memory range.
// Parameters:
-// address - starting virtual address, it can be NULL to let the function choose the starting address
// size - size of the virtual memory range
// alignment - requested memory alignment, 0 means no specific alignment requested
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
-void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
{
ASSERT_MSG(!(flags & VirtualReserveFlags::WriteWatch), "WriteWatch not supported on Unix");
@@ -1474,7 +1473,7 @@ void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignme
size_t alignedSize = size + (alignment - OS_PAGE_SIZE);
- void * pRetVal = mmap(address, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
if (pRetVal != NULL)
{
diff --git a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
index 49097edd3..87b52a1ec 100644
--- a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
+++ b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
@@ -1482,7 +1482,7 @@ void GCToOSInterface::YieldThread(uint32_t /*switchCount*/)
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
-void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
{
DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE;
return ::VirtualAlloc(0, size, memFlags, PAGE_READWRITE);
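
Both PAL implementations now ignore any caller-supplied base address, so the parameter is dropped from the interface. A minimal caller sketch against the new signature (the wrapper name is hypothetical); gc.cpp's virtual_alloc passes card_size * card_word_width as the alignment, as before:

// Hypothetical helper showing the new three-argument VirtualReserve shape.
void* ReserveGCRangeSketch(size_t size, bool want_write_watch)
{
    uint32_t flags = want_write_watch ? VirtualReserveFlags::WriteWatch
                                      : VirtualReserveFlags::None;
    // The OS (mmap with a null hint on Unix, VirtualAlloc with lpAddress == 0
    // on Windows) chooses the base address; only size, alignment, flags remain.
    void* mem = GCToOSInterface::VirtualReserve(size, /*alignment*/ 0, flags);
    if (mem == nullptr)
    {
        // Reservation failed; callers such as virtual_alloc return 0 and let
        // the GC fall back to its out-of-memory handling.
    }
    return mem;
}
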
diff --git a/src/Native/gc/env/gcenv.base.h b/src/Native/gc/env/gcenv.base.h
index 94f73762f..0a0de73ee 100644
--- a/src/Native/gc/env/gcenv.base.h
+++ b/src/Native/gc/env/gcenv.base.h
@@ -96,7 +96,7 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x)
#define UNREFERENCED_PARAMETER(P) (void)(P)
#ifdef PLATFORM_UNIX
-#define _vsnprintf vsnprintf
+#define _vsnprintf_s(string, sizeInBytes, count, format, args) vsnprintf(string, sizeInBytes, format, args)
#define sprintf_s snprintf
#define swprintf_s swprintf
#endif
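
On Unix the new macro simply forwards to vsnprintf and drops the count argument. A minimal sketch of what the log_va_msg call site expands to, assuming _TRUNCATE is passed as the count as in gc.cpp:

#include <cstdarg>
#include <cstdio>

// Expansion of _vsnprintf_s(buf, cap, _TRUNCATE, fmt, args) under the Unix
// mapping above; 'count' is ignored because vsnprintf already truncates to
// 'cap' and NUL-terminates. One behavioral difference to note: on truncation
// vsnprintf returns the would-be length rather than -1, which is what the
// -1 check in log_va_msg was written against.
int format_log_fragment_sketch(char* buf, size_t cap, const char* fmt, va_list args)
{
    return vsnprintf(buf, cap, fmt, args);
}
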
diff --git a/src/Native/gc/env/gcenv.ee.h b/src/Native/gc/env/gcenv.ee.h
index dc6c1d84b..beb0c1a98 100644
--- a/src/Native/gc/env/gcenv.ee.h
+++ b/src/Native/gc/env/gcenv.ee.h
@@ -56,6 +56,16 @@ public:
static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
static Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg);
+
+ // Diagnostics methods.
+ static void DiagGCStart(int gen, bool isInduced);
+ static void DiagUpdateGenerationBounds();
+ static void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent);
+ static void DiagWalkFReachableObjects(void* gcContext);
+ static void DiagWalkSurvivors(void* gcContext);
+ static void DiagWalkLOHSurvivors(void* gcContext);
+ static void DiagWalkBGCSurvivors(void* gcContext);
+ static void StompWriteBarrier(WriteBarrierParameters* args);
};
#endif // __GCENV_EE_H__
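
gcenv.ee.h now exposes a diagnostics surface that the GC calls at fixed points during a collection; the gc.cpp hunks below move the old GC_PROFILING/ETW call sites onto it. A rough sketch of the order in which those hooks fire during a blocking GC, with the wrapper and its name purely hypothetical:

// Hypothetical wrapper illustrating where gc.cpp now invokes the Diag* hooks.
void BlockingGCDiagnosticsSketch(gc_heap* heap, int condemned_gen, bool induced)
{
    GCToEEInterface::DiagGCStart(condemned_gen, induced);  // garbage_collect, once the
                                                           // condemned generation is chosen
    // ... mark phase ...
    GCToEEInterface::DiagWalkFReachableObjects(heap);      // mark_phase, after
                                                           // finalization scanning
    // ... plan phase ...
    GCToEEInterface::DiagWalkSurvivors(heap);              // plan_phase (compact and sweep)
    GCToEEInterface::DiagWalkLOHSurvivors(heap);           // plan_phase, LOH sweep
    GCToEEInterface::DiagUpdateGenerationBounds();         // e.g. get_segment_for_loh
    GCToEEInterface::DiagGCEnd(/*index*/ 0, condemned_gen, /*reason*/ 0, /*fConcurrent*/ false);
}
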
diff --git a/src/Native/gc/env/gcenv.os.h b/src/Native/gc/env/gcenv.os.h
index bb0153f11..6a126f29e 100644
--- a/src/Native/gc/env/gcenv.os.h
+++ b/src/Native/gc/env/gcenv.os.h
@@ -73,13 +73,12 @@ public:
// Reserve virtual memory range.
// Parameters:
- // address - starting virtual address, it can be NULL to let the function choose the starting address
// size - size of the virtual memory range
// alignment - requested memory alignment
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
- static void* VirtualReserve(void *address, size_t size, size_t alignment, uint32_t flags);
+ static void* VirtualReserve(size_t size, size_t alignment, uint32_t flags);
// Release virtual memory range previously reserved using VirtualReserve
// Parameters:
diff --git a/src/Native/gc/gc.cpp b/src/Native/gc/gc.cpp
index 3ba5369a7..99e646c50 100644
--- a/src/Native/gc/gc.cpp
+++ b/src/Native/gc/gc.cpp
@@ -21,22 +21,6 @@
#define USE_INTROSORT
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-inline BOOL ShouldTrackMovementForProfilerOrEtw()
-{
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- return true;
-#endif
-
-#ifdef FEATURE_EVENT_TRACE
- if (ETW::GCLog::ShouldTrackMovementForEtw())
- return true;
-#endif
-
- return false;
-}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
BOOL bgc_heap_walk_for_etw_p = FALSE;
@@ -349,8 +333,8 @@ void gc_heap::add_to_history_per_heap()
#endif //BACKGROUND_GC
current_hist->fgc_lowest = lowest_address;
current_hist->fgc_highest = highest_address;
- current_hist->g_lowest = g_lowest_address;
- current_hist->g_highest = g_highest_address;
+ current_hist->g_lowest = g_gc_lowest_address;
+ current_hist->g_highest = g_gc_highest_address;
gchist_index_per_heap++;
if (gchist_index_per_heap == max_history_count)
@@ -405,7 +389,7 @@ void log_va_msg(const char *fmt, va_list args)
int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging());
buffer_start += pid_len;
memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
- int msg_len = _vsnprintf(&pBuffer[buffer_start], BUFFERSIZE - buffer_start, fmt, args );
+ int msg_len = _vsnprintf_s(&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args );
if (msg_len == -1)
{
msg_len = BUFFERSIZE - buffer_start;
@@ -1418,9 +1402,6 @@ int mark_time, plan_time, sweep_time, reloc_time, compact_time;
#ifndef MULTIPLE_HEAPS
-#define ephemeral_low g_ephemeral_low
-#define ephemeral_high g_ephemeral_high
-
#endif // MULTIPLE_HEAPS
#ifdef TRACE_GC
@@ -2192,6 +2173,52 @@ int log2(unsigned int n)
return pos;
}
+#ifndef DACCESS_COMPILE
+
+void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check)
+{
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::StompResize;
+ args.is_runtime_suspended = is_runtime_suspended;
+ args.requires_upper_bounds_check = requires_upper_bounds_check;
+ args.card_table = g_gc_card_table;
+ args.lowest_address = g_gc_lowest_address;
+ args.highest_address = g_gc_highest_address;
+#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+ if (SoftwareWriteWatch::IsEnabledForGCHeap())
+ {
+ args.write_watch_table = g_gc_sw_ww_table;
+ }
+#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+ GCToEEInterface::StompWriteBarrier(&args);
+}
+
+void stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
+{
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::StompEphemeral;
+ args.is_runtime_suspended = true;
+ args.ephemeral_low = ephemeral_low;
+ args.ephemeral_high = ephemeral_high;
+ GCToEEInterface::StompWriteBarrier(&args);
+}
+
+void stomp_write_barrier_initialize()
+{
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::Initialize;
+ args.is_runtime_suspended = true;
+ args.requires_upper_bounds_check = false;
+ args.card_table = g_gc_card_table;
+ args.lowest_address = g_gc_lowest_address;
+ args.highest_address = g_gc_highest_address;
+ args.ephemeral_low = reinterpret_cast<uint8_t*>(1);
+ args.ephemeral_high = reinterpret_cast<uint8_t*>(~0);
+ GCToEEInterface::StompWriteBarrier(&args);
+}
+
+#endif // DACCESS_COMPILE
+
//extract the low bits [0,low[ of a uint32_t
#define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1))
//extract the high bits [high, 32] of a uint32_t
@@ -2397,6 +2424,10 @@ BOOL gc_heap::ro_segments_in_range;
size_t gc_heap::gen0_big_free_spaces = 0;
+uint8_t* gc_heap::ephemeral_low;
+
+uint8_t* gc_heap::ephemeral_high;
+
uint8_t* gc_heap::lowest_address;
uint8_t* gc_heap::highest_address;
@@ -3422,7 +3453,7 @@ inline
size_t ro_seg_begin_index (heap_segment* seg)
{
size_t begin_index = (size_t)seg / gc_heap::min_segment_size;
- begin_index = max (begin_index, (size_t)g_lowest_address / gc_heap::min_segment_size);
+ begin_index = max (begin_index, (size_t)g_gc_lowest_address / gc_heap::min_segment_size);
return begin_index;
}
@@ -3430,14 +3461,14 @@ inline
size_t ro_seg_end_index (heap_segment* seg)
{
size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) / gc_heap::min_segment_size;
- end_index = min (end_index, (size_t)g_highest_address / gc_heap::min_segment_size);
+ end_index = min (end_index, (size_t)g_gc_highest_address / gc_heap::min_segment_size);
return end_index;
}
void seg_mapping_table_add_ro_segment (heap_segment* seg)
{
#ifdef GROWABLE_SEG_MAPPING_TABLE
- if ((heap_segment_reserved (seg) <= g_lowest_address) || (heap_segment_mem (seg) >= g_highest_address))
+ if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
return;
#endif //GROWABLE_SEG_MAPPING_TABLE
@@ -3621,7 +3652,7 @@ gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
gc_heap* seg_mapping_table_heap_of (uint8_t* o)
{
#ifdef GROWABLE_SEG_MAPPING_TABLE
- if ((o < g_lowest_address) || (o >= g_highest_address))
+ if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
return 0;
#endif //GROWABLE_SEG_MAPPING_TABLE
@@ -3631,7 +3662,7 @@ gc_heap* seg_mapping_table_heap_of (uint8_t* o)
gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
{
#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
- if ((o < g_lowest_address) || (o >= g_highest_address))
+ if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
return 0;
#endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE
@@ -3643,7 +3674,7 @@ gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
heap_segment* seg_mapping_table_segment_of (uint8_t* o)
{
#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
- if ((o < g_lowest_address) || (o >= g_highest_address))
+ if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
#ifdef FEATURE_BASICFREEZE
return ro_segment_lookup (o);
#else
@@ -3686,7 +3717,7 @@ heap_segment* seg_mapping_table_segment_of (uint8_t* o)
#ifdef FEATURE_BASICFREEZE
// TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro
- // segments whenever the ro segment falls into the [g_lowest_address,g_highest_address) range. I.e., it had an
+ // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range. I.e., it had an
// extra "&& (size_t)(entry->seg1) & ro_in_entry" expression. However, at the moment, grow_brick_card_table does
// not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest)
// range changes. We should probably go ahead and modify grow_brick_card_table and put back the
@@ -4086,8 +4117,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
memory_details.current_block_normal = 0;
memory_details.current_block_large = 0;
- g_lowest_address = MAX_PTR;
- g_highest_address = 0;
+ g_gc_lowest_address = MAX_PTR;
+ g_gc_highest_address = 0;
if (((size_t)MAX_PTR - large_size) < normal_size)
{
@@ -4107,8 +4138,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory);
if (allatonce_block)
{
- g_lowest_address = allatonce_block;
- g_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size));
+ g_gc_lowest_address = allatonce_block;
+ g_gc_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size));
memory_details.allocation_pattern = initial_memory_details::ALLATONCE;
for(size_t i = 0; i < memory_details.block_count; i++)
@@ -4131,8 +4162,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
if (b2)
{
memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
- g_lowest_address = min(b1,b2);
- g_highest_address = max(b1 + memory_details.block_count*normal_size,
+ g_gc_lowest_address = min(b1,b2);
+ g_gc_highest_address = max(b1 + memory_details.block_count*normal_size,
b2 + memory_details.block_count*large_size);
for(size_t i = 0; i < memory_details.block_count; i++)
{
@@ -4178,10 +4209,10 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h
}
else
{
- if (current_block->memory_base < g_lowest_address)
- g_lowest_address = current_block->memory_base;
- if (((uint8_t *) current_block->memory_base + block_size) > g_highest_address)
- g_highest_address = (current_block->memory_base + block_size);
+ if (current_block->memory_base < g_gc_lowest_address)
+ g_gc_lowest_address = current_block->memory_base;
+ if (((uint8_t *) current_block->memory_base + block_size) > g_gc_highest_address)
+ g_gc_highest_address = (current_block->memory_base + block_size);
}
reserve_success = TRUE;
}
@@ -4288,7 +4319,7 @@ void* virtual_alloc (size_t size)
flags = VirtualReserveFlags::WriteWatch;
}
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- void* prgmem = GCToOSInterface::VirtualReserve (0, requested_size, card_size * card_word_width, flags);
+ void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);
void *aligned_mem = prgmem;
// We don't want (prgmem + size) to be right at the end of the address space
@@ -4623,22 +4654,22 @@ gc_heap::get_segment (size_t size, BOOL loh_p)
{
uint8_t* start;
uint8_t* end;
- if (mem < g_lowest_address)
+ if (mem < g_gc_lowest_address)
{
start = (uint8_t*)mem;
}
else
{
- start = (uint8_t*)g_lowest_address;
+ start = (uint8_t*)g_gc_lowest_address;
}
- if (((uint8_t*)mem + size) > g_highest_address)
+ if (((uint8_t*)mem + size) > g_gc_highest_address)
{
end = (uint8_t*)mem + size;
}
else
{
- end = (uint8_t*)g_highest_address;
+ end = (uint8_t*)g_gc_highest_address;
}
if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, loh_p) != 0)
@@ -4703,10 +4734,7 @@ heap_segment* gc_heap::get_segment_for_loh (size_t size
FireEtwGCCreateSegment_V1((size_t)heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP, GetClrInstanceId());
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- UpdateGenerationBounds();
-#endif // GC_PROFILING
+ GCToEEInterface::DiagUpdateGenerationBounds();
#ifdef MULTIPLE_HEAPS
hp->thread_loh_segment (res);
@@ -5340,7 +5368,7 @@ heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p
uint8_t* sadd = add;
heap_segment* hs = 0;
heap_segment* hs1 = 0;
- if (!((add >= g_lowest_address) && (add < g_highest_address)))
+ if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
{
delta = 0;
return 0;
@@ -5523,7 +5551,6 @@ public:
saved_post_plug_reloc = temp;
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void swap_pre_plug_and_saved_for_profiler()
{
gap_reloc_pair temp;
@@ -5539,7 +5566,6 @@ public:
memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
saved_post_plug = temp;
}
-#endif //GC_PROFILING || //FEATURE_EVENT_TRACE
// We should think about whether it's really necessary to have to copy back the pre plug
// info since it was already copied during compacting plugs. But if a plug doesn't move
@@ -6399,7 +6425,7 @@ void gc_heap::set_card (size_t card)
inline
void gset_card (size_t card)
{
- g_card_table [card_word (card)] |= (1 << card_bit (card));
+ g_gc_card_table [card_word (card)] |= (1 << card_bit (card));
}
inline
@@ -6510,7 +6536,7 @@ size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
uint32_t* translate_card_bundle_table (uint32_t* cb)
{
- return (uint32_t*)((uint8_t*)cb - ((((size_t)g_lowest_address) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t)));
+ return (uint32_t*)((uint8_t*)cb - ((((size_t)g_gc_lowest_address) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t)));
}
void gc_heap::enable_card_bundles ()
@@ -6722,7 +6748,7 @@ size_t size_mark_array_of (uint8_t* from, uint8_t* end)
// according to the lowest_address.
uint32_t* translate_mark_array (uint32_t* ma)
{
- return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_lowest_address));
+ return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address));
}
// from and end must be page aligned addresses.
@@ -6850,16 +6876,16 @@ void release_card_table (uint32_t* c_table)
{
destroy_card_table (c_table);
// sever the link from the parent
- if (&g_card_table[card_word (gcard_of(g_lowest_address))] == c_table)
+ if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table)
{
- g_card_table = 0;
+ g_gc_card_table = 0;
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
}
else
{
- uint32_t* p_table = &g_card_table[card_word (gcard_of(g_lowest_address))];
+ uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))];
if (p_table)
{
while (p_table && (card_table_next (p_table) != c_table))
@@ -6881,8 +6907,8 @@ void destroy_card_table (uint32_t* c_table)
uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
{
- assert (g_lowest_address == start);
- assert (g_highest_address == end);
+ assert (g_gc_lowest_address == start);
+ assert (g_gc_highest_address == end);
uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
@@ -6902,7 +6928,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
if (can_use_write_watch_for_card_table())
{
virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
- cb = size_card_bundle_of (g_lowest_address, g_highest_address);
+ cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address);
}
#endif //CARD_BUNDLE
@@ -6918,7 +6944,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifdef GROWABLE_SEG_MAPPING_TABLE
- size_t st = size_seg_mapping_table_of (g_lowest_address, g_highest_address);
+ size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
@@ -6932,7 +6958,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1);
- uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags);
if (!mem)
return 0;
@@ -6973,7 +6999,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#ifdef GROWABLE_SEG_MAPPING_TABLE
seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table -
- size_seg_mapping_table_of (0, (align_lower_segment (g_lowest_address))));
+ size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
#endif //GROWABLE_SEG_MAPPING_TABLE
#ifdef MARK_ARRAY
@@ -7012,10 +7038,10 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
gc_heap* hp,
BOOL loh_p)
{
- uint8_t* la = g_lowest_address;
- uint8_t* ha = g_highest_address;
- uint8_t* saved_g_lowest_address = min (start, g_lowest_address);
- uint8_t* saved_g_highest_address = max (end, g_highest_address);
+ uint8_t* la = g_gc_lowest_address;
+ uint8_t* ha = g_gc_highest_address;
+ uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address);
+ uint8_t* saved_g_highest_address = max (end, g_gc_highest_address);
#ifdef BACKGROUND_GC
// This value is only for logging purpose - it's not necessarily exactly what we
// would commit for mark array but close enough for diagnostics purpose.
@@ -7045,18 +7071,18 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
#endif // BIT64
ps *= 2;
- if (saved_g_lowest_address < g_lowest_address)
+ if (saved_g_lowest_address < g_gc_lowest_address)
{
- if (ps > (size_t)g_lowest_address)
+ if (ps > (size_t)g_gc_lowest_address)
saved_g_lowest_address = (uint8_t*)OS_PAGE_SIZE;
else
{
- assert (((size_t)g_lowest_address - ps) >= OS_PAGE_SIZE);
- saved_g_lowest_address = min (saved_g_lowest_address, (g_lowest_address - ps));
+ assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE);
+ saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps));
}
}
- if (saved_g_highest_address > g_highest_address)
+ if (saved_g_highest_address > g_gc_highest_address)
{
saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address);
if (saved_g_highest_address > top)
@@ -7069,7 +7095,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
bool write_barrier_updated = false;
uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
- uint32_t* saved_g_card_table = g_card_table;
+ uint32_t* saved_g_card_table = g_gc_card_table;
uint32_t* ct = 0;
uint32_t* translated_ct = 0;
short* bt = 0;
@@ -7125,7 +7151,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id",
cs, bs, cb, wws, st, ms));
- uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags);
if (!mem)
{
@@ -7152,7 +7178,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
card_table_refcount (ct) = 0;
card_table_lowest_address (ct) = saved_g_lowest_address;
card_table_highest_address (ct) = saved_g_highest_address;
- card_table_next (ct) = &g_card_table[card_word (gcard_of (la))];
+ card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))];
//clear the card table
/*
@@ -7179,9 +7205,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
seg_mapping* new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
- memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_lowest_address)],
- &seg_mapping_table[seg_mapping_word_of(g_lowest_address)],
- size_seg_mapping_table_of(g_lowest_address, g_highest_address));
+ memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+ &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
+ size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));
seg_mapping_table = new_seg_mapping_table;
}
@@ -7243,13 +7269,12 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the
// runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call.
// So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state
- // may run while this thread is blocked. This includes updates to g_card_table, g_lowest_address, and
- // g_highest_address.
+ // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and
+ // g_gc_highest_address.
suspend_EE();
}
- g_card_table = translated_ct;
-
+ g_gc_card_table = translated_ct;
SoftwareWriteWatch::SetResizedUntranslatedTable(
mem + sw_ww_table_offset,
saved_g_lowest_address,
@@ -7260,7 +7285,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// grow version of the write barrier. This test tells us if the new
// segment was allocated at a lower address than the old, requiring
// that we start doing an upper bounds check in the write barrier.
- StompWriteBarrierResize(true, la != saved_g_lowest_address);
+ g_gc_lowest_address = saved_g_lowest_address;
+ g_gc_highest_address = saved_g_highest_address;
+ stomp_write_barrier_resize(true, la != saved_g_lowest_address);
write_barrier_updated = true;
if (!is_runtime_suspended)
@@ -7271,9 +7298,12 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
else
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
{
- g_card_table = translated_ct;
+ g_gc_card_table = translated_ct;
}
+ g_gc_lowest_address = saved_g_lowest_address;
+ g_gc_highest_address = saved_g_highest_address;
+
if (!write_barrier_updated)
{
// This passes a bool telling whether we need to switch to the post
@@ -7284,19 +7314,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// to be changed, so we are doing this after all global state has
// been updated. See the comment above suspend_EE() above for more
// info.
- StompWriteBarrierResize(!!IsGCThread(), la != saved_g_lowest_address);
+ stomp_write_barrier_resize(!!IsGCThread(), la != saved_g_lowest_address);
}
- // We need to make sure that other threads executing checked write barriers
- // will see the g_card_table update before g_lowest/highest_address updates.
- // Otherwise, the checked write barrier may AV accessing the old card table
- // with address that it does not cover. Write barriers access card table
- // without memory barriers for performance reasons, so we need to flush
- // the store buffers here.
- GCToOSInterface::FlushProcessWriteBuffers();
-
- g_lowest_address = saved_g_lowest_address;
- VolatileStore(&g_highest_address, saved_g_highest_address);
return 0;
@@ -7305,7 +7325,7 @@ fail:
if (mem)
{
- assert(g_card_table == saved_g_card_table);
+ assert(g_gc_card_table == saved_g_card_table);
//delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned))
@@ -7463,7 +7483,7 @@ void gc_heap::copy_brick_card_table()
assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));
/* todo: Need a global lock for this */
- uint32_t* ct = &g_card_table[card_word (gcard_of (g_lowest_address))];
+ uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
own_card_table (ct);
card_table = translate_card_table (ct);
/* End of global lock */
@@ -7476,8 +7496,8 @@ void gc_heap::copy_brick_card_table()
if (gc_can_use_concurrent)
{
mark_array = translate_mark_array (card_table_mark_array (ct));
- assert (mark_word_of (g_highest_address) ==
- mark_word_of (align_on_mark_word (g_highest_address)));
+ assert (mark_word_of (g_gc_highest_address) ==
+ mark_word_of (align_on_mark_word (g_gc_highest_address)));
}
else
mark_array = NULL;
@@ -7486,13 +7506,13 @@ void gc_heap::copy_brick_card_table()
#ifdef CARD_BUNDLE
#if defined(MARK_ARRAY) && defined(_DEBUG)
#ifdef GROWABLE_SEG_MAPPING_TABLE
- size_t st = size_seg_mapping_table_of (g_lowest_address, g_highest_address);
+ size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
#else //GROWABLE_SEG_MAPPING_TABLE
size_t st = 0;
#endif //GROWABLE_SEG_MAPPING_TABLE
#endif //MARK_ARRAY && _DEBUG
card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct));
- assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_lowest_address))))] ==
+ assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
card_table_card_bundle_table (ct));
//set the card table if we are in a heap growth scenario
@@ -9330,13 +9350,13 @@ void gc_heap::update_card_table_bundle()
bool success = GCToOSInterface::GetWriteWatch (false /* resetState */ , base_address, region_size,
(void**)g_addresses,
&bcount);
- assert (success);
+ assert (success && "GetWriteWatch failed!");
dprintf (3,("Found %d pages written", bcount));
for (unsigned i = 0; i < bcount; i++)
{
size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
- assert (bcardw >= card_word (card_of (g_lowest_address)));
+ assert (bcardw >= card_word (card_of (g_gc_lowest_address)));
card_bundles_set (cardw_card_bundle (bcardw),
cardw_card_bundle (align_cardw_on_bundle (ecardw)));
@@ -9639,7 +9659,7 @@ void gc_heap::make_generation (generation& gen, heap_segment* seg, uint8_t* star
#endif //FREE_USAGE_STATS
}
-void gc_heap::adjust_ephemeral_limits (bool is_runtime_suspended)
+void gc_heap::adjust_ephemeral_limits ()
{
ephemeral_low = generation_allocation_start (generation_of (max_generation - 1));
ephemeral_high = heap_segment_reserved (ephemeral_heap_segment);
@@ -9647,8 +9667,10 @@ void gc_heap::adjust_ephemeral_limits (bool is_runtime_suspended)
dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix",
(size_t)ephemeral_low, (size_t)ephemeral_high))
+#ifndef MULTIPLE_HEAPS
// This updates the write barrier helpers with the new info.
- StompWriteBarrierEphemeral(is_runtime_suspended);
+ stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high);
+#endif // MULTIPLE_HEAPS
}
#if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
@@ -9821,9 +9843,9 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
settings.first_init();
- g_card_table = make_card_table (g_lowest_address, g_highest_address);
+ g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address);
- if (!g_card_table)
+ if (!g_gc_card_table)
return E_OUTOFMEMORY;
gc_started = FALSE;
@@ -10306,7 +10328,7 @@ gc_heap::init_gc_heap (int h_number)
#endif //MULTIPLE_HEAPS
/* todo: Need a global lock for this */
- uint32_t* ct = &g_card_table [card_word (card_of (g_lowest_address))];
+ uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))];
own_card_table (ct);
card_table = translate_card_table (ct);
/* End of global lock */
@@ -10317,13 +10339,13 @@ gc_heap::init_gc_heap (int h_number)
#ifdef CARD_BUNDLE
card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct));
- assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_lowest_address))))] ==
+ assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
card_table_card_bundle_table (ct));
#endif //CARD_BUNDLE
#ifdef MARK_ARRAY
if (gc_can_use_concurrent)
- mark_array = translate_mark_array (card_table_mark_array (&g_card_table[card_word (card_of (g_lowest_address))]));
+ mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
else
mark_array = NULL;
#endif //MARK_ARRAY
@@ -10360,6 +10382,7 @@ gc_heap::init_gc_heap (int h_number)
(size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP,
GetClrInstanceId());
+
#ifdef SEG_MAPPING_TABLE
seg_mapping_table_add_segment (lseg, __this);
#else //SEG_MAPPING_TABLE
@@ -10442,7 +10465,7 @@ gc_heap::init_gc_heap (int h_number)
make_background_mark_stack (b_arr);
#endif //BACKGROUND_GC
- adjust_ephemeral_limits(true);
+ adjust_ephemeral_limits();
#ifdef MARK_ARRAY
// why would we clear the mark array for this page? it should be cleared..
@@ -13043,12 +13066,12 @@ int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
if (can_allocate)
{
- //ETW trace for allocation tick
size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr;
int etw_allocation_index = ((gen_number == 0) ? 0 : 1);
etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;
+
if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
{
#ifdef FEATURE_REDHAWK
@@ -14785,6 +14808,9 @@ int gc_heap::generation_to_condemn (int n_initial,
dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number));
n = max_generation;
*blocking_collection_p = TRUE;
+ if ((local_settings->reason == reason_oos_loh) ||
+ (local_settings->reason == reason_alloc_loh))
+ evaluate_elevation = FALSE;
local_condemn_reasons->set_condition (gen_before_oom);
}
@@ -15183,7 +15209,7 @@ void gc_heap::gc1()
vm_heap->GcCondemnedGeneration = settings.condemned_generation;
- assert (g_card_table == card_table);
+ assert (g_gc_card_table == card_table);
{
if (n == max_generation)
@@ -15337,7 +15363,11 @@ void gc_heap::gc1()
if (!settings.concurrent)
#endif //BACKGROUND_GC
{
- adjust_ephemeral_limits(!!IsGCThread());
+#ifndef FEATURE_REDHAWK
+ // IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR.
+ assert(!!IsGCThread());
+#endif // FEATURE_REDHAWK
+ adjust_ephemeral_limits();
}
#ifdef BACKGROUND_GC
@@ -15472,7 +15502,15 @@ void gc_heap::gc1()
#ifdef FEATURE_EVENT_TRACE
if (bgc_heap_walk_for_etw_p && settings.concurrent)
{
- make_free_lists_for_profiler_for_bgc();
+ GCToEEInterface::DiagWalkBGCSurvivors(__this);
+
+#ifdef MULTIPLE_HEAPS
+ bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
+ if (bgc_t_join.joined())
+ {
+ bgc_t_join.restart();
+ }
+#endif // MULTIPLE_HEAPS
}
#endif // FEATURE_EVENT_TRACE
#endif //BACKGROUND_GC
@@ -16169,7 +16207,11 @@ BOOL gc_heap::expand_soh_with_minimal_gc()
dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size;
dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation));
- adjust_ephemeral_limits(!!IsGCThread());
+#ifndef FEATURE_REDHAWK
+ // IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR.
+ assert(!!IsGCThread());
+#endif // FEATURE_REDHAWK
+ adjust_ephemeral_limits();
return TRUE;
}
else
@@ -16382,7 +16424,7 @@ int gc_heap::garbage_collect (int n)
for (int i = 0; i < n_heaps; i++)
{
//copy the card and brick tables
- if (g_card_table != g_heaps[i]->card_table)
+ if (g_gc_card_table != g_heaps[i]->card_table)
{
g_heaps[i]->copy_brick_card_table();
}
@@ -16406,100 +16448,67 @@ int gc_heap::garbage_collect (int n)
}
#endif //BACKGROUND_GC
// check for card table growth
- if (g_card_table != card_table)
+ if (g_gc_card_table != card_table)
copy_brick_card_table();
#endif //MULTIPLE_HEAPS
- BOOL should_evaluate_elevation = FALSE;
- BOOL should_do_blocking_collection = FALSE;
+ BOOL should_evaluate_elevation = FALSE;
+ BOOL should_do_blocking_collection = FALSE;
#ifdef MULTIPLE_HEAPS
- int gen_max = condemned_generation_num;
- for (int i = 0; i < n_heaps; i++)
- {
- if (gen_max < g_heaps[i]->condemned_generation_num)
- gen_max = g_heaps[i]->condemned_generation_num;
- if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
- should_evaluate_elevation = TRUE;
- if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
- should_do_blocking_collection = TRUE;
- }
+ int gen_max = condemned_generation_num;
+ for (int i = 0; i < n_heaps; i++)
+ {
+ if (gen_max < g_heaps[i]->condemned_generation_num)
+ gen_max = g_heaps[i]->condemned_generation_num;
+ if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested))
+ should_evaluate_elevation = TRUE;
+ if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
+ should_do_blocking_collection = TRUE;
+ }
- settings.condemned_generation = gen_max;
-//logically continues after GC_PROFILING.
+ settings.condemned_generation = gen_max;
#else //MULTIPLE_HEAPS
- settings.condemned_generation = generation_to_condemn (n,
- &blocking_collection,
- &elevation_requested,
- FALSE);
- should_evaluate_elevation = elevation_requested;
- should_do_blocking_collection = blocking_collection;
-#endif //MULTIPLE_HEAPS
-
- settings.condemned_generation = joined_generation_to_condemn (
- should_evaluate_elevation,
- settings.condemned_generation,
- &should_do_blocking_collection
- STRESS_HEAP_ARG(n)
- );
+ settings.condemned_generation = generation_to_condemn (n,
+ &blocking_collection,
+ &elevation_requested,
+ FALSE);
+ should_evaluate_elevation = elevation_requested;
+ should_do_blocking_collection = blocking_collection;
+#endif //MULTIPLE_HEAPS
+
+ settings.condemned_generation = joined_generation_to_condemn (
+ should_evaluate_elevation,
+ settings.condemned_generation,
+ &should_do_blocking_collection
+ STRESS_HEAP_ARG(n)
+ );
- STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
- "condemned generation num: %d\n", settings.condemned_generation);
+ STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
+ "condemned generation num: %d\n", settings.condemned_generation);
- record_gcs_during_no_gc();
+ record_gcs_during_no_gc();
- if (settings.condemned_generation > 1)
- settings.promotion = TRUE;
+ if (settings.condemned_generation > 1)
+ settings.promotion = TRUE;
#ifdef HEAP_ANALYZE
- // At this point we've decided what generation is condemned
- // See if we've been requested to analyze survivors after the mark phase
- if (AnalyzeSurvivorsRequested(settings.condemned_generation))
- {
- heap_analyze_enabled = TRUE;
- }
-#endif // HEAP_ANALYZE
-
-#ifdef GC_PROFILING
-
- // If we're tracking GCs, then we need to walk the first generation
- // before collection to track how many items of each class has been
- // allocated.
- UpdateGenerationBounds();
- GarbageCollectionStartedCallback(settings.condemned_generation, settings.reason == reason_induced);
+ // At this point we've decided what generation is condemned
+ // See if we've been requested to analyze survivors after the mark phase
+ if (AnalyzeSurvivorsRequested(settings.condemned_generation))
{
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- size_t profiling_context = 0;
-
-#ifdef MULTIPLE_HEAPS
- int hn = 0;
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- gc_heap* hp = gc_heap::g_heaps [hn];
-
- // When we're walking objects allocated by class, then we don't want to walk the large
- // object heap because then it would count things that may have been around for a while.
- hp->walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE);
- }
-#else
- // When we're walking objects allocated by class, then we don't want to walk the large
- // object heap because then it would count things that may have been around for a while.
- gc_heap::walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE);
-#endif //MULTIPLE_HEAPS
-
- // Notify that we've reached the end of the Gen 0 scan
- g_profControlBlock.pProfInterface->EndAllocByClass(&profiling_context);
- END_PIN_PROFILER();
+ heap_analyze_enabled = TRUE;
}
+#endif // HEAP_ANALYZE
-#endif // GC_PROFILING
+ GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced);
#ifdef BACKGROUND_GC
if ((settings.condemned_generation == max_generation) &&
(recursive_gc_sync::background_running_p()))
{
- //TODO BACKGROUND_GC If we just wait for the end of gc, it won't woork
+ //TODO BACKGROUND_GC If we just wait for the end of gc, it won't work
// because we have to collect 0 and 1 properly
// in particular, the allocation contexts are gone.
// For now, it is simpler to collect max_generation-1
@@ -19625,12 +19634,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
dprintf (3, ("Finalize marking"));
finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this);
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- {
- finalize_queue->WalkFReachableObjects (__this);
- }
-#endif //GC_PROFILING
+ GCToEEInterface::DiagWalkFReachableObjects(__this);
#endif // FEATURE_PREMORTEM_FINALIZATION
// Scan dependent handles again to promote any secondaries associated with primaries that were promoted
@@ -21105,8 +21109,7 @@ void gc_heap::relocate_in_loh_compact()
generation_free_obj_space (gen)));
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_relocation_loh (size_t profiling_context)
+void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn)
{
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -21136,14 +21139,7 @@ void gc_heap::walk_relocation_loh (size_t profiling_context)
STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);
- {
- ETW::GCLog::MovedReference(
- o,
- (o + size),
- reloc,
- profiling_context,
- settings.compaction);
- }
+ fn (o, (o + size), reloc, profiling_context, settings.compaction, FALSE);
o = o + size;
if (o < heap_segment_allocated (seg))
@@ -21160,7 +21156,6 @@ void gc_heap::walk_relocation_loh (size_t profiling_context)
}
}
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
BOOL gc_heap::loh_object_p (uint8_t* o)
{
@@ -22318,10 +22313,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
if (!loh_compacted_p)
#endif //FEATURE_LOH_COMPACTION
{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- notify_profiler_of_surviving_large_objects();
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkLOHSurvivors(__this);
sweep_large_objects();
}
}
@@ -22432,7 +22424,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
for (i = 0; i < n_heaps; i++)
{
//copy the card and brick tables
- if (g_card_table!= g_heaps[i]->card_table)
+ if (g_gc_card_table!= g_heaps[i]->card_table)
{
g_heaps[i]->copy_brick_card_table();
}
@@ -22523,12 +22515,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
assert (generation_allocation_segment (consing_gen) ==
ephemeral_heap_segment);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- {
- record_survived_for_profiler(condemned_gen_number, first_condemned_address);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkSurvivors(__this);
relocate_phase (condemned_gen_number, first_condemned_address);
compact_phase (condemned_gen_number, first_condemned_address,
@@ -22738,12 +22725,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
fix_older_allocation_area (older_gen);
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- if (ShouldTrackMovementForProfilerOrEtw())
- {
- record_survived_for_profiler(condemned_gen_number, first_condemned_address);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ GCToEEInterface::DiagWalkSurvivors(__this);
gen0_big_free_spaces = 0;
make_free_lists (condemned_gen_number);
@@ -23949,8 +23931,7 @@ void gc_heap::relocate_survivors (int condemned_gen_number,
}
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args, size_t profiling_context)
+void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args)
{
if (check_last_object_p)
{
@@ -23970,15 +23951,10 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
}
ptrdiff_t last_plug_relocation = node_relocation_distance (plug);
- ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
-
STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
+ ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;
- ETW::GCLog::MovedReference(plug,
- (plug + size),
- reloc,
- profiling_context,
- settings.compaction);
+ (args->fn) (plug, (plug + size), reloc, args->profiling_context, settings.compaction, FALSE);
if (check_last_object_p)
{
@@ -23995,12 +23971,12 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w
}
}
-void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context)
+void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
{
assert ((tree != NULL));
if (node_left_child (tree))
{
- walk_relocation_in_brick (tree + node_left_child (tree), args, profiling_context);
+ walk_relocation_in_brick (tree + node_left_child (tree), args);
}
uint8_t* plug = tree;
@@ -24029,7 +24005,7 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args,
assert (last_plug_size >= Align (min_obj_size));
}
- walk_plug (args->last_plug, last_plug_size, check_last_object_p, args, profiling_context);
+ walk_plug (args->last_plug, last_plug_size, check_last_object_p, args);
}
else
{
@@ -24042,18 +24018,14 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args,
if (node_right_child (tree))
{
- walk_relocation_in_brick (tree + node_right_child (tree), args, profiling_context);
-
+ walk_relocation_in_brick (tree + node_right_child (tree), args);
}
}
-void gc_heap::walk_relocation (int condemned_gen_number,
- uint8_t* first_condemned_address,
- size_t profiling_context)
-
+void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn)
{
- generation* condemned_gen = generation_of (condemned_gen_number);
- uint8_t* start_address = first_condemned_address;
+ generation* condemned_gen = generation_of (settings.condemned_generation);
+ uint8_t* start_address = generation_allocation_start (condemned_gen);
size_t current_brick = brick_of (start_address);
heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));
@@ -24066,6 +24038,8 @@ void gc_heap::walk_relocation (int condemned_gen_number,
args.is_shortened = FALSE;
args.pinned_plug_entry = 0;
args.last_plug = 0;
+ args.profiling_context = profiling_context;
+ args.fn = fn;
while (1)
{
@@ -24075,8 +24049,8 @@ void gc_heap::walk_relocation (int condemned_gen_number,
{
walk_plug (args.last_plug,
(heap_segment_allocated (current_heap_segment) - args.last_plug),
- args.is_shortened,
- &args, profiling_context);
+ args.is_shortened,
+ &args);
args.last_plug = 0;
}
if (heap_segment_next_rw (current_heap_segment))
@@ -24097,16 +24071,29 @@ void gc_heap::walk_relocation (int condemned_gen_number,
{
walk_relocation_in_brick (brick_address (current_brick) +
brick_entry - 1,
- &args,
- profiling_context);
+ &args);
}
}
current_brick++;
}
}
+void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type)
+{
+ if (type == walk_for_gc)
+ walk_survivors_relocation (context, fn);
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
+ else if (type == walk_for_bgc)
+ walk_survivors_for_bgc (context, fn);
+#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
+ else if (type == walk_for_loh)
+ walk_survivors_for_loh (context, fn);
+ else
+ assert (!"unknown type!");
+}
+
+#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn)
{
// This should only be called for BGCs
assert(settings.concurrent);
@@ -24140,8 +24127,7 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
uint8_t* end = heap_segment_allocated (seg);
while (o < end)
- {
-
+ {
if (method_table(o) == g_pFreeObjectMethodTable)
{
o += Align (size (o), align_const);
@@ -24164,51 +24150,18 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context)
uint8_t* plug_end = o;
- // Note on last parameter: since this is for bgc, only ETW
- // should be sending these events so that existing profapi profilers
- // don't get confused.
- ETW::GCLog::MovedReference(
- plug_start,
+ fn (plug_start,
plug_end,
0, // Reloc distance == 0 as this is non-compacting
profiling_context,
FALSE, // Non-compacting
- FALSE); // fAllowProfApiNotification
+ TRUE); // BGC
}
seg = heap_segment_next (seg);
}
}
-
-void gc_heap::make_free_lists_for_profiler_for_bgc ()
-{
- assert(settings.concurrent);
-
- size_t profiling_context = 0;
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
- // This provides the profiler with information on what blocks of
- // memory are moved during a gc.
-
- walk_relocation_for_bgc(profiling_context);
-
- // Notify the EE-side profiling code that all the references have been traced for
- // this heap, and that it needs to flush all cached data it hasn't sent to the
- // profiler and release resources it no longer needs. Since this is for bgc, only
- // ETW should be sending these events so that existing profapi profilers don't get confused.
- ETW::GCLog::EndMovedReferences(profiling_context, FALSE /* fAllowProfApiNotification */);
-
-#ifdef MULTIPLE_HEAPS
- bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
- if (bgc_t_join.joined())
- {
- bgc_t_join.restart();
- }
-#endif // MULTIPLE_HEAPS
-}
-
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void gc_heap::relocate_phase (int condemned_gen_number,
uint8_t* first_condemned_address)
@@ -24809,7 +24762,7 @@ void gc_heap::compact_phase (int condemned_gen_number,
#pragma warning(push)
#pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
#endif //_MSC_VER
-void __stdcall gc_heap::gc_thread_stub (void* arg)
+void gc_heap::gc_thread_stub (void* arg)
{
ClrFlsSetThreadType (ThreadType_GC);
STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
@@ -25177,14 +25130,14 @@ BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
if (new_card_table == 0)
{
- new_card_table = g_card_table;
+ new_card_table = g_gc_card_table;
}
if (hp->card_table != new_card_table)
{
if (new_lowest_address == 0)
{
- new_lowest_address = g_lowest_address;
+ new_lowest_address = g_gc_lowest_address;
}
uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))];
@@ -29174,7 +29127,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
return consing_gen;
//copy the card and brick tables
- if (g_card_table!= card_table)
+ if (g_gc_card_table!= card_table)
copy_brick_card_table();
BOOL new_segment_p = (heap_segment_next (new_seg) == 0);
@@ -30619,35 +30572,21 @@ BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
return m;
}
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void gc_heap::record_survived_for_profiler(int condemned_gen_number, uint8_t * start_address)
+void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_fn fn)
{
- size_t profiling_context = 0;
-
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
// Now walk the portion of memory that is actually being relocated.
- walk_relocation(condemned_gen_number, start_address, profiling_context);
+ walk_relocation (profiling_context, fn);
#ifdef FEATURE_LOH_COMPACTION
if (loh_compacted_p)
{
- walk_relocation_loh (profiling_context);
+ walk_relocation_for_loh (profiling_context, fn);
}
#endif //FEATURE_LOH_COMPACTION
-
- // Notify the EE-side profiling code that all the references have been traced for
- // this heap, and that it needs to flush all cached data it hasn't sent to the
- // profiler and release resources it no longer needs.
- ETW::GCLog::EndMovedReferences(profiling_context);
}
-void gc_heap::notify_profiler_of_surviving_large_objects ()
+void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn)
{
- size_t profiling_context = 0;
-
- ETW::GCLog::BeginMovedReferences(&profiling_context);
-
generation* gen = large_object_generation;
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));;
@@ -30657,13 +30596,6 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
uint8_t* plug_end = o;
uint8_t* plug_start = o;
- // Generally, we can only get here if this is TRUE:
- // (CORProfilerTrackGC() || ETW::GCLog::ShouldTrackMovementForEtw())
- // But we can't always assert that, as races could theoretically cause GC profiling
- // or ETW to turn off just before we get here. This is harmless (we do checks later
- // on, under appropriate locks, before actually calling into profilers), though it's
- // a slowdown to determine these plugs for nothing.
-
while (1)
{
if (o >= heap_segment_allocated (seg))
@@ -30691,12 +30623,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
plug_end = o;
- ETW::GCLog::MovedReference(
- plug_start,
- plug_end,
- 0,
- profiling_context,
- FALSE);
+ fn (plug_start, plug_end, 0, profiling_context, FALSE, FALSE);
}
else
{
@@ -30706,9 +30633,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects ()
}
}
}
- ETW::GCLog::EndMovedReferences(profiling_context);
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef BACKGROUND_GC
@@ -31940,7 +31865,6 @@ void gc_heap::descr_card_table ()
void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef MULTIPLE_HEAPS
int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
for (int i = 0; i < n_heaps; i++)
@@ -32018,7 +31942,6 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
curr_gen_number0--;
}
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
#ifdef TRACE_GC
@@ -32514,7 +32437,7 @@ void gc_heap::clear_all_mark_array()
void gc_heap::verify_mark_array_cleared (heap_segment* seg)
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
- assert (card_table == g_card_table);
+ assert (card_table == g_gc_card_table);
size_t markw = mark_word_of (heap_segment_mem (seg));
size_t markw_end = mark_word_of (heap_segment_reserved (seg));
@@ -32862,8 +32785,8 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#endif //BACKGROUND_GC
#ifndef MULTIPLE_HEAPS
- if ((g_ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
- (g_ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
+ if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
+ (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
{
FATAL_GC_ERROR();
}
@@ -32922,7 +32845,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
for (int i = 0; i < n_heaps; i++)
{
//copy the card and brick tables
- if (g_card_table != g_heaps[i]->card_table)
+ if (g_gc_card_table != g_heaps[i]->card_table)
{
g_heaps[i]->copy_brick_card_table();
}
@@ -32931,7 +32854,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
current_join->restart();
}
#else
- if (g_card_table != card_table)
+ if (g_gc_card_table != card_table)
copy_brick_card_table();
#endif //MULTIPLE_HEAPS
@@ -33356,11 +33279,11 @@ HRESULT GCHeap::Shutdown ()
//CloseHandle (WaitForGCEvent);
//find out if the global card table hasn't been used yet
- uint32_t* ct = &g_card_table[card_word (gcard_of (g_lowest_address))];
+ uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
if (card_table_refcount (ct) == 0)
{
destroy_card_table (ct);
- g_card_table = 0;
+ g_gc_card_table = 0;
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -33520,7 +33443,7 @@ HRESULT GCHeap::Initialize ()
return E_FAIL;
}
- StompWriteBarrierResize(true, false);
+ stomp_write_barrier_initialize();
#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS)
@@ -33557,10 +33480,7 @@ HRESULT GCHeap::Initialize ()
{
GCScan::GcRuntimeStructuresValid (TRUE);
-#ifdef GC_PROFILING
- if (CORProfilerTrackGC())
- UpdateGenerationBounds();
-#endif // GC_PROFILING
+ GCToEEInterface::DiagUpdateGenerationBounds();
}
return hr;
@@ -33644,7 +33564,7 @@ Object * GCHeap::NextObj (Object * object)
uint8_t* o = (uint8_t*)object;
#ifndef FEATURE_BASICFREEZE
- if (!((o < g_highest_address) && (o >= g_lowest_address)))
+ if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address)))
{
return NULL;
}
@@ -33715,7 +33635,7 @@ BOOL GCHeap::IsHeapPointer (void* vpObject, BOOL small_heap_only)
uint8_t* object = (uint8_t*) vpObject;
#ifndef FEATURE_BASICFREEZE
- if (!((object < g_highest_address) && (object >= g_lowest_address)))
+ if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address)))
return FALSE;
#endif //!FEATURE_BASICFREEZE
@@ -34969,7 +34889,6 @@ void gc_heap::do_post_gc()
{
if (!settings.concurrent)
{
- GCProfileWalkHeap();
initGCShadow();
}
@@ -34989,13 +34908,10 @@ void gc_heap::do_post_gc()
GCToEEInterface::GcDone(settings.condemned_generation);
-#ifdef GC_PROFILING
- if (!settings.concurrent)
- {
- UpdateGenerationBounds();
- GarbageCollectionFinishedCallback();
- }
-#endif // GC_PROFILING
+ GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index),
+ (uint32_t)settings.condemned_generation,
+ (uint32_t)settings.reason,
+ !!settings.concurrent);
//dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)",
dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)",
@@ -35772,85 +35688,6 @@ void GCHeap::SetFinalizationRun (Object* obj)
#endif // FEATURE_PREMORTEM_FINALIZATION
-//----------------------------------------------------------------------------
-//
-// Write Barrier Support for bulk copy ("Clone") operations
-//
-// StartPoint is the target bulk copy start point
-// len is the length of the bulk copy (in bytes)
-//
-//
-// Performance Note:
-//
-// This is implemented somewhat "conservatively", that is we
-// assume that all the contents of the bulk copy are object
-// references. If they are not, and the value lies in the
-// ephemeral range, we will set false positives in the card table.
-//
-// We could use the pointer maps and do this more accurately if necessary
-
-#if defined(_MSC_VER) && defined(_TARGET_X86_)
-#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
-#endif //_MSC_VER && _TARGET_X86_
-
-void
-GCHeap::SetCardsAfterBulkCopy( Object **StartPoint, size_t len )
-{
- Object **rover;
- Object **end;
-
- // Target should aligned
- assert(Aligned ((size_t)StartPoint));
-
-
- // Don't optimize the Generation 0 case if we are checking for write barrier voilations
- // since we need to update the shadow heap even in the generation 0 case.
-#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
- if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)
- for(unsigned i=0; i < len / sizeof(Object*); i++)
- updateGCShadow(&StartPoint[i], StartPoint[i]);
-#endif //WRITE_BARRIER_CHECK && !SERVER_GC
-
-#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- if (SoftwareWriteWatch::IsEnabledForGCHeap())
- {
- SoftwareWriteWatch::SetDirtyRegion(StartPoint, len);
- }
-#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-
- // If destination is in Gen 0 don't bother
- if (
-#ifdef BACKGROUND_GC
- (!gc_heap::settings.concurrent) &&
-#endif //BACKGROUND_GC
- (g_theGCHeap->WhichGeneration( (Object*) StartPoint ) == 0))
- return;
-
- rover = StartPoint;
- end = StartPoint + (len/sizeof(Object*));
- while (rover < end)
- {
- if ( (((uint8_t*)*rover) >= g_ephemeral_low) && (((uint8_t*)*rover) < g_ephemeral_high) )
- {
- // Set Bit For Card and advance to next card
- size_t card = gcard_of ((uint8_t*)rover);
-
- Interlocked::Or (&g_card_table[card/card_word_width], (1U << (card % card_word_width)));
- // Skip to next card for the object
- rover = (Object**)align_on_card ((uint8_t*)(rover+1));
- }
- else
- {
- rover++;
- }
- }
-}
-
-#if defined(_MSC_VER) && defined(_TARGET_X86_)
-#pragma optimize("", on) // Go back to command line default optimizations
-#endif //_MSC_VER && _TARGET_X86_
-
-
#ifdef FEATURE_PREMORTEM_FINALIZATION
//--------------------------------------------------------------------
@@ -36278,21 +36115,17 @@ CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
}
}
-#ifdef GC_PROFILING
-void CFinalize::WalkFReachableObjects (gc_heap* hp)
+void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
{
- BEGIN_PIN_PROFILER(CORProfilerPresent());
Object** startIndex = SegQueue (CriticalFinalizerListSeg);
Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg);
Object** stopIndex = SegQueueLimit (FinalizerListSeg);
for (Object** po = startIndex; po < stopIndex; po++)
{
//report *po
- g_profControlBlock.pProfInterface->FinalizeableObjectQueued(po < stopCriticalIndex, (ObjectID)*po);
+ fn(po < stopCriticalIndex, *po);
}
- END_PIN_PROFILER();
}
-#endif //GC_PROFILING
BOOL
CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
@@ -36528,8 +36361,7 @@ void CFinalize::CheckFinalizerObjects()
// End of VM specific support
//
//------------------------------------------------------------------------------
-
-void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
{
generation* gen = gc_heap::generation_of (gen_number);
heap_segment* seg = generation_start_segment (gen);
@@ -36585,9 +36417,29 @@ void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_la
}
}
-void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
+void gc_heap::walk_finalize_queue (fq_walk_fn fn)
+{
+#ifdef FEATURE_PREMORTEM_FINALIZATION
+ finalize_queue->WalkFReachableObjects (fn);
+#endif //FEATURE_PREMORTEM_FINALIZATION
+}
+
+void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+{
+#ifdef MULTIPLE_HEAPS
+ for (int hn = 0; hn < gc_heap::n_heaps; hn++)
+ {
+ gc_heap* hp = gc_heap::g_heaps [hn];
+
+ hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p);
+ }
+#else
+ walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p);
+#endif //MULTIPLE_HEAPS
+}
+
+void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
uint8_t* o = (uint8_t*)obj;
if (o)
{
@@ -36602,7 +36454,48 @@ void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context)
}
);
}
-#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
+
+void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type)
+{
+ gc_heap* hp = (gc_heap*)gc_context;
+ hp->walk_survivors (fn, diag_context, type);
+}
+
+void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
+{
+ gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
+}
+
+void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn)
+{
+ gc_heap* hp = (gc_heap*)gc_context;
+ hp->walk_finalize_queue (fn);
+}
+
+void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
+{
+#ifdef MULTIPLE_HEAPS
+ for (int hn = 0; hn < gc_heap::n_heaps; hn++)
+ {
+ gc_heap* hp = gc_heap::g_heaps [hn];
+ hp->finalize_queue->GcScanRoots(fn, hn, sc);
+ }
+#else
+ pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc);
+#endif //MULTIPLE_HEAPS
+}
+
+void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
+{
+ UNREFERENCED_PARAMETER(gen_number);
+ GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
+}
+
+void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
+{
+ UNREFERENCED_PARAMETER(gen_number);
+ GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
}
// Go through and touch (read) each page straddled by a memory block.
@@ -36649,11 +36542,11 @@ void initGCShadow()
if (!(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK))
return;
- size_t len = g_highest_address - g_lowest_address;
+ size_t len = g_gc_highest_address - g_gc_lowest_address;
if (len > (size_t)(g_GCShadowEnd - g_GCShadow))
{
deleteGCShadow();
- g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(0, len, 0, VirtualReserveFlags::None);
+ g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None);
if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len))
{
_ASSERTE(!"Not enough memory to run HeapVerify level 2");
@@ -36668,10 +36561,10 @@ void initGCShadow()
g_GCShadowEnd += len;
}
- // save the value of g_lowest_address at this time. If this value changes before
+ // save the value of g_gc_lowest_address at this time. If this value changes before
// the next call to checkGCWriteBarrier() it means we extended the heap (with a
// large object segment most probably), and the whole shadow segment is inconsistent.
- g_shadow_lowest_address = g_lowest_address;
+ g_shadow_lowest_address = g_gc_lowest_address;
//****** Copy the whole GC heap ******
//
@@ -36681,7 +36574,7 @@ void initGCShadow()
generation* gen = gc_heap::generation_of (max_generation);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
- ptrdiff_t delta = g_GCShadow - g_lowest_address;
+ ptrdiff_t delta = g_GCShadow - g_gc_lowest_address;
BOOL small_object_segments = TRUE;
while(1)
{
@@ -36709,7 +36602,7 @@ void initGCShadow()
// test to see if 'ptr' was only updated via the write barrier.
inline void testGCShadow(Object** ptr)
{
- Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_lowest_address)];
+ Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)];
if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow)
{
@@ -36768,9 +36661,9 @@ void testGCShadowHelper (uint8_t* x)
// Walk the whole heap, looking for pointers that were not updated with the write barrier.
void checkGCWriteBarrier()
{
- // g_shadow_lowest_address != g_lowest_address means the GC heap was extended by a segment
+ // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment
// and the GC shadow segment did not track that change!
- if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_lowest_address)
+ if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address)
{
// No shadow stack, nothing to check.
return;
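A minimal sketch (illustration only, not part of the patch) of a consumer of the new survivor-walk plumbing. The record_surv_fn signature, the walk_for_gc value, and DiagWalkSurvivorsWithType come from the changes above; SurvivorStats and CountSurvivors are invented names, and the GC headers are assumed to be in scope for BOOL/uint8_t.

// Hypothetical EE-side callback matching record_surv_fn:
//   void (*)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc,
//            size_t context, BOOL compacting_p, BOOL bgc_p)
struct SurvivorStats
{
    size_t survived_bytes;
    size_t relocated_plugs;
};

static void CountSurvivors(uint8_t* begin, uint8_t* end, ptrdiff_t reloc,
                           size_t context, BOOL compacting_p, BOOL bgc_p)
{
    SurvivorStats* stats = (SurvivorStats*)context;
    stats->survived_bytes += (size_t)(end - begin);
    if (compacting_p && reloc != 0)
        stats->relocated_plugs++;   // this plug will move by 'reloc' bytes
    (void)bgc_p;                    // TRUE only on background-GC walks
}

// After the plan phase, EE-side diagnostics code would request the walk:
//   SurvivorStats stats = {};
//   pGCHeap->DiagWalkSurvivorsWithType(gc_context, CountSurvivors,
//                                      (size_t)&stats, walk_for_gc);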
diff --git a/src/Native/gc/gc.h b/src/Native/gc/gc.h
index ca9c28d8f..6f8626a3d 100644
--- a/src/Native/gc/gc.h
+++ b/src/Native/gc/gc.h
@@ -14,7 +14,19 @@ Module Name:
#ifndef __GC_H
#define __GC_H
+#ifdef Sleep
+// This is a funny workaround for the fact that "common.h" defines Sleep to be
+// Dont_Use_Sleep, with the hope of causing linker errors whenever someone tries to use sleep.
+//
+// However, GCToOSInterface defines a function called Sleep, which (due to this define) becomes
+// "Dont_Use_Sleep", which the GC in turn happily uses. The symbol that GCToOSInterface actually
+// exported was called "GCToOSInterface::Dont_Use_Sleep". While we progress in making the GC standalone,
+// we'll need to break the dependency on common.h (the VM header) and this problem will become moot.
+#undef Sleep
+#endif // Sleep
+
#include "gcinterface.h"
+#include "env/gcenv.os.h"
#include "env/gcenv.ee.h"
#ifdef FEATURE_STANDALONE_GC
@@ -125,6 +137,10 @@ class DacHeapWalker;
#define MP_LOCKS
+extern "C" uint32_t* g_gc_card_table;
+extern "C" uint8_t* g_gc_lowest_address;
+extern "C" uint8_t* g_gc_highest_address;
+
namespace WKS {
::IGCHeapInternal* CreateGCHeap();
class GCHeap;
diff --git a/src/Native/gc/gccommon.cpp b/src/Native/gc/gccommon.cpp
index 2e6bfce83..0292705a1 100644
--- a/src/Native/gc/gccommon.cpp
+++ b/src/Native/gc/gccommon.cpp
@@ -26,28 +26,22 @@ IGCHeapInternal* g_theGCHeap;
IGCToCLR* g_theGCToCLR;
#endif // FEATURE_STANDALONE_GC
-/* global versions of the card table and brick table */
-GPTR_IMPL(uint32_t,g_card_table);
-
-/* absolute bounds of the GC memory */
-GPTR_IMPL_INIT(uint8_t,g_lowest_address,0);
-GPTR_IMPL_INIT(uint8_t,g_highest_address,0);
-
#ifdef GC_CONFIG_DRIVEN
GARY_IMPL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
#endif //GC_CONFIG_DRIVEN
#ifndef DACCESS_COMPILE
-uint8_t* g_ephemeral_low = (uint8_t*)1;
-uint8_t* g_ephemeral_high = (uint8_t*)~0;
-
#ifdef WRITE_BARRIER_CHECK
uint8_t* g_GCShadow;
uint8_t* g_GCShadowEnd;
uint8_t* g_shadow_lowest_address = NULL;
#endif
+uint32_t* g_gc_card_table;
+uint8_t* g_gc_lowest_address = 0;
+uint8_t* g_gc_highest_address = 0;
+
VOLATILE(int32_t) m_GCLock = -1;
#ifdef GC_CONFIG_DRIVEN
diff --git a/src/Native/gc/gcee.cpp b/src/Native/gc/gcee.cpp
index 58a553661..c93cc91b5 100644
--- a/src/Native/gc/gcee.cpp
+++ b/src/Native/gc/gcee.cpp
@@ -381,209 +381,6 @@ size_t GCHeap::GetNow()
return GetHighPrecisionTimeStamp();
}
-void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
-{
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- Object *pObj = *ppObject;
-#ifdef INTERIOR_POINTERS
- if (dwFlags & GC_CALL_INTERIOR)
- {
- uint8_t *o = (uint8_t*)pObj;
- gc_heap* hp = gc_heap::heap_of (o);
-
- if ((o < hp->gc_low) || (o >= hp->gc_high))
- {
- return;
- }
- pObj = (Object*) hp->find_object(o, hp->gc_low);
- }
-#endif //INTERIOR_POINTERS
- ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-}
-
-// TODO - at some point we would like to completely decouple profiling
-// from ETW tracing using a pattern similar to this, where the
-// ProfilingScanContext has flags about whether or not certain things
-// should be tracked, and each one of these ProfilerShouldXYZ functions
-// will check these flags and determine what to do based upon that.
-// GCProfileWalkHeapWorker can, in turn, call those methods without fear
-// of things being ifdef'd out.
-
-// Returns TRUE if GC profiling is enabled and the profiler
-// should scan dependent handles, FALSE otherwise.
-BOOL ProfilerShouldTrackConditionalWeakTableElements()
-{
-#if defined(GC_PROFILING)
- return CORProfilerTrackConditionalWeakTableElements();
-#else
- return FALSE;
-#endif // defined (GC_PROFILING)
-}
-
-// If GC profiling is enabled, informs the profiler that we are done
-// tracing dependent handles.
-void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
-{
-#if defined (GC_PROFILING)
- g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
-#else
- UNREFERENCED_PARAMETER(heapId);
-#endif // defined (GC_PROFILING)
-}
-
-// If GC profiling is enabled, informs the profiler that we are done
-// tracing root references.
-void ProfilerEndRootReferences2(void* heapId)
-{
-#if defined (GC_PROFILING)
- g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
-#else
- UNREFERENCED_PARAMETER(heapId);
-#endif // defined (GC_PROFILING)
-}
-
-// This is called only if we've determined that either:
-// a) The Profiling API wants to do a walk of the heap, and it has pinned the
-// profiler in place (so it cannot be detached), and it's thus safe to call into the
-// profiler, OR
-// b) ETW infrastructure wants to do a walk of the heap either to log roots,
-// objects, or both.
-// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
-// ETW can ask for roots, but not objects
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
-{
- {
- ProfilingScanContext SC(fProfilerPinned);
-
- // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
- if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
- {
-#ifdef MULTIPLE_HEAPS
- int hn;
-
- // Must emulate each GC thread number so we can hit each
- // heap for enumerating the roots.
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- // Ask the vm to go over all of the roots for this specific
- // heap.
- gc_heap* hp = gc_heap::g_heaps [hn];
- SC.thread_number = hn;
- GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
-
- // The finalizer queue is also a source of roots
- SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- hp->finalize_queue->GcScanRoots(&ProfScanRootsHelper, hn, &SC);
- }
-#else
- // Ask the vm to go over all of the roots
- GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
-
- // The finalizer queue is also a source of roots
- SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- pGenGCHeap->finalize_queue->GcScanRoots(&ProfScanRootsHelper, 0, &SC);
-
-#endif // MULTIPLE_HEAPS
- // Handles are kept independent of wks/svr/concurrent builds
- SC.dwEtwRootKind = kEtwGCRootKindHandle;
- GCScan::GcScanHandlesForProfilerAndETW(max_generation, &SC);
-
- // indicate that regular handle scanning is over, so we can flush the buffered roots
- // to the profiler. (This is for profapi only. ETW will flush after the
- // entire heap was is complete, via ETW::GCLog::EndHeapDump.)
- if (fProfilerPinned)
- {
- ProfilerEndRootReferences2(&SC.pHeapId);
- }
- }
-
- // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
- if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
- fShouldWalkHeapRootsForEtw)
- {
- // GcScanDependentHandlesForProfiler double-checks
- // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
-
- GCScan::GcScanDependentHandlesForProfilerAndETW(max_generation, &SC);
-
- // indicate that dependent handle scanning is over, so we can flush the buffered roots
- // to the profiler. (This is for profapi only. ETW will flush after the
- // entire heap was is complete, via ETW::GCLog::EndHeapDump.)
- if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
- {
- ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
- }
- }
-
- ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
-
- // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
- if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
- {
-#ifdef MULTIPLE_HEAPS
- int hn;
-
- // Walk the heap and provide the objref to the profiler
- for (hn = 0; hn < gc_heap::n_heaps; hn++)
- {
- gc_heap* hp = gc_heap::g_heaps [hn];
- hp->walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE /* walk the large object heap */);
- }
-#else
- gc_heap::walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE);
-#endif //MULTIPLE_HEAPS
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
- // should be flushed into the ETW stream
- if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
- {
- ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
- }
-#endif // FEATURE_EVENT_TRACE
- }
-}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-void GCProfileWalkHeap()
-{
- BOOL fWalkedHeapForProfiler = FALSE;
-
-#ifdef FEATURE_EVENT_TRACE
- if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
- ETW::GCLog::WalkStaticsAndCOMForETW();
-
- BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
- BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
-#else // !FEATURE_EVENT_TRACE
- BOOL fShouldWalkHeapRootsForEtw = FALSE;
- BOOL fShouldWalkHeapObjectsForEtw = FALSE;
-#endif // FEATURE_EVENT_TRACE
-
-#if defined (GC_PROFILING)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
- fWalkedHeapForProfiler = TRUE;
- END_PIN_PROFILER();
- }
-#endif // defined (GC_PROFILING)
-
-#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
- // is defined, since both of them make use of the walk heap worker.
- if (!fWalkedHeapForProfiler &&
- (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
- {
- GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
- }
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-}
-
BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
{
return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE);
@@ -786,7 +583,7 @@ IGCHeapInternal* CreateGCHeap() {
return new(nothrow) GCHeap(); // we return wks or svr
}
-void GCHeap::TraceGCSegments()
+void GCHeap::DiagTraceGCSegments()
{
#ifdef FEATURE_EVENT_TRACE
heap_segment* seg = 0;
@@ -823,7 +620,7 @@ void GCHeap::TraceGCSegments()
#endif // FEATURE_EVENT_TRACE
}
-void GCHeap::DescrGenerationsToProfiler (gen_walk_fn fn, void *context)
+void GCHeap::DiagDescrGenerations (gen_walk_fn fn, void *context)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
pGenGCHeap->descr_generations_to_profiler(fn, context);
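For illustration only (not part of the patch): a possible gen_walk_fn callback for the renamed DiagDescrGenerations entry point. The callback signature matches the gen_walk_fn typedef added in gcinterface.h below; PrintGeneration is an invented name.

#include <cstdint>
#include <cstdio>

static void PrintGeneration(void* context, int generation,
                            uint8_t* range_start, uint8_t* range_end,
                            uint8_t* range_reserved)
{
    (void)context;
    printf("gen %d: allocated [%p, %p), reserved to %p\n",
           generation, (void*)range_start, (void*)range_end, (void*)range_reserved);
}

// Usage from EE-side diagnostics code:
//   pGCHeap->DiagDescrGenerations(PrintGeneration, nullptr);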
diff --git a/src/Native/gc/gcenv.ee.standalone.inl b/src/Native/gc/gcenv.ee.standalone.inl
index 2ecc6fc83..3b64586d7 100644
--- a/src/Native/gc/gcenv.ee.standalone.inl
+++ b/src/Native/gc/gcenv.ee.standalone.inl
@@ -125,4 +125,52 @@ inline Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunctio
return g_theGCToCLR->CreateBackgroundThread(threadStart, arg);
}
+inline void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagGCStart(gen, isInduced);
+}
+
+inline void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagUpdateGenerationBounds();
+}
+
+inline void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagGCEnd(index, gen, reason, fConcurrent);
+}
+
+inline void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkFReachableObjects(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->DiagWalkLOHSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->DiagWalkBGCSurvivors(gcContext);
+}
+
+inline void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
+{
+ assert(g_theGCToCLR != nullptr);
+ g_theGCToCLR->StompWriteBarrier(args);
+}
+
#endif // __GCTOENV_EE_STANDALONE_INL__
diff --git a/src/Native/gc/gcimpl.h b/src/Native/gc/gcimpl.h
index d7393c357..cb91c4dc3 100644
--- a/src/Native/gc/gcimpl.h
+++ b/src/Native/gc/gcimpl.h
@@ -77,7 +77,7 @@ public:
size_t GetLastGCDuration(int generation);
size_t GetNow();
- void TraceGCSegments ();
+ void DiagTraceGCSegments ();
void PublishObject(uint8_t* obj);
BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE);
@@ -198,8 +198,7 @@ public:
BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers);
BOOL ShouldRestartFinalizerWatchDog();
- void SetCardsAfterBulkCopy( Object**, size_t);
- void WalkObject (Object* obj, walk_fn fn, void* context);
+ void DiagWalkObject (Object* obj, walk_fn fn, void* context);
public: // FIX
@@ -272,7 +271,19 @@ protected:
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK
- virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context);
+ virtual void DiagDescrGenerations (gen_walk_fn fn, void *context);
+
+ virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type);
+
+ virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn);
+
+ virtual void DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* context);
+
+ virtual void DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
+
+ virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context);
+
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
public:
Object * NextObj (Object * object);
diff --git a/src/Native/gc/gcinterface.ee.h b/src/Native/gc/gcinterface.ee.h
index 36d20c719..c5f87ef03 100644
--- a/src/Native/gc/gcinterface.ee.h
+++ b/src/Native/gc/gcinterface.ee.h
@@ -92,6 +92,42 @@ public:
// Creates and returns a new background thread.
virtual
Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg) = 0;
+
+ // When a GC starts, gives the diagnostics code a chance to run.
+ virtual
+ void DiagGCStart(int gen, bool isInduced) = 0;
+
+ // When GC heap segments change, gives the diagnostics code a chance to run.
+ virtual
+ void DiagUpdateGenerationBounds() = 0;
+
+ // When a GC ends, gives the diagnostics code a chance to run.
+ virtual
+ void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) = 0;
+
+ // During a GC after we discover what objects' finalizers should run, gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkFReachableObjects(void* gcContext) = 0;
+
+ // During a GC after we discover the survivors and the relocation info,
+ // gives the diagnostics code a chance to run. This includes LOH if we are
+ // compacting LOH.
+ virtual
+ void DiagWalkSurvivors(void* gcContext) = 0;
+
+ // During a full GC, after we discover which objects survive on the LOH,
+ // gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkLOHSurvivors(void* gcContext) = 0;
+
+ // At the end of a background GC, gives the diagnostics code a chance to run.
+ virtual
+ void DiagWalkBGCSurvivors(void* gcContext) = 0;
+
+ // Informs the EE of changes to the location of the card table, updating the write
+ // barrier if necessary.
+ virtual
+ void StompWriteBarrier(WriteBarrierParameters* args) = 0;
};
#endif // _GCINTERFACE_EE_H_
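A hedged sketch of how an EE bringing up a standalone GC might handle the new notifications. These are free-standing helpers, not the real EE code; an IGCToCLR implementation would forward its DiagGCStart/DiagGCEnd overrides to something like them. The DiagGCEnd parameter meanings follow the gc_heap::do_post_gc call site above (index = GC number, gen = condemned generation, reason = the gc_reason value); the DiagGCStart meanings are inferred from the parameter names.

#include <cstddef>
#include <cstdio>

static void OnDiagGCStart(int gen, bool isInduced)
{
    printf("GC starting: condemned gen %d%s\n", gen, isInduced ? " (induced)" : "");
}

static void OnDiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
{
    printf("GC #%zu finished: gen %d, reason %d, %s\n",
           index, gen, reason, fConcurrent ? "background" : "blocking");
}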
diff --git a/src/Native/gc/gcinterface.h b/src/Native/gc/gcinterface.h
index ac14332dc..4ba4e0c63 100644
--- a/src/Native/gc/gcinterface.h
+++ b/src/Native/gc/gcinterface.h
@@ -34,6 +34,70 @@ typedef enum
SUSPEND_FOR_GC_PREP = 6
} SUSPEND_REASON;
+typedef enum
+{
+ walk_for_gc = 1,
+ walk_for_bgc = 2,
+ walk_for_loh = 3
+} walk_surv_type;
+
+// Different operations that can be done by GCToEEInterface::StompWriteBarrier
+enum class WriteBarrierOp
+{
+ StompResize,
+ StompEphemeral,
+ Initialize,
+ SwitchToWriteWatch,
+ SwitchToNonWriteWatch
+};
+
+// Arguments to GCToEEInterface::StompWriteBarrier
+struct WriteBarrierParameters
+{
+ // The operation that StompWriteBarrier will perform.
+ WriteBarrierOp operation;
+
+ // Whether or not the runtime is currently suspended. If it is not,
+ // the EE will need to suspend it before bashing the write barrier.
+ // Used for all operations.
+ bool is_runtime_suspended;
+
+ // Whether or not the GC has moved the ephemeral generation to no longer
+ // be at the top of the heap. When the ephemeral generation is at the top
+ // of the heap, and the write barrier observes that a pointer is greater than
+ // g_ephemeral_low, it does not need to check that the pointer is less than
+ // g_ephemeral_high because there is nothing in the GC heap above the ephemeral
+ // generation. When this is not the case, however, the GC must inform the EE
+ // so that the EE can switch to a write barrier that checks that a pointer
+ // is both greater than g_ephemeral_low and less than g_ephemeral_high.
+ // Used for WriteBarrierOp::StompResize.
+ bool requires_upper_bounds_check;
+
+ // The new card table location. May or may not be the same as the previous
+ // card table. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
+ uint32_t* card_table;
+
+ // The heap's new low boundary. May or may not be the same as the previous
+ // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
+ uint8_t* lowest_address;
+
+ // The heap's new high boundary. May or may not be the same as the previous
+ // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
+ uint8_t* highest_address;
+
+ // The new start of the ephemeral generation.
+ // Used for WriteBarrierOp::StompEphemeral.
+ uint8_t* ephemeral_low;
+
+ // The new end of the ephemeral generation.
+ // Used for WriteBarrierOp::StompEphemeral.
+ uint8_t* ephemeral_high;
+
+ // The new write watch table, if we are using our own write watch
+ // implementation. Used for WriteBarrierOp::SwitchToWriteWatch only.
+ uint8_t* write_watch_table;
+};
+
#include "gcinterface.ee.h"
// The allocation context must be known to the VM for use in the allocation
@@ -88,6 +152,12 @@ struct segment_info
// one for the object header, and one for the first field in the object.
#define min_obj_size ((sizeof(uint8_t*) + sizeof(uintptr_t) + sizeof(size_t)))
+#define max_generation 2
+
+// The bit shift used to convert a memory address into an index into the
+// Software Write Watch table.
+#define SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift 0xc
+
class Object;
class IGCHeap;
@@ -101,19 +171,6 @@ IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC);
// and the heap is actually created.
void InitializeHeapType(bool bServerHeap);
-#ifndef DACCESS_COMPILE
-extern "C" {
-#endif // !DACCESS_COMPILE
-GPTR_DECL(uint8_t,g_lowest_address);
-GPTR_DECL(uint8_t,g_highest_address);
-GPTR_DECL(uint32_t,g_card_table);
-#ifndef DACCESS_COMPILE
-}
-#endif // !DACCESS_COMPILE
-
-extern "C" uint8_t* g_ephemeral_low;
-extern "C" uint8_t* g_ephemeral_high;
-
#ifdef WRITE_BARRIER_CHECK
//always defined, but should be 0 in Server GC
extern uint8_t* g_GCShadow;
@@ -174,6 +231,10 @@ enum end_no_gc_region_status
typedef BOOL (* walk_fn)(Object*, void*);
typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved);
+typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, size_t context, BOOL compacting_p, BOOL bgc_p);
+typedef void (* fq_walk_fn)(BOOL, void*);
+typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags);
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
// IGCHeap is the interface that the VM will use when interacting with the GC.
class IGCHeap {
@@ -347,9 +408,6 @@ public:
// sanity checks asserting that a GC has not occurred.
virtual unsigned GetGcCount() = 0;
- // Sets cards after an object has been memmoved.
- virtual void SetCardsAfterBulkCopy(Object** obj, size_t length) = 0;
-
// Gets whether or not the home heap of this alloc context matches the heap
// associated with this thread.
virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0;
@@ -413,8 +471,8 @@ public:
// with the given size and flags.
virtual Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;
- // If allocating on the LOH, blocks if a BGC is in a position (concurrent mark)
- // where the LOH allocator can't allocate.
+ // This is for the allocator to indicate it's done allocating a large object during a
+ // background GC as the BGC threads also need to walk LOH.
virtual void PublishObject(uint8_t* obj) = 0;
// Gets the event that suspended threads will use to wait for the
@@ -449,13 +507,31 @@ public:
*/
// Walks an object, invoking a callback on each member.
- virtual void WalkObject(Object* obj, walk_fn fn, void* context) = 0;
+ virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0;
+
+ // Walk the heap object by object.
+ virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) = 0;
+
+ // Walks the survivors and gets the relocation information if objects have moved.
+ virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) = 0;
+
+ // Walks the finalization queue.
+ virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0;
+
+ // Scan roots on finalizer queue. This is a generic function.
+ virtual void DiagScanFinalizeQueue(fq_scan_fn fn, ScanContext* context) = 0;
+
+ // Scan handles for profiling or ETW.
+ virtual void DiagScanHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;
+
+ // Scan dependent handles for profiling or ETW.
+ virtual void DiagScanDependentHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;
// Describes all generations to the profiler, invoking a callback on each generation.
- virtual void DescrGenerationsToProfiler(gen_walk_fn fn, void* context) = 0;
+ virtual void DiagDescrGenerations(gen_walk_fn fn, void* context) = 0;
// Traces all GC segments and fires ETW events with information on them.
- virtual void TraceGCSegments() = 0;
+ virtual void DiagTraceGCSegments() = 0;
/*
===========================================================================
@@ -550,26 +626,4 @@ struct ScanContext
}
};
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-struct ProfilingScanContext : ScanContext
-{
- BOOL fProfilerPinned;
- void * pvEtwContext;
- void *pHeapId;
-
- ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
- {
- LIMITED_METHOD_CONTRACT;
-
- pHeapId = NULL;
- fProfilerPinned = fProfilerPinnedParam;
- pvEtwContext = NULL;
-#ifdef FEATURE_CONSERVATIVE_GC
- // To not confuse GCScan::GcScanRoots
- promotion = g_pConfig->GetGCConservative();
-#endif
- }
-};
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
#endif // _GC_INTERFACE_H_
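A sketch of how the GC side might package a StompEphemeral request with the new WriteBarrierParameters struct. The field names, the WriteBarrierOp value, and GCToEEInterface::StompWriteBarrier come from this change; the wrapper function and its arguments are invented for the example.

// Illustrative only: notify the EE that the ephemeral range moved.
static void stomp_ephemeral_bounds_sketch(uint8_t* new_low, uint8_t* new_high,
                                          bool runtime_suspended)
{
    WriteBarrierParameters args = {};
    args.operation            = WriteBarrierOp::StompEphemeral;
    args.is_runtime_suspended = runtime_suspended;  // if false, the EE suspends first
    args.ephemeral_low        = new_low;            // new start of the ephemeral generation
    args.ephemeral_high       = new_high;           // new end of the ephemeral generation
    GCToEEInterface::StompWriteBarrier(&args);
}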
diff --git a/src/Native/gc/gcpriv.h b/src/Native/gc/gcpriv.h
index e0147c33c..1f97d7f2d 100644
--- a/src/Native/gc/gcpriv.h
+++ b/src/Native/gc/gcpriv.h
@@ -24,7 +24,9 @@
inline void FATAL_GC_ERROR()
{
+#ifndef DACCESS_COMPILE
GCToOSInterface::DebugBreak();
+#endif // DACCESS_COMPILE
_ASSERTE(!"Fatal Error in GC.");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
@@ -1073,9 +1075,6 @@ enum interesting_data_point
};
//class definition of the internal class
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
class gc_heap
{
friend struct ::_DacGlobals;
@@ -1225,7 +1224,7 @@ public:
static
gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
static
- void __stdcall gc_thread_stub (void* arg);
+ void gc_thread_stub (void* arg);
#endif //MULTIPLE_HEAPS
CObjectHeader* try_fast_alloc (size_t jsize);
@@ -1283,35 +1282,48 @@ public:
protected:
- PER_HEAP
+ PER_HEAP_ISOLATED
void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+ PER_HEAP
+ void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p);
+
struct walk_relocate_args
{
uint8_t* last_plug;
BOOL is_shortened;
mark* pinned_plug_entry;
+ size_t profiling_context;
+ record_surv_fn fn;
};
PER_HEAP
+ void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type);
+
+ PER_HEAP
void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p,
- walk_relocate_args* args, size_t profiling_context);
+ walk_relocate_args* args);
PER_HEAP
- void walk_relocation (int condemned_gen_number,
- uint8_t* first_condemned_address, size_t profiling_context);
+ void walk_relocation (size_t profiling_context, record_surv_fn fn);
PER_HEAP
- void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context);
+ void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args);
-#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_relocation_for_bgc(size_t profiling_context);
+ void walk_finalize_queue (fq_walk_fn fn);
+#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void make_free_lists_for_profiler_for_bgc();
+ void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn);
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
+ // Used in blocking GCs after the plan phase, so this walks the plugs.
+ PER_HEAP
+ void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn);
+ PER_HEAP
+ void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn);
+
PER_HEAP
int generation_to_condemn (int n,
BOOL* blocking_collection_p,
@@ -1659,7 +1671,7 @@ protected:
PER_HEAP
void reset_write_watch (BOOL concurrent_p);
PER_HEAP
- void adjust_ephemeral_limits (bool is_runtime_suspended);
+ void adjust_ephemeral_limits ();
PER_HEAP
void make_generation (generation& gen, heap_segment* seg,
uint8_t* start, uint8_t* pointer);
@@ -2148,10 +2160,8 @@ protected:
PER_HEAP
void relocate_in_loh_compact();
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
PER_HEAP
- void walk_relocation_loh (size_t profiling_context);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn);
PER_HEAP
BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len);
@@ -2549,12 +2559,6 @@ protected:
PER_HEAP_ISOLATED
void descr_generations_to_profiler (gen_walk_fn fn, void *context);
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- PER_HEAP
- void record_survived_for_profiler(int condemned_gen_number, uint8_t * first_condemned_address);
- PER_HEAP
- void notify_profiler_of_surviving_large_objects ();
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
/*------------ Multiple non isolated heaps ----------------*/
#ifdef MULTIPLE_HEAPS
@@ -2798,13 +2802,11 @@ public:
PER_HEAP
void exit_gc_done_event_lock();
-#ifdef MULTIPLE_HEAPS
PER_HEAP
uint8_t* ephemeral_low; //lowest ephemeral address
PER_HEAP
uint8_t* ephemeral_high; //highest ephemeral address
-#endif //MULTIPLE_HEAPS
PER_HEAP
uint32_t* card_table;
@@ -3763,9 +3765,7 @@ public:
Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE);
BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp);
void RelocateFinalizationData (int gen, gc_heap* hp);
-#ifdef GC_PROFILING
- void WalkFReachableObjects (gc_heap* hp);
-#endif //GC_PROFILING
+ void WalkFReachableObjects (fq_walk_fn fn);
void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC);
void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p);
size_t GetPromotedCount();
@@ -4317,9 +4317,6 @@ dynamic_data* gc_heap::dynamic_data_of (int gen_number)
return &dynamic_data_table [ gen_number ];
}
-extern "C" uint8_t* g_ephemeral_low;
-extern "C" uint8_t* g_ephemeral_high;
-
#define card_word_width ((size_t)32)
//
diff --git a/src/Native/gc/gcrecord.h b/src/Native/gc/gcrecord.h
index 8c95ad04d..fff1fc5c8 100644
--- a/src/Native/gc/gcrecord.h
+++ b/src/Native/gc/gcrecord.h
@@ -13,7 +13,7 @@ Module Name:
#ifndef __gc_record_h__
#define __gc_record_h__
-#define max_generation 2
+//#define max_generation 2
// We pack the dynamic tuning for deciding which gen to condemn in a uint32_t.
// We assume that 2 bits are enough to represent the generation.
diff --git a/src/Native/gc/gcscan.cpp b/src/Native/gc/gcscan.cpp
index f021554fd..b4e6352dd 100644
--- a/src/Native/gc/gcscan.cpp
+++ b/src/Native/gc/gcscan.cpp
@@ -192,33 +192,32 @@ void GCScan::GcScanHandles (promote_func* fn, int condemned, int max_gen,
}
}
-
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
/*
* Scan all handle roots in this 'namespace' for profiling
*/
-void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc)
+void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn)
{
LIMITED_METHOD_CONTRACT;
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, Handles\n"));
- Ref_ScanPointersForProfilerAndETW(max_gen, (uintptr_t)sc);
+ Ref_ScanHandlesForProfilerAndETW(max_gen, (uintptr_t)sc, fn);
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
/*
* Scan dependent handles in this 'namespace' for profiling
*/
-void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc)
+void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn)
{
LIMITED_METHOD_CONTRACT;
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, DependentHandles\n"));
- Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc);
-}
-
+ Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc, fn);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
void GCScan::GcRuntimeStructuresValid (BOOL bValid)
{
diff --git a/src/Native/gc/gcscan.h b/src/Native/gc/gcscan.h
index 3515b8e1b..362370fa4 100644
--- a/src/Native/gc/gcscan.h
+++ b/src/Native/gc/gcscan.h
@@ -52,10 +52,8 @@ class GCScan
static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif // DACCESS_COMPILE
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
- static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc);
- static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn);
+ static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn);
// scan for dead weak pointers
static void GcWeakPtrScan (promote_func* fn, int condemned, int max_gen, ScanContext*sc );
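For illustration (not part of the patch): a handle_scan_fn callback of the kind the reworked GcScanHandlesForProfilerAndETW / GcScanDependentHandlesForProfilerAndETW now accept. The signature comes from the handle_scan_fn typedef in gcinterface.h above; CountHandleRoots and the counters are invented, and the GC headers are assumed in scope for Object/ScanContext/BOOL.

static size_t g_handle_roots;
static size_t g_dependent_handle_roots;

// Matches: typedef void (* handle_scan_fn)(Object** pRef, Object* pSec,
//              uint32_t flags, ScanContext* context, BOOL isDependent);
static void CountHandleRoots(Object** pRef, Object* pSec, uint32_t flags,
                             ScanContext* context, BOOL isDependent)
{
    (void)pRef; (void)pSec; (void)flags; (void)context;
    if (isDependent)
        g_dependent_handle_roots++;   // primary/secondary pair from a dependent handle
    else
        g_handle_roots++;             // ordinary handle root
}

// EE-side diagnostics code would reach these scans through the IGCHeap surface:
//   pGCHeap->DiagScanHandles(CountHandleRoots, max_generation, &sc);
//   pGCHeap->DiagScanDependentHandles(CountHandleRoots, max_generation, &sc);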
diff --git a/src/Native/gc/gcsvr.cpp b/src/Native/gc/gcsvr.cpp
index cf5fc9335..70801dd4e 100644
--- a/src/Native/gc/gcsvr.cpp
+++ b/src/Native/gc/gcsvr.cpp
@@ -13,6 +13,7 @@
#include "gc.h"
#include "gcscan.h"
#include "gcdesc.h"
+#include "softwarewritewatch.h"
#define SERVER_GC 1
diff --git a/src/Native/gc/gcwks.cpp b/src/Native/gc/gcwks.cpp
index 574df8215..5c489df0e 100644
--- a/src/Native/gc/gcwks.cpp
+++ b/src/Native/gc/gcwks.cpp
@@ -11,6 +11,7 @@
#include "gc.h"
#include "gcscan.h"
#include "gcdesc.h"
+#include "softwarewritewatch.h"
#ifdef SERVER_GC
#undef SERVER_GC
diff --git a/src/Native/gc/handletablecache.cpp b/src/Native/gc/handletablecache.cpp
index b2af40c82..aaf3370bd 100644
--- a/src/Native/gc/handletablecache.cpp
+++ b/src/Native/gc/handletablecache.cpp
@@ -15,6 +15,12 @@
#include "gcenv.h"
+#ifdef Sleep // TODO(segilles)
+#undef Sleep
+#endif // Sleep
+
+#include "env/gcenv.os.h"
+
#include "handletablepriv.h"
/****************************************************************************
diff --git a/src/Native/gc/handletablecore.cpp b/src/Native/gc/handletablecore.cpp
index be65b142b..5776c26ac 100644
--- a/src/Native/gc/handletablecore.cpp
+++ b/src/Native/gc/handletablecore.cpp
@@ -611,7 +611,7 @@ TableSegment *SegmentAlloc(HandleTable *pTable)
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT >= HANDLE_SEGMENT_SIZE);
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT == 0x10000);
- pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(NULL, HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None);
+ pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None);
_ASSERTE(((size_t)pSegment % HANDLE_SEGMENT_ALIGNMENT) == 0);
// bail out if we couldn't get any memory
diff --git a/src/Native/gc/objecthandle.cpp b/src/Native/gc/objecthandle.cpp
index d8834b72f..e8eed9300 100644
--- a/src/Native/gc/objecthandle.cpp
+++ b/src/Native/gc/objecthandle.cpp
@@ -110,6 +110,21 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtra
}
#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
+
+// Only used by profiling/ETW.
+//----------------------------------------------------------------------------
+
+/*
+ * struct DIAG_DEPSCANINFO
+ *
+ * used when tracing dependent handles for profiling/ETW.
+ */
+struct DIAG_DEPSCANINFO
+{
+ HANDLESCANPROC pfnTrace; // tracing function to use
+ uintptr_t pfnProfilingOrETW; // profiling/ETW callback (handle_scan_fn), passed to pfnTrace as its extra-info argument
+};
+
void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
WRAPPER_NO_CONTRACT;
@@ -122,14 +137,15 @@ void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx
// object should also be non-NULL.
_ASSERTE(*pExtraInfo == NULL || *pObjRef != NULL);
- // lp2 is a HANDLESCANPROC
- HANDLESCANPROC pfnTrace = (HANDLESCANPROC) lp2;
+ struct DIAG_DEPSCANINFO *pInfo = (struct DIAG_DEPSCANINFO*)lp2;
+
+ HANDLESCANPROC pfnTrace = pInfo->pfnTrace;
// is the handle's secondary object non-NULL?
if ((*pObjRef != NULL) && (*pExtraInfo != 0))
{
// yes - call the tracing function for this handle
- pfnTrace(pObjRef, NULL, lp1, *pExtraInfo);
+ pfnTrace(pObjRef, NULL, lp1, (uintptr_t)(pInfo->pfnProfilingOrETW));
}
}
@@ -414,7 +430,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
CONTRACTL_END;
#endif // FEATURE_REDHAWK
UNREFERENCED_PARAMETER(pExtraInfo);
- UNREFERENCED_PARAMETER(lp2);
+ handle_scan_fn fn = (handle_scan_fn)lp2;
LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of ", pObjRef, "to ", *pObjRef)));
@@ -422,7 +438,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
Object **pRef = (Object **)pObjRef;
// Get a hold of the heap ID that's tacked onto the end of the scancontext struct.
- ProfilingScanContext *pSC = (ProfilingScanContext *)lp1;
+ ScanContext *pSC = (ScanContext *)lp1;
uint32_t rootFlags = 0;
BOOL isDependent = FALSE;
@@ -487,60 +503,15 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
_UNCHECKED_OBJECTREF pSec = NULL;
-#ifdef GC_PROFILING
- // Give the profiler the objectref.
- if (pSC->fProfilerPinned)
+ if (isDependent)
{
- if (!isDependent)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackGC());
- g_profControlBlock.pProfInterface->RootReference2(
- (uint8_t *)*pRef,
- kEtwGCRootKindHandle,
- (EtwGCRootFlags)rootFlags,
- pRef,
- &pSC->pHeapId);
- END_PIN_PROFILER();
- }
- else
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
- pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
- g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
- (uint8_t*)*pRef,
- (uint8_t*)pSec,
- pRef,
- &pSC->pHeapId);
- END_PIN_PROFILER();
- }
+ pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
}
-#endif // GC_PROFILING
-
-#if defined(FEATURE_EVENT_TRACE)
- // Notify ETW of the handle
- if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
- {
- if (isDependent && (pSec == NULL))
- {
- pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
- }
-
- ETW::GCLog::RootReference(
- handle,
- *pRef, // object being rooted
- pSec, // pSecondaryNodeForDependentHandle
- isDependent,
- pSC,
- 0, // dwGCFlags,
- rootFlags); // ETW handle flags
- }
-#endif // defined(FEATURE_EVENT_TRACE)
+ fn(pRef, pSec, rootFlags, pSC, isDependent);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-
-
/*
* Scan callback for updating pointers.
*
@@ -1417,13 +1388,15 @@ void Ref_ScanDependentHandlesForRelocation(uint32_t condemned, uint32_t maxgen,
/*
loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions
should be kept in sync with the code above
+ Only used by profiling/ETW.
*/
-void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uint32_t condemned, uint32_t maxgen, uint32_t flags)
+void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t condemned, uint32_t maxgen, uint32_t flags)
{
WRAPPER_NO_CONTRACT;
// set up to scan variable handles with the specified mask and trace function
uint32_t type = HNDTYPE_DEPENDENT;
+ struct DIAG_DEPSCANINFO info = { pfnTrace, lp2 };
HandleTableMap *walk = &g_HandleTableMap;
while (walk) {
@@ -1436,14 +1409,13 @@ void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1,
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
if (hTable)
HndScanHandlesForGC(hTable, TraceDependentHandle,
- lp1, (uintptr_t)pfnTrace, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
+ lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
}
}
walk = walk->pNext;
}
}
-
// We scan handle tables by their buckets (ie, AD index). We could get into the situation where
// the AD indices are not very compacted (for example if we have just unloaded ADs and their
// indices haven't been reused yet) and we could be scanning them in an unbalanced fashion.
@@ -1623,7 +1595,7 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Please update this if you change the Ref_UpdatePointers function above.
-void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1)
+void Ref_ScanHandlesForProfilerAndETW(uint32_t maxgen, uintptr_t lp1, handle_scan_fn fn)
{
WRAPPER_NO_CONTRACT;
@@ -1662,16 +1634,16 @@ void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1)
{
HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
if (hTable)
- HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, 0, types, _countof(types), maxgen, maxgen, flags);
+ HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, types, _countof(types), maxgen, maxgen, flags);
}
walk = walk->pNext;
}
// update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG
- TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, 0, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags);
+ TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags);
}
-void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanContext * SC)
+void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ScanContext * SC, handle_scan_fn fn)
{
WRAPPER_NO_CONTRACT;
@@ -1680,12 +1652,7 @@ void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanCon
uint32_t flags = HNDGCF_NORMAL;
uintptr_t lp1 = (uintptr_t)SC;
- // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
- // (-1)), so reset it to NULL
- _ASSERTE((*((size_t *)(&SC->pHeapId)) == (size_t)(-1)) ||
- (*((size_t *)(&SC->pHeapId)) == (size_t)(0)));
- SC->pHeapId = NULL;
- TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, maxgen, maxgen, flags);
+ TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, maxgen, maxgen, flags);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
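
[Editor's note, not part of the patch] With this change, ScanPointerForProfilerAndETW no longer talks to the profiler or ETW directly; it forwards every handle to an EE-supplied callback via fn(pRef, pSec, rootFlags, pSC, isDependent). A hedged sketch of what such a callback could look like on the EE side; only the signature comes from the patch, the body and helper names are hypothetical:

    // Hypothetical EE-side callback matching the handle_scan_fn shape used above.
    // This version only distinguishes dependent handles, mirroring the
    // isDependent split that this change moves out of the GC.
    void WalkHandleForDiagnostics(Object** pRef, Object* pSec, uint32_t flags,
                                  ScanContext* context, BOOL isDependent)
    {
        if (isDependent)
        {
            // pSec is the secondary object of a dependent handle.
            ReportDependentHandle(*pRef, pSec, context);   // hypothetical helper
        }
        else
        {
            ReportHandleRoot(pRef, flags, context);        // hypothetical helper
        }
    }
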
diff --git a/src/Native/gc/objecthandle.h b/src/Native/gc/objecthandle.h
index 89365267d..34c2a0e32 100644
--- a/src/Native/gc/objecthandle.h
+++ b/src/Native/gc/objecthandle.h
@@ -652,7 +652,6 @@ BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle);
*/
struct ScanContext;
struct DhContext;
-struct ProfilingScanContext;
void Ref_BeginSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration);
void Ref_EndSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration);
@@ -672,10 +671,12 @@ void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* s
void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn);
#endif
+typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent);
+
void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
-void Ref_ScanPointersForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1);
-void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ProfilingScanContext * SC);
+void Ref_ScanHandlesForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1, handle_scan_fn fn);
+void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ScanContext * SC, handle_scan_fn fn);
void Ref_AgeHandles (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
void Ref_RejuvenateHandles(uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1);
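
[Editor's note, not part of the patch] Given the handle_scan_fn typedef introduced above, the EE's diagnostics code would pass its callback when it asks the handle table code to walk handles. A usage sketch, reusing the hypothetical callback from the earlier example; the ScanContext setup is an assumption, not taken from the patch:

    // Illustrative only: invoking the renamed entry points with an EE callback.
    void WalkHandleTablesForProfiler(uint32_t maxGeneration)
    {
        ScanContext sc;
        sc.promotion = FALSE;   // heap walk, not a promotion scan (assumed field)

        Ref_ScanHandlesForProfilerAndETW(maxGeneration, (uintptr_t)&sc, &WalkHandleForDiagnostics);
        Ref_ScanDependentHandlesForProfilerAndETW(maxGeneration, &sc, &WalkHandleForDiagnostics);
    }
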
diff --git a/src/Native/gc/sample/CMakeLists.txt b/src/Native/gc/sample/CMakeLists.txt
index 572fba371..9552cc51e 100644
--- a/src/Native/gc/sample/CMakeLists.txt
+++ b/src/Native/gc/sample/CMakeLists.txt
@@ -22,11 +22,11 @@ set(SOURCES
if(WIN32)
list(APPEND SOURCES
- gcenv.windows.cpp)
+ ../gcenv.windows.cpp)
add_definitions(-DUNICODE=1)
else()
list(APPEND SOURCES
- gcenv.unix.cpp)
+ ../gcenv.unix.cpp)
endif()
_add_executable(gcsample
diff --git a/src/Native/gc/sample/GCSample.cpp b/src/Native/gc/sample/GCSample.cpp
index 7f0dd8cab..112d29142 100644
--- a/src/Native/gc/sample/GCSample.cpp
+++ b/src/Native/gc/sample/GCSample.cpp
@@ -91,17 +91,14 @@ inline void ErectWriteBarrier(Object ** dst, Object * ref)
{
// if the dst is outside of the heap (unboxed value classes) then we
// simply exit
- if (((uint8_t*)dst < g_lowest_address) || ((uint8_t*)dst >= g_highest_address))
+ if (((uint8_t*)dst < g_gc_lowest_address) || ((uint8_t*)dst >= g_gc_highest_address))
return;
- if((uint8_t*)ref >= g_ephemeral_low && (uint8_t*)ref < g_ephemeral_high)
- {
- // volatile is used here to prevent fetch of g_card_table from being reordered
- // with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables.
- uint8_t* pCardByte = (uint8_t *)*(volatile uint8_t **)(&g_card_table) + card_byte((uint8_t *)dst);
- if(*pCardByte != 0xFF)
- *pCardByte = 0xFF;
- }
+ // volatile is used here to prevent fetch of g_card_table from being reordered
+ // with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables.
+ uint8_t* pCardByte = (uint8_t *)*(volatile uint8_t **)(&g_gc_card_table) + card_byte((uint8_t *)dst);
+ if(*pCardByte != 0xFF)
+ *pCardByte = 0xFF;
}
void WriteBarrier(Object ** dst, Object * ref)
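
[Editor's note, not part of the patch] The simplified sample barrier above drops the ephemeral-range filter and unconditionally marks the card covering the destination. A small usage sketch of the call pattern; the Node type and SetNext helper are hypothetical, while WriteBarrier is the sample's existing wrapper:

    // Hypothetical object-graph update in the sample: any store of a reference
    // into a heap object goes through WriteBarrier so its card byte is set to 0xFF.
    struct Node
    {
        Node* m_pNext;
    };

    void SetNext(Node* parent, Node* value)
    {
        WriteBarrier((Object**)&parent->m_pNext, (Object*)value);
    }
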
diff --git a/src/Native/gc/sample/GCSample.vcxproj b/src/Native/gc/sample/GCSample.vcxproj
index b196e1f34..1716f462e 100644
--- a/src/Native/gc/sample/GCSample.vcxproj
+++ b/src/Native/gc/sample/GCSample.vcxproj
@@ -84,10 +84,12 @@
</ItemGroup>
<ItemGroup>
<ClCompile Include="gcenv.ee.cpp" />
- <ClCompile Include="gcenv.windows.cpp" />
<ClCompile Include="GCSample.cpp" />
<ClCompile Include="..\gccommon.cpp" />
<ClCompile Include="..\gceewks.cpp" />
+ <ClCompile Include="..\gcenv.windows.cpp">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
<ClCompile Include="..\gcscan.cpp" />
<ClCompile Include="..\gcwks.cpp" />
<ClCompile Include="..\handletable.cpp" />
@@ -96,8 +98,7 @@
<ClCompile Include="..\handletablescan.cpp" />
<ClCompile Include="..\objecthandle.cpp" />
<ClCompile Include="..\env\common.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader>Create</PrecompiledHeader>
</ClCompile>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
diff --git a/src/Native/gc/sample/GCSample.vcxproj.filters b/src/Native/gc/sample/GCSample.vcxproj.filters
index e46c05456..f6aacfd0c 100644
--- a/src/Native/gc/sample/GCSample.vcxproj.filters
+++ b/src/Native/gc/sample/GCSample.vcxproj.filters
@@ -59,7 +59,7 @@
<ClCompile Include="gcenv.ee.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="gcenv.windows.cpp">
+ <ClCompile Include="..\gcenv.windows.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
diff --git a/src/Native/gc/sample/gcenv.ee.cpp b/src/Native/gc/sample/gcenv.ee.cpp
index 25d829e79..ac227b482 100644
--- a/src/Native/gc/sample/gcenv.ee.cpp
+++ b/src/Native/gc/sample/gcenv.ee.cpp
@@ -9,6 +9,12 @@
#include "gcenv.h"
#include "gc.h"
+MethodTable * g_pFreeObjectMethodTable;
+
+int32_t g_TrapReturningThreads;
+
+bool g_fFinalizerRunOnShutDown;
+
EEConfig * g_pConfig;
bool CLREventStatic::CreateManualEventNoThrow(bool bInitialState)
@@ -221,6 +227,38 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa
return NULL;
}
+void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+}
+
+void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+}
+
+void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+}
+
+void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+}
+
+void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
+{
+}
+
void FinalizerThread::EnableFinalization()
{
// Signal to finalizer thread that there are objects to finalize
@@ -238,14 +276,6 @@ bool IsGCSpecialThread()
return false;
}
-void StompWriteBarrierEphemeral(bool /* isRuntimeSuspended */)
-{
-}
-
-void StompWriteBarrierResize(bool /* isRuntimeSuspended */, bool /*bReqUpperBoundsCheck*/)
-{
-}
-
bool IsGCThread()
{
return false;
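
[Editor's note, not part of the patch] The sample leaves StompWriteBarrier empty because its barrier reads the g_gc_* globals directly, but an EE with generated barrier code would act on the request. A sketch of the general shape, assuming the WriteBarrierParameters field names (card_table, lowest_address, highest_address, write_watch_table, is_runtime_suspended) and the StompResize operation from gcinterface.h in this change; the Update*/Enable* helpers are hypothetical:

    // Illustrative only: an EE that re-points its write-barrier state when the GC
    // stomps the barrier. Field and enum names are assumed from gcinterface.h.
    void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
    {
        switch (args->operation)
        {
        case WriteBarrierOp::StompResize:
            // The card table and heap bounds moved; update whatever the barrier reads.
            UpdateCardTablePointer(args->card_table);                       // hypothetical
            UpdateHeapBounds(args->lowest_address, args->highest_address);  // hypothetical
            break;

        case WriteBarrierOp::SwitchToWriteWatch:
            EnableWriteWatchInBarrier(args->write_watch_table);             // hypothetical
            break;

        case WriteBarrierOp::SwitchToNonWriteWatch:
            DisableWriteWatchInBarrier();                                   // hypothetical
            break;

        default:
            break;
        }
        // args->is_runtime_suspended tells the EE whether it may patch barrier state
        // in place or must first synchronize with running threads.
    }
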
diff --git a/src/Native/gc/softwarewritewatch.cpp b/src/Native/gc/softwarewritewatch.cpp
index 519744900..b85293857 100644
--- a/src/Native/gc/softwarewritewatch.cpp
+++ b/src/Native/gc/softwarewritewatch.cpp
@@ -3,9 +3,9 @@
// See the LICENSE file in the project root for more information.
#include "common.h"
-#include "softwarewritewatch.h"
-
#include "gcenv.h"
+#include "env/gcenv.os.h"
+#include "softwarewritewatch.h"
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifndef DACCESS_COMPILE
@@ -14,8 +14,8 @@ static_assert((static_cast<size_t>(1) << SOFTWARE_WRITE_WATCH_AddressToTableByte
extern "C"
{
- uint8_t *g_sw_ww_table = nullptr;
- bool g_sw_ww_enabled_for_gc_heap = false;
+ uint8_t *g_gc_sw_ww_table = nullptr;
+ bool g_gc_sw_ww_enabled_for_gc_heap = false;
}
void SoftwareWriteWatch::StaticClose()
@@ -25,8 +25,8 @@ void SoftwareWriteWatch::StaticClose()
return;
}
- g_sw_ww_enabled_for_gc_heap = false;
- g_sw_ww_table = nullptr;
+ g_gc_sw_ww_enabled_for_gc_heap = false;
+ g_gc_sw_ww_table = nullptr;
}
bool SoftwareWriteWatch::GetDirtyFromBlock(
diff --git a/src/Native/gc/softwarewritewatch.h b/src/Native/gc/softwarewritewatch.h
index 3c8491cec..0e6e6c819 100644
--- a/src/Native/gc/softwarewritewatch.h
+++ b/src/Native/gc/softwarewritewatch.h
@@ -5,25 +5,20 @@
#ifndef __SOFTWARE_WRITE_WATCH_H__
#define __SOFTWARE_WRITE_WATCH_H__
+#include "gcinterface.h"
+#include "gc.h"
+
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifndef DACCESS_COMPILE
-extern void SwitchToWriteWatchBarrier(bool isRuntimeSuspended);
-extern void SwitchToNonWriteWatchBarrier(bool isRuntimeSuspended);
-
-#define SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift 0xc
-
extern "C"
{
// Table containing the dirty state. This table is translated to exclude the lowest address it represents, see
// TranslateTableToExcludeHeapStartAddress.
- extern uint8_t *g_sw_ww_table;
+ extern uint8_t *g_gc_sw_ww_table;
// Write watch may be disabled when it is not needed (between GCs for instance). This indicates whether it is enabled.
- extern bool g_sw_ww_enabled_for_gc_heap;
-
- extern uint8_t *g_lowest_address; // start address of the GC heap
- extern uint8_t *g_highest_address; // end address of the GC heap
+ extern bool g_gc_sw_ww_enabled_for_gc_heap;
}
class SoftwareWriteWatch
@@ -116,7 +111,7 @@ inline void SoftwareWriteWatch::VerifyMemoryRegion(
inline uint8_t *SoftwareWriteWatch::GetTable()
{
- return g_sw_ww_table;
+ return g_gc_sw_ww_table;
}
inline uint8_t *SoftwareWriteWatch::GetUntranslatedTable()
@@ -163,7 +158,7 @@ inline void SoftwareWriteWatch::SetUntranslatedTable(uint8_t *untranslatedTable,
assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable);
assert(heapStartAddress != nullptr);
- g_sw_ww_table = TranslateTableToExcludeHeapStartAddress(untranslatedTable, heapStartAddress);
+ g_gc_sw_ww_table = TranslateTableToExcludeHeapStartAddress(untranslatedTable, heapStartAddress);
}
inline void SoftwareWriteWatch::SetResizedUntranslatedTable(
@@ -194,7 +189,7 @@ inline void SoftwareWriteWatch::SetResizedUntranslatedTable(
inline bool SoftwareWriteWatch::IsEnabledForGCHeap()
{
- return g_sw_ww_enabled_for_gc_heap;
+ return g_gc_sw_ww_enabled_for_gc_heap;
}
inline void SoftwareWriteWatch::EnableForGCHeap()
@@ -204,9 +199,13 @@ inline void SoftwareWriteWatch::EnableForGCHeap()
VerifyCreated();
assert(!IsEnabledForGCHeap());
+ g_gc_sw_ww_enabled_for_gc_heap = true;
- g_sw_ww_enabled_for_gc_heap = true;
- SwitchToWriteWatchBarrier(true);
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::SwitchToWriteWatch;
+ args.write_watch_table = g_gc_sw_ww_table;
+ args.is_runtime_suspended = true;
+ GCToEEInterface::StompWriteBarrier(&args);
}
inline void SoftwareWriteWatch::DisableForGCHeap()
@@ -216,19 +215,22 @@ inline void SoftwareWriteWatch::DisableForGCHeap()
VerifyCreated();
assert(IsEnabledForGCHeap());
+ g_gc_sw_ww_enabled_for_gc_heap = false;
- g_sw_ww_enabled_for_gc_heap = false;
- SwitchToNonWriteWatchBarrier(true);
+ WriteBarrierParameters args = {};
+ args.operation = WriteBarrierOp::SwitchToNonWriteWatch;
+ args.is_runtime_suspended = true;
+ GCToEEInterface::StompWriteBarrier(&args);
}
inline void *SoftwareWriteWatch::GetHeapStartAddress()
{
- return g_lowest_address;
+ return g_gc_lowest_address;
}
inline void *SoftwareWriteWatch::GetHeapEndAddress()
{
- return g_highest_address;
+ return g_gc_highest_address;
}
inline size_t SoftwareWriteWatch::GetTableByteIndex(void *address)
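
[Editor's note, not part of the patch] For context on the write-watch globals renamed above: the table is translated to exclude the heap start address precisely so a barrier can index it with the destination address shifted by SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift (0xc, i.e. 4 KB granularity, per the definition this change relocates out of the header). A minimal, purely illustrative sketch of how a barrier might record a write under those assumptions:

    // Illustrative only: a write-watch-aware barrier fragment. It assumes the
    // table has already been translated to exclude the heap start address, so a
    // single shift-and-index is enough to reach the right byte.
    inline void RecordWriteForWriteWatch(uint8_t* dst)
    {
        if (g_gc_sw_ww_enabled_for_gc_heap)
        {
            g_gc_sw_ww_table[(size_t)dst >> SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift] = 0xFF;
        }
    }
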