github.com/mono/corert.git

author     Jan Vorlicek <janvorli@microsoft.com>    2016-01-13 00:30:29 +0300
committer  Jan Vorlicek <janvorli@microsoft.com>    2016-01-13 00:30:29 +0300
commit     92d9aee739a7bb5b78cb1efed9fd0ca64543559d (patch)
tree       212743a8849a4c3c9fe5a091ec9468b4d11b4970 /src/Native
parent     aaa49d2cc26901291bd68d4c840fff06c463af2e (diff)
parent     9e0eb8b241fc53cc0d1969f78985ec6d31e42820 (diff)
Merge pull request #564 from janvorli/gc-os-interface
GC to OS interface refactoring
Diffstat (limited to 'src/Native')
-rw-r--r--  src/Native/Runtime/Crst.cpp  17
-rw-r--r--  src/Native/Runtime/Crst.h  18
-rw-r--r--  src/Native/Runtime/PalRedhawk.h  6
-rw-r--r--  src/Native/Runtime/PalRedhawkFunctions.h  6
-rw-r--r--  src/Native/Runtime/gcenv.h  13
-rw-r--r--  src/Native/Runtime/gcrhenv.cpp  95
-rw-r--r--  src/Native/Runtime/unix/PalRedhawkInline.h  4
-rw-r--r--  src/Native/Runtime/unix/PalRedhawkUnix.cpp  833
-rw-r--r--  src/Native/Runtime/windows/PalRedhawkMinWin.cpp  510
-rw-r--r--  src/Native/gc/env/common.h  2
-rw-r--r--  src/Native/gc/env/gcenv.base.h  392
-rw-r--r--  src/Native/gc/env/gcenv.ee.h  85
-rw-r--r--  src/Native/gc/env/gcenv.interlocked.h  102
-rw-r--r--  src/Native/gc/env/gcenv.interlocked.inl  200
-rw-r--r--  src/Native/gc/env/gcenv.object.h  4
-rw-r--r--  src/Native/gc/env/gcenv.os.h  274
-rw-r--r--  src/Native/gc/env/gcenv.structs.h  70
-rw-r--r--  src/Native/gc/env/gcenv.sync.h  31
-rw-r--r--  src/Native/gc/env/gcenv.windows.cpp  227
-rw-r--r--  src/Native/gc/gc.cpp  935
-rw-r--r--  src/Native/gc/gc.h  4
-rw-r--r--  src/Native/gc/gcee.cpp  18
-rw-r--r--  src/Native/gc/gcpriv.h  111
-rw-r--r--  src/Native/gc/gcscan.cpp  44
-rw-r--r--  src/Native/gc/gcscan.h  9
-rw-r--r--  src/Native/gc/handletable.cpp  2
-rw-r--r--  src/Native/gc/handletable.inl  4
-rw-r--r--  src/Native/gc/handletablecache.cpp  26
-rw-r--r--  src/Native/gc/handletablecore.cpp  12
-rw-r--r--  src/Native/gc/objecthandle.cpp  8
-rw-r--r--  src/Native/gc/sample/CMakeLists.txt  41
-rw-r--r--  src/Native/gc/sample/GCSample.cpp  9
-rw-r--r--  src/Native/gc/sample/GCSample.vcxproj  8
-rw-r--r--  src/Native/gc/sample/GCSample.vcxproj.filters  6
-rw-r--r--  src/Native/gc/sample/gcenv.ee.cpp (renamed from src/Native/gc/sample/gcenv.cpp)  25
-rw-r--r--  src/Native/gc/sample/gcenv.h  10
-rw-r--r--  src/Native/gc/sample/gcenv.unix.cpp (renamed from src/Native/gc/env/gcenv.unix.cpp)  161
-rw-r--r--  src/Native/gc/sample/gcenv.windows.cpp  455
38 files changed, 2797 insertions, 1980 deletions
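
The heart of this change is a new static GCToOSInterface class that the GC calls instead of scattered FastInterlockXxx/Pal helpers. The sketch below is inferred from the implementations visible in this diff; the authoritative declarations live in src/Native/gc/env/gcenv.os.h (added by this PR but not excerpted here), so treat the names and signatures as an approximation.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct GCMemoryStatus;                           // filled in by GetMemoryStatus
struct GCThreadAffinity;                         // processor/group affinity request
typedef void (*GCThreadFunction)(void* param);

class GCToOSInterface
{
public:
    static bool Initialize();
    static void Shutdown();

    // Virtual memory management
    static void* VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags);
    static bool  VirtualRelease(void* address, size_t size);
    static bool  VirtualCommit(void* address, size_t size);
    static bool  VirtualDecommit(void* address, size_t size);
    static bool  VirtualReset(void* address, size_t size, bool unlock);

    // Write watching (backed by GetWriteWatch on Windows, stubbed on Unix)
    static bool  SupportsWriteWatch();
    static void  ResetWriteWatch(void* address, size_t size);
    static bool  GetWriteWatch(bool resetState, void* address, size_t size,
                               void** pageAddresses, uintptr_t* pageAddressesCount);

    // Scheduling and topology
    static void  Sleep(uint32_t sleepMSec);
    static void  YieldThread(uint32_t switchCount);
    static bool  CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity);
    static uint32_t GetLogicalCpuCount();
    static uint32_t GetCurrentProcessorNumber();
    static bool  CanGetCurrentProcessorNumber();

    // Time, memory status, diagnostics
    static int64_t  QueryPerformanceCounter();
    static int64_t  QueryPerformanceFrequency();
    static uint32_t GetLowPrecisionTimeStamp();
    static void     GetMemoryStatus(GCMemoryStatus* ms);
    static FILE*    OpenFile(const wchar_t* filename, const wchar_t* mode);
    static void     DebugBreak();
    static void     FlushProcessWriteBuffers();
};
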
diff --git a/src/Native/Runtime/Crst.cpp b/src/Native/Runtime/Crst.cpp
index 6593769f4..8b30def85 100644
--- a/src/Native/Runtime/Crst.cpp
+++ b/src/Native/Runtime/Crst.cpp
@@ -10,20 +10,13 @@
#include "holder.h"
#include "Crst.h"
-#ifndef DACCESS_COMPILE
-bool EEThreadId::IsSameThread()
-{
- return PalGetCurrentThreadId() == m_uiId;
-}
-#endif // DACCESS_COMPILE
-
void CrstStatic::Init(CrstType eType, CrstFlags eFlags)
{
UNREFERENCED_PARAMETER(eType);
UNREFERENCED_PARAMETER(eFlags);
#ifndef DACCESS_COMPILE
#if defined(_DEBUG)
- m_uiOwnerId = UNOWNED;
+ m_uiOwnerId.Clear();
#endif // _DEBUG
PalInitializeCriticalSectionEx(&m_sCritSec, 0, 0);
#endif // !DACCESS_COMPILE
@@ -42,7 +35,7 @@ void CrstStatic::Enter(CrstStatic *pCrst)
#ifndef DACCESS_COMPILE
PalEnterCriticalSection(&pCrst->m_sCritSec);
#if defined(_DEBUG)
- pCrst->m_uiOwnerId = PalGetCurrentThreadId();
+ pCrst->m_uiOwnerId.SetToCurrentThread();
#endif // _DEBUG
#else
UNREFERENCED_PARAMETER(pCrst);
@@ -54,7 +47,7 @@ void CrstStatic::Leave(CrstStatic *pCrst)
{
#ifndef DACCESS_COMPILE
#if defined(_DEBUG)
- pCrst->m_uiOwnerId = UNOWNED;
+ pCrst->m_uiOwnerId.Clear();
#endif // _DEBUG
PalLeaveCriticalSection(&pCrst->m_sCritSec);
#else
@@ -66,7 +59,7 @@ void CrstStatic::Leave(CrstStatic *pCrst)
bool CrstStatic::OwnedByCurrentThread()
{
#ifndef DACCESS_COMPILE
- return m_uiOwnerId == PalGetCurrentThreadId();
+ return m_uiOwnerId.IsCurrentThread();
#else
return false;
#endif
@@ -74,6 +67,6 @@ bool CrstStatic::OwnedByCurrentThread()
EEThreadId CrstStatic::GetHolderThreadId()
{
- return EEThreadId(m_uiOwnerId);
+ return m_uiOwnerId;
}
#endif // _DEBUG
diff --git a/src/Native/Runtime/Crst.h b/src/Native/Runtime/Crst.h
index c267a3c0c..d087f9cc4 100644
--- a/src/Native/Runtime/Crst.h
+++ b/src/Native/Runtime/Crst.h
@@ -3,21 +3,6 @@
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
//
-// Abstracted thread ID. This doesn't really belong in this file, but there is not currently any better place
-// for it.
-class EEThreadId
-{
-public:
- EEThreadId(uint32_t uiId) : m_uiId(uiId) {}
-#ifndef DACCESS_COMPILE
- bool IsSameThread();
-#endif
-
-private:
- uint32_t m_uiId;
-};
-
-
//
// -----------------------------------------------------------------------------------------------------------
//
@@ -67,8 +52,7 @@ public:
private:
CRITICAL_SECTION m_sCritSec;
#if defined(_DEBUG)
- uint32_t m_uiOwnerId;
- static const uint32_t UNOWNED = 0;
+ EEThreadId m_uiOwnerId;
#endif // _DEBUG
};
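
With the raw uint32_t owner id gone, Crst now relies on an EEThreadId type that this PR moves into src/Native/gc/env/gcenv.structs.h (not excerpted here). A minimal sketch of the interface implied by the call sites above — Clear(), SetToCurrentThread(), IsCurrentThread() — assuming the same PalGetCurrentThreadId the old code used:

#include <cstdint>

uint32_t PalGetCurrentThreadId();            // existing PAL export, declared for illustration

class EEThreadId
{
    uint32_t m_uiId = 0;                     // 0 stands in for the old UNOWNED sentinel

public:
    void Clear()                 { m_uiId = 0; }
    void SetToCurrentThread()    { m_uiId = PalGetCurrentThreadId(); }
    bool IsCurrentThread() const { return m_uiId == PalGetCurrentThreadId(); }
};
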
diff --git a/src/Native/Runtime/PalRedhawk.h b/src/Native/Runtime/PalRedhawk.h
index 027491a7e..d27e60c18 100644
--- a/src/Native/Runtime/PalRedhawk.h
+++ b/src/Native/Runtime/PalRedhawk.h
@@ -651,6 +651,7 @@ extern GCSystemInfo g_SystemInfo;
#define REDHAWK_PALAPI __stdcall
#endif // GCENV_INCLUDED
+bool InitializeSystemInfo();
#ifndef DACCESS_COMPILE
@@ -773,18 +774,13 @@ REDHAWK_PALIMPORT int __cdecl PalVSprintf(_Out_writes_z_(cchBuffer) char * szBuf
#define ALLOW_CONSTANT_EXPR_BEGIN __pragma(warning(push)) __pragma(warning(disable:4127))
#define ALLOW_CONSTANT_EXPR_END __pragma(warning(pop))
-struct GCMemoryStatus;
-REDHAWK_PALIMPORT UInt32_BOOL REDHAWK_PALAPI PalGlobalMemoryStatusEx(_Out_ GCMemoryStatus* pBuffer);
REDHAWK_PALIMPORT _Ret_maybenull_ _Post_writable_byte_size_(size) void* REDHAWK_PALAPI PalVirtualAlloc(_In_opt_ void* pAddress, UIntNative size, UInt32 allocationType, UInt32 protect);
REDHAWK_PALIMPORT UInt32_BOOL REDHAWK_PALAPI PalVirtualFree(_In_ void* pAddress, UIntNative size, UInt32 freeType);
REDHAWK_PALIMPORT void REDHAWK_PALAPI PalSleep(UInt32 milliseconds);
REDHAWK_PALIMPORT UInt32_BOOL REDHAWK_PALAPI PalSwitchToThread();
-REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateMutexW(_In_opt_ LPSECURITY_ATTRIBUTES pMutexAttributes, UInt32_BOOL initialOwner, _In_opt_z_ LPCWSTR pName);
REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateEventW(_In_opt_ LPSECURITY_ATTRIBUTES pEventAttributes, UInt32_BOOL manualReset, UInt32_BOOL initialState, _In_opt_z_ LPCWSTR pName);
REDHAWK_PALIMPORT UInt32 REDHAWK_PALAPI PalGetTickCount();
REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateFileW(_In_z_ LPCWSTR pFileName, uint32_t desiredAccess, uint32_t shareMode, _In_opt_ void* pSecurityAttributes, uint32_t creationDisposition, uint32_t flagsAndAttributes, HANDLE hTemplateFile);
-REDHAWK_PALIMPORT UInt32 REDHAWK_PALAPI PalGetWriteWatch(UInt32 flags, _In_ void* pBaseAddress, UIntNative regionSize, _Out_writes_to_opt_(*pCount, *pCount) void** pAddresses, _Inout_opt_ UIntNative* pCount, _Inout_opt_ UInt32* pGranularity);
-REDHAWK_PALIMPORT UInt32 REDHAWK_PALAPI PalResetWriteWatch(_In_ void* pBaseAddress, UIntNative regionSize);
REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateLowMemoryNotification();
REDHAWK_PALIMPORT void REDHAWK_PALAPI PalTerminateCurrentProcess(UInt32 exitCode);
REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalGetModuleHandleFromPointer(_In_ void* pointer);
diff --git a/src/Native/Runtime/PalRedhawkFunctions.h b/src/Native/Runtime/PalRedhawkFunctions.h
index e2603ea1b..d1a062456 100644
--- a/src/Native/Runtime/PalRedhawkFunctions.h
+++ b/src/Native/Runtime/PalRedhawkFunctions.h
@@ -196,12 +196,6 @@ inline UInt32_BOOL PalSetEvent(HANDLE arg1)
return SetEvent(arg1);
}
-extern "C" UInt32_BOOL __stdcall SetFilePointerEx(HANDLE, LARGE_INTEGER, LARGE_INTEGER *, UInt32);
-inline UInt32_BOOL PalSetFilePointerEx(HANDLE arg1, LARGE_INTEGER arg2, LARGE_INTEGER * arg3, UInt32 arg4)
-{
- return SetFilePointerEx(arg1, arg2, arg3, arg4);
-}
-
extern "C" void __stdcall TerminateProcess(HANDLE, UInt32);
inline void PalTerminateProcess(HANDLE arg1, UInt32 arg2)
{
diff --git a/src/Native/Runtime/gcenv.h b/src/Native/Runtime/gcenv.h
index 4bb61e3f9..5ab29aa21 100644
--- a/src/Native/Runtime/gcenv.h
+++ b/src/Native/Runtime/gcenv.h
@@ -9,9 +9,15 @@
#pragma warning( disable: 4127 ) // conditional expression is constant -- common in GC
#endif
+typedef wchar_t WCHAR;
+#define W(str) L##str
+
#include "sal.h"
#include "gcenv.structs.h"
+#include "gcenv.os.h"
+#include "gcenv.interlocked.h"
#include "gcenv.base.h"
+#include "gcenv.ee.h"
#include "Crst.h"
#include "event.h"
@@ -25,6 +31,7 @@
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#include "gcrhinterface.h"
+#include "gcenv.interlocked.inl"
#ifdef FEATURE_ETW
@@ -53,6 +60,12 @@
#endif // FEATURE_ETW
+#define MAX_LONGPATH 1024
+
+#ifndef YieldProcessor
+#define YieldProcessor PalYieldProcessor
+#endif
+
// Adapter for GC's view of Array
class ArrayBase : Array
{
diff --git a/src/Native/Runtime/gcrhenv.cpp b/src/Native/Runtime/gcrhenv.cpp
index bf5c15a8e..0748c874c 100644
--- a/src/Native/Runtime/gcrhenv.cpp
+++ b/src/Native/Runtime/gcrhenv.cpp
@@ -169,7 +169,10 @@ bool RedhawkGCInterface::InitializeSubsystems(GCType gcType)
MICROSOFT_WINDOWS_REDHAWK_GC_PUBLIC_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_Redhawk_GC_PublicHandle;
#endif // FEATURE_ETW
- InitializeSystemInfo();
+ if (!InitializeSystemInfo())
+ {
+ return false;
+ }
// Initialize the special EEType used to mark free list entries in the GC heap.
g_FreeObjectEEType.InitializeAsGcFreeType();
@@ -479,7 +482,7 @@ void RedhawkGCInterface::ScanHeap(GcScanObjectFunction pfnScanCallback, void *pC
// Carefully attempt to set the global callback function (careful in that we won't overwrite another scan
// that's being scheduled or in-progress). If someone beat us to it back off and wait for the
// corresponding GC to complete.
- while (FastInterlockCompareExchangePointer(&g_pfnHeapScan, pfnScanCallback, NULL) != NULL)
+ while (Interlocked::CompareExchangePointer(&g_pfnHeapScan, pfnScanCallback, NULL) != NULL)
{
// Wait in pre-emptive mode to avoid stalling another thread that's attempting a collection.
Thread * pCurThread = GetThread();
@@ -509,7 +512,7 @@ void RedhawkGCInterface::ScanHeap(GcScanObjectFunction pfnScanCallback, void *pC
// Release our hold on the global scanning pointers.
g_pvHeapScanContext = NULL;
- FastInterlockExchangePointer(&g_pfnHeapScan, NULL);
+ Interlocked::ExchangePointer(&g_pfnHeapScan, NULL);
#else
UNREFERENCED_PARAMETER(pfnScanCallback);
UNREFERENCED_PARAMETER(pContext);
@@ -992,12 +995,12 @@ bool StartFinalizerThread()
//
static volatile Int32 fFinalizerThreadCreated;
- if (FastInterlockExchange(&fFinalizerThreadCreated, 1) != 1)
+ if (Interlocked::Exchange(&fFinalizerThreadCreated, 1) != 1)
{
if (!PalStartFinalizerThread(FinalizerStart, (void*)FinalizerThread::GetFinalizerEvent()))
{
// Need to try again another time...
- FastInterlockExchange(&fFinalizerThreadCreated, 0);
+ Interlocked::Exchange(&fFinalizerThreadCreated, 0);
}
}
@@ -1095,9 +1098,9 @@ bool FinalizerThread::WatchDog()
// Wait for any outstanding finalization run to complete. Time this initial operation so that it forms
// part of the overall timeout budget.
- DWORD dwStartTime = GetTickCount();
+ DWORD dwStartTime = PalGetTickCount();
Wait(dwTimeout);
- DWORD dwEndTime = GetTickCount();
+ DWORD dwEndTime = PalGetTickCount();
// In the exceedingly rare case that the tick count wrapped then we'll just reset the timeout to its
// initial value. Otherwise we'll subtract the time we waited from the timeout budget (being mindful
@@ -1156,60 +1159,14 @@ void FinalizerThread::Wait(DWORD timeout, bool allowReentrantWait)
#endif // FEATURE_PREMORTEM_FINALIZATION
#ifndef DACCESS_COMPILE
-void GetProcessMemoryLoad(GCMemoryStatus* pGCMemStatus)
-{
- // @TODO: no way to communicate failure
- PalGlobalMemoryStatusEx(pGCMemStatus);
-}
bool __SwitchToThread(uint32_t /*dwSleepMSec*/, uint32_t /*dwSwitchCount*/)
{
return !!PalSwitchToThread();
}
-void * ClrVirtualAlloc(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect)
-{
- return PalVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-}
-
-void * ClrVirtualAllocAligned(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect,
- size_t /*dwAlignment*/)
-{
- return PalVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-}
-
-bool ClrVirtualFree(
- void * lpAddress,
- size_t dwSize,
- uint32_t dwFreeType)
-{
- return !!PalVirtualFree(lpAddress, dwSize, dwFreeType);
-}
#endif // DACCESS_COMPILE
-bool
-ClrVirtualProtect(
- void * lpAddress,
- size_t dwSize,
- uint32_t flNewProtect,
- uint32_t * lpflOldProtect)
-{
- UNREFERENCED_PARAMETER(lpAddress);
- UNREFERENCED_PARAMETER(dwSize);
- UNREFERENCED_PARAMETER(flNewProtect);
- UNREFERENCED_PARAMETER(lpflOldProtect);
- ASSERT(!"ClrVirtualProtect");
- return false;
-}
-
MethodTable * g_pFreeObjectMethodTable;
int32_t g_TrapReturningThreads;
bool g_fFinalizerRunOnShutDown;
@@ -1230,38 +1187,6 @@ VOID LogSpewAlways(const char * /*fmt*/, ...)
{
}
-CLR_MUTEX_COOKIE ClrCreateMutex(CLR_MUTEX_ATTRIBUTES lpMutexAttributes, bool bInitialOwner, LPCWSTR lpName)
-{
- UNREFERENCED_PARAMETER(lpMutexAttributes);
- UNREFERENCED_PARAMETER(bInitialOwner);
- UNREFERENCED_PARAMETER(lpName);
- ASSERT(!"ClrCreateMutex");
- return NULL;
-}
-
-void ClrCloseMutex(CLR_MUTEX_COOKIE mutex)
-{
- UNREFERENCED_PARAMETER(mutex);
- ASSERT(!"ClrCloseMutex");
-}
-
-bool ClrReleaseMutex(CLR_MUTEX_COOKIE mutex)
-{
- UNREFERENCED_PARAMETER(mutex);
- ASSERT(!"ClrReleaseMutex");
- return true;
-}
-
-uint32_t ClrWaitForMutex(CLR_MUTEX_COOKIE mutex, uint32_t dwMilliseconds, bool bAlertable)
-{
- UNREFERENCED_PARAMETER(mutex);
- UNREFERENCED_PARAMETER(dwMilliseconds);
- UNREFERENCED_PARAMETER(bAlertable);
- ASSERT(!"ClrWaitForMutex");
- return WAIT_OBJECT_0;
-}
-
-
uint32_t CLRConfig::GetConfigValue(ConfigDWORDInfo eType)
{
switch (eType)
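
Throughout gcrhenv.cpp the FastInterlockXxx free functions give way to a templated Interlocked class; per the diffstat it arrives in src/Native/gc/env/gcenv.interlocked.h/.inl, which are not excerpted here. A minimal sketch of the three members used above, written against GCC/Clang builtins purely for illustration:

#include <cstdint>

class Interlocked
{
public:
    // Atomically stores value and returns the previous value.
    template <typename T>
    static T Exchange(T volatile* destination, T value)
    {
        return __atomic_exchange_n(destination, value, __ATOMIC_SEQ_CST);
    }

    // Pointer flavor of Exchange, matching the ExchangePointer call site above.
    template <typename T>
    static T ExchangePointer(T volatile* destination, T value)
    {
        return __atomic_exchange_n(destination, value, __ATOMIC_SEQ_CST);
    }

    // Stores exchange only if *destination == comparand; always returns the
    // value observed before the operation, which is what the CAS loop in
    // ScanHeap compares against NULL.
    template <typename T>
    static T CompareExchangePointer(T volatile* destination, T exchange, T comparand)
    {
        __atomic_compare_exchange_n(destination, &comparand, exchange,
                                    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return comparand;
    }
};
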
diff --git a/src/Native/Runtime/unix/PalRedhawkInline.h b/src/Native/Runtime/unix/PalRedhawkInline.h
index 5d9419841..a09fac876 100644
--- a/src/Native/Runtime/unix/PalRedhawkInline.h
+++ b/src/Native/Runtime/unix/PalRedhawkInline.h
@@ -17,12 +17,12 @@ FORCEINLINE Int32 PalInterlockedDecrement(_Inout_ _Interlocked_operand_ Int32 vo
FORCEINLINE UInt32 PalInterlockedOr(_Inout_ _Interlocked_operand_ UInt32 volatile *pDst, UInt32 iValue)
{
- return __sync_fetch_and_or(pDst, iValue);
+ return __sync_or_and_fetch(pDst, iValue);
}
FORCEINLINE UInt32 PalInterlockedAnd(_Inout_ _Interlocked_operand_ UInt32 volatile *pDst, UInt32 iValue)
{
- return __sync_fetch_and_and(pDst, iValue);
+ return __sync_and_and_fetch(pDst, iValue);
}
FORCEINLINE Int32 PalInterlockedExchange(_Inout_ _Interlocked_operand_ Int32 volatile *pDst, Int32 iValue)
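
The one-line fixes above are easy to misread: __sync_fetch_and_or and __sync_fetch_and_and return the value the word held before the operation, while the __sync_or_and_fetch and __sync_and_and_fetch replacements return the value after it, which is what PalInterlockedOr/PalInterlockedAnd are evidently expected to produce. A tiny standalone illustration:

#include <cstdint>
#include <cstdio>

int main()
{
    volatile uint32_t a = 0x1;
    volatile uint32_t b = 0x1;

    uint32_t before = __sync_fetch_and_or(&a, 0x2);  // returns 0x1; a ends up 0x3
    uint32_t after  = __sync_or_and_fetch(&b, 0x2);  // returns 0x3; b ends up 0x3

    printf("fetch_and_or -> 0x%x, or_and_fetch -> 0x%x\n", before, after);
    return 0;
}
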
diff --git a/src/Native/Runtime/unix/PalRedhawkUnix.cpp b/src/Native/Runtime/unix/PalRedhawkUnix.cpp
index b1c35c715..9a6743158 100644
--- a/src/Native/Runtime/unix/PalRedhawkUnix.cpp
+++ b/src/Native/Runtime/unix/PalRedhawkUnix.cpp
@@ -4,7 +4,7 @@
//
//
-// Implementation of the Redhawk Platform Abstraction Layer (PAL) library when Unix is the platform.
+// Implementation of the Redhawk Platform Abstraction Layer (PAL) library when Unix is the platform.
//
#include <stdio.h>
@@ -16,11 +16,14 @@
#include <sal.h>
#include "config.h"
#include "UnixHandle.h"
+#include <pthread.h>
+#include "gcenv.structs.h"
+#include "gcenv.os.h"
+#include "holder.h"
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
-#include <pthread.h>
#include <sys/types.h>
#include <iconv.h>
#include <dlfcn.h>
@@ -95,29 +98,11 @@ typedef Int32 (__stdcall *PVECTORED_EXCEPTION_HANDLER)(
);
#define PAGE_NOACCESS 0x01
-#define PAGE_READONLY 0x02
#define PAGE_READWRITE 0x04
-#define PAGE_WRITECOPY 0x08
-#define PAGE_EXECUTE 0x10
-#define PAGE_EXECUTE_READ 0x20
-#define PAGE_EXECUTE_READWRITE 0x40
-#define PAGE_EXECUTE_WRITECOPY 0x80
-#define PAGE_GUARD 0x100
-#define PAGE_NOCACHE 0x200
-#define PAGE_WRITECOMBINE 0x400
#define MEM_COMMIT 0x1000
#define MEM_RESERVE 0x2000
#define MEM_DECOMMIT 0x4000
#define MEM_RELEASE 0x8000
-#define MEM_FREE 0x10000
-#define MEM_PRIVATE 0x20000
-#define MEM_MAPPED 0x40000
-#define MEM_RESET 0x80000
-#define MEM_TOP_DOWN 0x100000
-#define MEM_WRITE_WATCH 0x200000
-#define MEM_PHYSICAL 0x400000
-#define MEM_LARGE_PAGES 0x20000000
-#define MEM_4MB_PAGES 0x80000000
#define WAIT_OBJECT_0 0
#define WAIT_TIMEOUT 258
@@ -277,41 +262,8 @@ public:
}
};
-class UnixMutex
-{
- pthread_mutex_t m_mutex;
-
-public:
-
- UnixMutex()
- {
- int st = pthread_mutex_init(&m_mutex, NULL);
- ASSERT(st == NULL);
- }
-
- ~UnixMutex()
- {
- int st = pthread_mutex_destroy(&m_mutex);
- ASSERT(st == NULL);
- }
-
- bool Release()
- {
- return pthread_mutex_unlock(&m_mutex) == 0;
- }
-
- uint32_t Wait(uint32_t milliseconds)
- {
- // TODO: implement timed wait if needed
- ASSERT(milliseconds == INFINITE);
- int st = pthread_mutex_lock(&m_mutex);
- return (st == 0) ? WAIT_OBJECT_0 : WAIT_FAILED;
- }
-};
-
typedef UnixHandle<UnixHandleType::Event, UnixEvent> EventUnixHandle;
typedef UnixHandle<UnixHandleType::Thread, pthread_t> ThreadUnixHandle;
-typedef UnixHandle<UnixHandleType::Mutex, UnixMutex> MutexUnixHandle;
// The Redhawk PAL must be initialized before any of its exports can be called. Returns true for a successful
// initialization and false on failure.
@@ -342,7 +294,7 @@ REDHAWK_PALEXPORT bool REDHAWK_PALAPI PalHasCapability(PalCapability capability)
static const char* const WCharEncoding = "UTF-32LE";
-int UTF8ToWideChar(char* bytes, int len, wchar_t* buffer, int bufLen)
+int UTF8ToWideChar(const char* bytes, int len, wchar_t* buffer, int bufLen)
{
iconv_t cd = iconv_open(WCharEncoding, "UTF-8");
if (cd == (iconv_t)-1)
@@ -351,7 +303,7 @@ int UTF8ToWideChar(char* bytes, int len, wchar_t* buffer, int bufLen)
return 0;
}
- char* inbuf = bytes;
+ char* inbuf = (char*)bytes;
char* outbuf = (char*)buffer;
size_t inbufbytesleft = len;
size_t outbufbytesleft = bufLen;
@@ -368,7 +320,7 @@ int UTF8ToWideChar(char* bytes, int len, wchar_t* buffer, int bufLen)
return (bufLen - outbufbytesleft) / sizeof(wchar_t);
}
-int WideCharToUTF8(wchar_t* chars, int len, char* buffer, int bufLen)
+int WideCharToUTF8(const wchar_t* chars, int len, char* buffer, int bufLen)
{
iconv_t cd = iconv_open("UTF-8", WCharEncoding);
if (cd == (iconv_t)-1)
@@ -402,7 +354,7 @@ REDHAWK_PALEXPORT unsigned int REDHAWK_PALAPI PalGetCurrentProcessorNumber()
return (unsigned int)processorNumber;
#else
- // TODO: implement for OSX / FreeBSD
+ // UNIXTODO: implement for OSX / FreeBSD
return 0;
#endif
}
@@ -412,90 +364,6 @@ REDHAWK_PALEXPORT UInt32_BOOL REDHAWK_PALAPI PalAllocateThunksFromTemplate(HANDL
PORTABILITY_ASSERT("UNIXTODO: Implement this function");
}
-REDHAWK_PALEXPORT UInt32_BOOL REDHAWK_PALAPI PalGlobalMemoryStatusEx(_Inout_ GCMemoryStatus* pBuffer)
-{
- pBuffer->dwMemoryLoad = 0;
- pBuffer->ullTotalPhys = 0;
- pBuffer->ullAvailPhys = 0;
- pBuffer->ullTotalPageFile = 0;
- pBuffer->ullAvailPageFile = 0;
- pBuffer->ullTotalVirtual = 0;
- pBuffer->ullAvailVirtual = 0;
-
- UInt32_BOOL fRetVal = UInt32_FALSE;
-
- // Get the physical memory size
-#if HAVE_SYSCONF && HAVE__SC_PHYS_PAGES
- int64_t physical_memory;
-
- // Get the Physical memory size
- physical_memory = sysconf( _SC_PHYS_PAGES ) * sysconf( _SC_PAGE_SIZE );
- pBuffer->ullTotalPhys = (uint64_t)physical_memory;
- fRetVal = UInt32_TRUE;
-#elif HAVE_SYSCTL
- int mib[2];
- int64_t physical_memory;
- size_t length;
-
- // Get the Physical memory size
- mib[0] = CTL_HW;
- mib[1] = HW_MEMSIZE;
- length = sizeof(int64_t);
- int rc = sysctl(mib, 2, &physical_memory, &length, NULL, 0);
- if (rc != 0)
- {
- ASSERT_UNCONDITIONALLY("sysctl failed for HW_MEMSIZE\n");
- }
- else
- {
- pBuffer->ullTotalPhys = (uint64_t)physical_memory;
- fRetVal = UInt32_TRUE;
- }
-#elif // HAVE_SYSINFO
- // TODO: implement getting memory details via sysinfo. On Linux, it provides swap file details that
- // we can use to fill in the xxxPageFile members.
-
-#endif // HAVE_SYSCONF
-
- // Get the physical memory in use - from it, we can get the physical memory available.
- // We do this only when we have the total physical memory available.
- if (pBuffer->ullTotalPhys > 0)
- {
-#ifndef __APPLE__
- pBuffer->ullAvailPhys = sysconf(SYSCONF_PAGES) * sysconf(_SC_PAGE_SIZE);
- int64_t used_memory = pBuffer->ullTotalPhys - pBuffer->ullAvailPhys;
- pBuffer->dwMemoryLoad = (uint32_t)((used_memory * 100) / pBuffer->ullTotalPhys);
-#else
- vm_size_t page_size;
- mach_port_t mach_port;
- mach_msg_type_number_t count;
- vm_statistics_data_t vm_stats;
- mach_port = mach_host_self();
- count = sizeof(vm_stats) / sizeof(natural_t);
- if (KERN_SUCCESS == host_page_size(mach_port, &page_size))
- {
- if (KERN_SUCCESS == host_statistics(mach_port, HOST_VM_INFO, (host_info_t)&vm_stats, &count))
- {
- pBuffer->ullAvailPhys = (int64_t)vm_stats.free_count * (int64_t)page_size;
- int64_t used_memory = ((int64_t)vm_stats.active_count + (int64_t)vm_stats.inactive_count + (int64_t)vm_stats.wire_count) * (int64_t)page_size;
- pBuffer->dwMemoryLoad = (uint32_t)((used_memory * 100) / pBuffer->ullTotalPhys);
- }
- }
- mach_port_deallocate(mach_task_self(), mach_port);
-#endif // __APPLE__
- }
-
- // There is no API to get the total virtual address space size on
- // Unix, so we use a constant value representing 128TB, which is
- // the approximate size of total user virtual address space on
- // the currently supported Unix systems.
- static const uint64_t _128TB = (1ull << 47);
- pBuffer->ullTotalVirtual = _128TB;
- pBuffer->ullAvailVirtual = pBuffer->ullAvailPhys;
-
- return fRetVal;
-}
-
REDHAWK_PALEXPORT void REDHAWK_PALAPI PalSleep(uint32_t milliseconds)
{
#if HAVE_CLOCK_MONOTONIC
@@ -520,7 +388,7 @@ REDHAWK_PALEXPORT void REDHAWK_PALAPI PalSleep(uint32_t milliseconds)
REDHAWK_PALEXPORT UInt32_BOOL REDHAWK_PALAPI __stdcall PalSwitchToThread()
{
- // sched_yield yields to another thread in the current process. This implementation
+ // sched_yield yields to another thread in the current process. This implementation
// won't work well for cross-process synchronization.
return sched_yield() == 0;
}
@@ -534,24 +402,10 @@ extern "C" UInt32_BOOL CloseHandle(HANDLE handle)
return UInt32_TRUE;
}
-REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalCreateMutexW(_In_opt_ LPSECURITY_ATTRIBUTES pMutexAttributes, UInt32_BOOL initialOwner, _In_opt_z_ const wchar_t* pName)
-{
- return new MutexUnixHandle(UnixMutex());
-}
-
-
REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalCreateEventW(_In_opt_ LPSECURITY_ATTRIBUTES pEventAttributes, UInt32_BOOL manualReset, UInt32_BOOL initialState, _In_opt_z_ const wchar_t* pName)
{
- return new EventUnixHandle(UnixEvent(manualReset, initialState));
-}
-
-// This is not needed in the PAL
-#if 0
-REDHAWK_PALEXPORT _Success_(return) bool REDHAWK_PALAPI PalGetThreadContext(HANDLE hThread, _Out_ PAL_LIMITED_CONTEXT * pCtx)
-{
- PORTABILITY_ASSERT("UNIXTODO: Implement this function");
+ return new (nothrow) EventUnixHandle(UnixEvent(manualReset, initialState));
}
-#endif
typedef UInt32(__stdcall *BackgroundCallback)(_In_opt_ void* pCallbackContext);
@@ -651,7 +505,7 @@ REDHAWK_PALEXPORT UInt64 REDHAWK_PALAPI GetTickCount64()
}
#else
{
- struct timeval tv;
+ struct timeval tv;
if (gettimeofday(&tv, NULL) == 0)
{
retval = (tv.tv_sec * tccSecondsToMilliSeconds) + (tv.tv_usec / tccMilliSecondsToMicroSeconds);
@@ -662,9 +516,9 @@ REDHAWK_PALEXPORT UInt64 REDHAWK_PALAPI GetTickCount64()
ASSERT_UNCONDITIONALLY("gettimeofday() failed\n");
}
}
-#endif // HAVE_CLOCK_MONOTONIC
+#endif // HAVE_CLOCK_MONOTONIC
- return retval;
+ return retval;
}
REDHAWK_PALEXPORT uint32_t REDHAWK_PALAPI PalGetTickCount()
@@ -672,36 +526,6 @@ REDHAWK_PALEXPORT uint32_t REDHAWK_PALAPI PalGetTickCount()
return (uint32_t)GetTickCount64();
}
-#if 0
-REDHAWK_PALEXPORT UInt32_BOOL REDHAWK_PALAPI PalEventEnabled(REGHANDLE regHandle, _In_ const EVENT_DESCRIPTOR* eventDescriptor)
-{
- PORTABILITY_ASSERT("UNIXTODO: Implement this function");
-}
-#endif
-
-REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalCreateFileW(_In_z_ const WCHAR* pFileName, uint32_t desiredAccess, uint32_t shareMode, _In_opt_ LPSECURITY_ATTRIBUTES pSecurityAttributes, uint32_t creationDisposition, uint32_t flagsAndAttributes, HANDLE hTemplateFile)
-{
- PORTABILITY_ASSERT("UNIXTODO: Implement this function");
-}
-
-REDHAWK_PALEXPORT _Success_(return == 0)
-uint32_t REDHAWK_PALAPI PalGetWriteWatch(_In_ uint32_t flags, _In_ void* pBaseAddress, _In_ size_t regionSize, _Out_writes_to_opt_(*pCount, *pCount) void** pAddresses, _Inout_opt_ uintptr_t* pCount, _Out_opt_ uint32_t* pGranularity)
-{
- // There is no write watching feature available on Unix other than a possibility to emulate
- // it using read only pages and page fault handler.
- *pAddresses = NULL;
- *pCount = 0;
- // Return non-zero value as an indicator of failure
- return 1;
-}
-
-REDHAWK_PALEXPORT uint32_t REDHAWK_PALAPI PalResetWriteWatch(_In_ void* pBaseAddress, size_t regionSize)
-{
- // There is no write watching feature available on Unix.
- // Return non-zero value as an indicator of failure.
- return 1;
-}
-
REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalGetModuleHandleFromPointer(_In_ void* pointer)
{
HANDLE moduleHandle = NULL;
@@ -715,11 +539,6 @@ REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalGetModuleHandleFromPointer(_In_ void*
return moduleHandle;
}
-REDHAWK_PALEXPORT void* REDHAWK_PALAPI PalAddVectoredExceptionHandler(uint32_t firstHandler, _In_ PVECTORED_EXCEPTION_HANDLER vectoredHandler)
-{
- PORTABILITY_ASSERT("UNIXTODO: Implement this function");
-}
-
bool QueryCacheSize()
{
bool success = true;
@@ -729,7 +548,7 @@ bool QueryCacheSize()
DIR* cpuDir = opendir("/sys/devices/system/cpu");
if (cpuDir == nullptr)
{
- ASSERT_UNCONDITIONALLY("opendir on /sys/devices/system/cpu failed\n");
+ ASSERT_UNCONDITIONALLY("opendir on /sys/devices/system/cpu failed\n");
return false;
}
@@ -897,7 +716,7 @@ REDHAWK_PALEXPORT _Ret_maybenull_ _Post_writable_byte_size_(size) void* REDHAWK_
// Align size to whole pages
size = (size + (OS_PAGE_SIZE - 1)) & ~(OS_PAGE_SIZE - 1);
int unixProtect = W32toUnixAccessControl(protect);
-
+
if (allocationType & MEM_RESERVE)
{
// For Windows compatibility, let the PalVirtualAlloc reserve memory with 64k alignment.
@@ -926,7 +745,7 @@ REDHAWK_PALEXPORT _Ret_maybenull_ _Post_writable_byte_size_(size) void* REDHAWK_
pRetVal = pAlignedRetVal;
}
-
+
return pRetVal;
}
@@ -960,6 +779,11 @@ extern "C" HANDLE GetCurrentProcess()
return (HANDLE)-1;
}
+extern "C" uint32_t GetCurrentProcessId()
+{
+ return getpid();
+}
+
extern "C" HANDLE GetCurrentThread()
{
return (HANDLE)-2;
@@ -978,7 +802,7 @@ extern "C" UInt32_BOOL DuplicateHandle(
ASSERT(hSourceProcessHandle == GetCurrentProcess());
ASSERT(hTargetProcessHandle == GetCurrentProcess());
ASSERT(hSourceHandle == GetCurrentThread());
- *lpTargetHandle = new ThreadUnixHandle(pthread_self());
+ *lpTargetHandle = new (nothrow) ThreadUnixHandle(pthread_self());
return lpTargetHandle != nullptr;
}
@@ -1208,7 +1032,7 @@ __thread void* pStackHighOut = NULL;
__thread void* pStackLowOut = NULL;
// Retrieves the entire range of memory dedicated to the calling thread's stack. This does
-// not get the current dynamic bounds of the stack, which can be significantly smaller than
+// not get the current dynamic bounds of the stack, which can be significantly smaller than
// the maximum bounds.
REDHAWK_PALEXPORT bool PalGetMaximumStackBounds(_Out_ void** ppStackLowOut, _Out_ void** ppStackHighOut)
{
@@ -1253,7 +1077,7 @@ REDHAWK_PALEXPORT bool PalGetMaximumStackBounds(_Out_ void** ppStackLowOut, _Out
return true;
}
-// retrieves the full path to the specified module, if moduleBase is NULL retreieves the full path to the
+// retrieves the full path to the specified module, if moduleBase is NULL retreieves the full path to the
// executable module of the current process.
//
// Return value: number of characters in name string
@@ -1276,68 +1100,44 @@ void PalDebugBreak()
GCSystemInfo g_SystemInfo;
-void InitializeSystemInfo()
+// Initialize the g_SystemInfo
+bool InitializeSystemInfo()
{
- // TODO: Implement
- g_SystemInfo.dwNumberOfProcessors = 4;
+ long pagesize = getpagesize();
+ g_SystemInfo.dwPageSize = pagesize;
+ g_SystemInfo.dwAllocationGranularity = pagesize;
- g_SystemInfo.dwPageSize = OS_PAGE_SIZE;
- g_SystemInfo.dwAllocationGranularity = OS_PAGE_SIZE;
-}
-
-extern "C" void FlushProcessWriteBuffers()
-{
- // UNIXTODO: Implement
-}
-
-extern "C" uint32_t GetTickCount()
-{
- return PalGetTickCount();
-}
-
-int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
-{
- return __sync_add_and_fetch(lpAddend, 1);
-}
+ int nrcpus = 0;
-int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
-{
- return __sync_sub_and_fetch(lpAddend, 1);
-}
-
-int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
-{
- return __sync_swap(Target, Value);
-}
-
-int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand)
-{
- return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
-}
-
-int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value)
-{
- return __sync_fetch_and_add(Addend, Value);
-}
+#if HAVE_SYSCONF
+ nrcpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nrcpus < 1)
+ {
+ ASSERT_UNCONDITIONALLY("sysconf failed for _SC_NPROCESSORS_ONLN\n");
+ return false;
+ }
+#elif HAVE_SYSCTL
+ int mib[2];
-void * _FastInterlockExchangePointer(void * volatile *Target, void * Value)
-{
- return __sync_swap(Target, Value);
-}
+ size_t sz = sizeof(nrcpus);
+ mib[0] = CTL_HW;
+ mib[1] = HW_NCPU;
+ int rc = sysctl(mib, 2, &nrcpus, &sz, NULL, 0);
+ if (rc != 0)
+ {
+ ASSERT_UNCONDITIONALLY("sysctl failed for HW_NCPU\n");
+ return false;
+ }
+#endif // HAVE_SYSCONF
-void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand)
-{
- return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
-}
+ g_SystemInfo.dwNumberOfProcessors = nrcpus;
-void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
-{
- __sync_fetch_and_or(p, msk);
+ return true;
}
-void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
+extern "C" void FlushProcessWriteBuffers()
{
- __sync_fetch_and_and(p, msk);
+ // UNIXTODO: Implement
}
extern "C" UInt32_BOOL QueryPerformanceCounter(LARGE_INTEGER *lpPerformanceCount)
@@ -1366,16 +1166,6 @@ extern "C" uint32_t GetCurrentThreadId()
return 1;
}
-extern "C" uint32_t SetFilePointer(
- HANDLE hFile,
- int32_t lDistanceToMove,
- int32_t * lpDistanceToMoveHigh,
- uint32_t dwMoveMethod)
-{
- // TODO: Reimplement callers using CRT
- return 0;
-}
-
extern "C" UInt32_BOOL FlushFileBuffers(
HANDLE hFile)
{
@@ -1408,62 +1198,505 @@ extern "C" uint32_t GetLastError()
return 1;
}
-extern "C" uint32_t GetWriteWatch(
- uint32_t dwFlags,
- void* lpBaseAddress,
- size_t dwRegionSize,
- void** lpAddresses,
- uintptr_t * lpdwCount,
- uint32_t * lpdwGranularity
- )
+extern "C" UInt32 WaitForMultipleObjectsEx(UInt32, HANDLE *, UInt32_BOOL, UInt32, UInt32_BOOL)
{
- // TODO: Implement for background GC
- *lpAddresses = NULL;
- *lpdwCount = 0;
- // Until it is implemented, return non-zero value as an indicator of failure
- return 1;
+ PORTABILITY_ASSERT("UNIXTODO: Implement this function");
}
-extern "C" uint32_t ResetWriteWatch(
- void* lpBaseAddress,
- size_t dwRegionSize
- )
+// Initialize the interface implementation
+bool GCToOSInterface::Initialize()
{
- // TODO: Implement for background GC
- // Until it is implemented, return non-zero value as an indicator of failure
- return 1;
+ return true;
}
-extern "C" UInt32_BOOL VirtualUnlock(
- void* lpAddress,
- size_t dwSize
- )
+// Shutdown the interface implementation
+void GCToOSInterface::Shutdown()
{
- // TODO: Implement
- return UInt32_FALSE;
}
-void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection)
+// Get numeric id of the current thread if possible on the
+// current platform. It is indended for logging purposes only.
+// Return:
+// Numeric id of the current thread or 0 if the
+uint32_t GCToOSInterface::GetCurrentThreadIdForLogging()
{
- InitializeCriticalSection(lpCriticalSection);
+ return ::GetCurrentThreadId();
}
-void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection)
+// Get id of the process
+uint32_t GCToOSInterface::GetCurrentProcessId()
{
- EnterCriticalSection(lpCriticalSection);
+ return ::GetCurrentProcessId();
}
-void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection)
+// Set ideal affinity for the current thread
+// Parameters:
+// affinity - ideal processor affinity for the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
{
- LeaveCriticalSection(lpCriticalSection);
+ return false;
}
-void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
+// Get the number of the current processor
+uint32_t GCToOSInterface::GetCurrentProcessorNumber()
{
- DeleteCriticalSection(lpCriticalSection);
+ // UNIXTODO: implement this method
+ return 0;
}
-extern "C" UInt32 WaitForMultipleObjectsEx(UInt32, HANDLE *, UInt32_BOOL, UInt32, UInt32_BOOL)
+// Check if the OS supports getting current processor number
+bool GCToOSInterface::CanGetCurrentProcessorNumber()
{
- PORTABILITY_ASSERT("UNIXTODO: Implement this function");
+ return false;
+}
+
+// Flush write buffers of processors that are executing threads of the current process
+void GCToOSInterface::FlushProcessWriteBuffers()
+{
+ return ::FlushProcessWriteBuffers();
+}
+
+// Break into a debugger
+void GCToOSInterface::DebugBreak()
+{
+ __debugbreak();
+}
+
+// Get number of logical processors
+uint32_t GCToOSInterface::GetLogicalCpuCount()
+{
+ return g_cLogicalCpus;
+}
+
+// Causes the calling thread to sleep for the specified number of milliseconds
+// Parameters:
+// sleepMSec - time to sleep before switching to another thread
+void GCToOSInterface::Sleep(uint32_t sleepMSec)
+{
+ PalSleep(sleepMSec);
+}
+
+// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+// Parameters:
+// switchCount - number of times the YieldThread was called in a loop
+void GCToOSInterface::YieldThread(uint32_t switchCount)
+{
+ // UNIXTODO: handle the switchCount
+ YieldProcessor();
+}
+
+// Reserve virtual memory range.
+// Parameters:
+// address - starting virtual address, it can be NULL to let the function choose the starting address
+// size - size of the virtual memory range
+// alignment - requested memory alignment, 0 means no specific alignment requested
+// flags - flags to control special settings like write watching
+// Return:
+// Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+{
+ ASSERT_MSG(!(flags & VirtualReserveFlags::WriteWatch), "WriteWatch not supported on Unix");
+
+ if (alignment == 0)
+ {
+ alignment = OS_PAGE_SIZE;
+ }
+
+ size_t alignedSize = size + (alignment - OS_PAGE_SIZE);
+
+ void * pRetVal = mmap(address, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+
+ if (pRetVal != NULL)
+ {
+ void * pAlignedRetVal = (void *)(((size_t)pRetVal + (alignment - 1)) & ~(alignment - 1));
+ size_t startPadding = (size_t)pAlignedRetVal - (size_t)pRetVal;
+ if (startPadding != 0)
+ {
+ int ret = munmap(pRetVal, startPadding);
+ ASSERT(ret == 0);
+ }
+
+ size_t endPadding = alignedSize - (startPadding + size);
+ if (endPadding != 0)
+ {
+ int ret = munmap((void *)((size_t)pAlignedRetVal + size), endPadding);
+ ASSERT(ret == 0);
+ }
+
+ pRetVal = pAlignedRetVal;
+ }
+
+ return pRetVal;
+}
+
+// Release virtual memory range previously reserved using VirtualReserve
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualRelease(void* address, size_t size)
+{
+ int ret = munmap(address, size);
+
+ return (ret == 0);
+}
+
+// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+{
+ return mprotect(address, size, PROT_WRITE | PROT_READ) == 0;
+}
+
+// Decomit virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
+{
+ return mprotect(address, size, PROT_NONE) == 0;
+}
+
+// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+// longer of interest, but it should not be decommitted.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// unlock - true if the memory range should also be unlocked
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
+{
+ // UNIXTODO: Implement this
+ return true;
+}
+
+// Check if the OS supports write watching
+bool GCToOSInterface::SupportsWriteWatch()
+{
+ return PalHasCapability(WriteWatchCapability);
+}
+
+// Reset the write tracking state for the specified virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
+{
+}
+
+// Retrieve addresses of the pages that are written to in a region of virtual memory
+// Parameters:
+// resetState - true indicates to reset the write tracking state
+// address - starting virtual address
+// size - size of the virtual memory range
+// pageAddresses - buffer that receives an array of page addresses in the memory region
+// pageAddressesCount - on input, size of the lpAddresses array, in array elements
+// on output, the number of page addresses that are returned in the array.
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
+{
+ return false;
+}
+
+// Get size of the largest cache on the processor die
+// Parameters:
+// trueSize - true to return true cache size, false to return scaled up size based on
+// the processor architecture
+// Return:
+// Size of the cache
+size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
+{
+ // UNIXTODO: implement this
+ return 0;
+}
+
+// Get affinity mask of the current process
+// Parameters:
+// processMask - affinity mask for the specified process
+// systemMask - affinity mask for the system
+// Return:
+// true if it has succeeded, false if it has failed
+// Remarks:
+// A process affinity mask is a bit vector in which each bit represents the processors that
+// a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+// represents the processors that are configured into a system.
+// A process affinity mask is a subset of the system affinity mask. A process is only allowed
+// to run on the processors configured into a system. Therefore, the process affinity mask cannot
+// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask)
+{
+ return false;
+}
+
+// Get number of processors assigned to the current process
+// Return:
+// The number of processors
+uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
+{
+ return ::PalGetProcessCpuCount();
+}
+
+// Get global memory status
+// Parameters:
+// ms - pointer to the structure that will be filled in with the memory status
+void GCToOSInterface::GetMemoryStatus(GCMemoryStatus* ms)
+{
+ ms->dwMemoryLoad = 0;
+ ms->ullTotalPhys = 0;
+ ms->ullAvailPhys = 0;
+ ms->ullTotalPageFile = 0;
+ ms->ullAvailPageFile = 0;
+ ms->ullTotalVirtual = 0;
+ ms->ullAvailVirtual = 0;
+
+ UInt32_BOOL fRetVal = UInt32_FALSE;
+
+ // Get the physical memory size
+#if HAVE_SYSCONF && HAVE__SC_PHYS_PAGES
+ int64_t physical_memory;
+
+ // Get the Physical memory size
+ physical_memory = sysconf( _SC_PHYS_PAGES ) * sysconf( _SC_PAGE_SIZE );
+ ms->ullTotalPhys = (uint64_t)physical_memory;
+ fRetVal = UInt32_TRUE;
+#elif HAVE_SYSCTL
+ int mib[2];
+ int64_t physical_memory;
+ size_t length;
+
+ // Get the Physical memory size
+ mib[0] = CTL_HW;
+ mib[1] = HW_MEMSIZE;
+ length = sizeof(int64_t);
+ int rc = sysctl(mib, 2, &physical_memory, &length, NULL, 0);
+ if (rc != 0)
+ {
+ ASSERT_UNCONDITIONALLY("sysctl failed for HW_MEMSIZE\n");
+ }
+ else
+ {
+ ms->ullTotalPhys = (uint64_t)physical_memory;
+ fRetVal = UInt32_TRUE;
+ }
+#elif // HAVE_SYSINFO
+ // TODO: implement getting memory details via sysinfo. On Linux, it provides swap file details that
+ // we can use to fill in the xxxPageFile members.
+
+#endif // HAVE_SYSCONF
+
+ // Get the physical memory in use - from it, we can get the physical memory available.
+ // We do this only when we have the total physical memory available.
+ if (ms->ullTotalPhys > 0)
+ {
+#ifndef __APPLE__
+ ms->ullAvailPhys = sysconf(SYSCONF_PAGES) * sysconf(_SC_PAGE_SIZE);
+ int64_t used_memory = ms->ullTotalPhys - ms->ullAvailPhys;
+ ms->dwMemoryLoad = (uint32_t)((used_memory * 100) / ms->ullTotalPhys);
+#else
+ vm_size_t page_size;
+ mach_port_t mach_port;
+ mach_msg_type_number_t count;
+ vm_statistics_data_t vm_stats;
+ mach_port = mach_host_self();
+ count = sizeof(vm_stats) / sizeof(natural_t);
+ if (KERN_SUCCESS == host_page_size(mach_port, &page_size))
+ {
+ if (KERN_SUCCESS == host_statistics(mach_port, HOST_VM_INFO, (host_info_t)&vm_stats, &count))
+ {
+ ms->ullAvailPhys = (int64_t)vm_stats.free_count * (int64_t)page_size;
+ int64_t used_memory = ((int64_t)vm_stats.active_count + (int64_t)vm_stats.inactive_count + (int64_t)vm_stats.wire_count) * (int64_t)page_size;
+ ms->dwMemoryLoad = (uint32_t)((used_memory * 100) / ms->ullTotalPhys);
+ }
+ }
+ mach_port_deallocate(mach_task_self(), mach_port);
+#endif // __APPLE__
+ }
+
+ // There is no API to get the total virtual address space size on
+ // Unix, so we use a constant value representing 128TB, which is
+ // the approximate size of total user virtual address space on
+ // the currently supported Unix systems.
+ static const uint64_t _128TB = (1ull << 47);
+ ms->ullTotalVirtual = _128TB;
+ ms->ullAvailVirtual = ms->ullAvailPhys;
+
+ // UNIXTODO: failfast for !fRetVal?
+}
+
+// Get a high precision performance counter
+// Return:
+// The counter value
+int64_t GCToOSInterface::QueryPerformanceCounter()
+{
+ LARGE_INTEGER ts;
+ if (!::QueryPerformanceCounter(&ts))
+ {
+ DebugBreak();
+ ASSERT_UNCONDITIONALLY("Fatal Error - cannot query performance counter.");
+ abort();
+ }
+
+ return ts.QuadPart;
+}
+
+// Get a frequency of the high precision performance counter
+// Return:
+// The counter frequency
+int64_t GCToOSInterface::QueryPerformanceFrequency()
+{
+ LARGE_INTEGER frequency;
+ if (!::QueryPerformanceFrequency(&frequency))
+ {
+ DebugBreak();
+ ASSERT_UNCONDITIONALLY("Fatal Error - cannot query performance counter.");
+ abort();
+ }
+
+ return frequency.QuadPart;
+}
+
+// Get a time stamp with a low precision
+// Return:
+// Time stamp in milliseconds
+uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
+{
+ return PalGetTickCount();
+}
+
+// Parameters of the GC thread stub
+struct GCThreadStubParam
+{
+ GCThreadFunction GCThreadFunction;
+ void* GCThreadParam;
+};
+
+// GC thread stub to convert GC thread function to an OS specific thread function
+static void* GCThreadStub(void* param)
+{
+ GCThreadStubParam *stubParam = (GCThreadStubParam*)param;
+ GCThreadFunction function = stubParam->GCThreadFunction;
+ void* threadParam = stubParam->GCThreadParam;
+
+ delete stubParam;
+
+ function(threadParam);
+
+ return NULL;
+}
+
+// Create a new thread for GC use
+// Parameters:
+// function - the function to be executed by the thread
+// param - parameters of the thread
+// affinity - processor affinity of the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity)
+{
+ NewHolder<GCThreadStubParam> stubParam = new (nothrow) GCThreadStubParam();
+ if (stubParam == NULL)
+ {
+ return false;
+ }
+
+ stubParam->GCThreadFunction = function;
+ stubParam->GCThreadParam = param;
+
+ pthread_attr_t attrs;
+
+ int st = pthread_attr_init(&attrs);
+ ASSERT(st == 0);
+
+ // Create the thread as detached, that means not joinable
+ st = pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
+ ASSERT(st == 0);
+
+ pthread_t threadId;
+ st = pthread_create(&threadId, &attrs, GCThreadStub, stubParam);
+
+ if (st == 0)
+ {
+ stubParam.SuppressRelease();
+ }
+
+ int st2 = pthread_attr_destroy(&attrs);
+ ASSERT(st2 == 0);
+
+ return (st == 0);
+}
+
+// Open a file
+// Parameters:
+// filename - name of the file to open
+// mode - mode to open the file in (like in the CRT fopen)
+// Return:
+// FILE* of the opened file
+FILE* GCToOSInterface::OpenFile(const WCHAR* filename, const WCHAR* mode)
+{
+ int filenameLen = wcslen(filename);
+ int modeLen = wcslen(mode);
+
+ int charFilenameLen = filenameLen * 3;
+ int charModeLen = modeLen * 3;
+
+ NewArrayHolder<char> charFilename = new (nothrow) char [charFilenameLen + 1];
+ if (charFilename == NULL)
+ {
+ return NULL;
+ }
+
+ NewArrayHolder<char> charMode = new (nothrow) char [charModeLen + 1];
+ if (charMode == NULL)
+ {
+ return NULL;
+ }
+
+ if (WideCharToUTF8(filename, filenameLen + 1, charFilename, charFilenameLen + 1) == 0)
+ {
+ return NULL;
+ }
+
+ if (WideCharToUTF8(mode, modeLen + 1, charMode, charModeLen + 1) == 0)
+ {
+ return NULL;
+ }
+
+ return fopen(charFilename, charMode);
+}
+
+// Initialize the critical section
+void CLRCriticalSection::Initialize()
+{
+ int st = pthread_mutex_init(&m_cs.mutex, NULL);
+ ASSERT(st == 0);
+}
+
+// Destroy the critical section
+void CLRCriticalSection::Destroy()
+{
+ int st = pthread_mutex_destroy(&m_cs.mutex);
+ ASSERT(st == 0);
+}
+
+// Enter the critical section. Blocks until the section can be entered.
+void CLRCriticalSection::Enter()
+{
+ pthread_mutex_lock(&m_cs.mutex);
+}
+
+// Leave the critical section
+void CLRCriticalSection::Leave()
+{
+ pthread_mutex_unlock(&m_cs.mutex);
}
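
The new Unix GCToOSInterface::VirtualReserve above implements alignment the classic way: over-reserve with mmap, then munmap the misaligned head and the leftover tail. The standalone sketch below restates the technique under the assumption of a 4 KB page size; note that it tests mmap's result against MAP_FAILED, the portable failure check, whereas the code above compares against NULL.

#include <sys/mman.h>
#include <cstddef>

static const size_t kPageSize = 4096;        // assumed page size for this sketch

void* ReserveAligned(size_t size, size_t alignment)
{
    // Reserving (size + alignment - pageSize) bytes guarantees that an
    // alignment-aligned sub-range of `size` bytes exists inside the mapping.
    size_t paddedSize = size + (alignment - kPageSize);

    void* p = mmap(nullptr, paddedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
    if (p == MAP_FAILED)
        return nullptr;

    size_t aligned = ((size_t)p + (alignment - 1)) & ~(alignment - 1);

    size_t head = aligned - (size_t)p;          // misaligned prefix, if any
    size_t tail = paddedSize - (head + size);   // leftover suffix, if any

    if (head != 0)
        munmap(p, head);
    if (tail != 0)
        munmap((void*)(aligned + size), tail);

    return (void*)aligned;
}
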
diff --git a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
index cc7fe76fd..86d81b2e0 100644
--- a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
+++ b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
@@ -19,6 +19,10 @@
#include <errno.h>
#include <evntprov.h>
+#include "holder.h"
+
+#define PalRaiseFailFastException RaiseFailFastException
+
uint32_t PalEventWrite(REGHANDLE arg1, const EVENT_DESCRIPTOR * arg2, uint32_t arg3, EVENT_DATA_DESCRIPTOR * arg4)
{
return EventWrite(arg1, arg2, arg3, arg4);
@@ -39,6 +43,20 @@ extern "C" UInt32 __stdcall NtGetCurrentProcessorNumber();
static DWORD g_dwPALCapabilities;
+GCSystemInfo g_SystemInfo;
+
+bool InitializeSystemInfo()
+{
+ SYSTEM_INFO systemInfo;
+ GetSystemInfo(&systemInfo);
+
+ g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
+ g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
+ g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
+
+ return true;
+}
+
extern bool PalQueryProcessorTopology();
REDHAWK_PALEXPORT void __cdecl PalPrintf(_In_z_ _Printf_format_string_ const char * szFormat, ...);
@@ -154,25 +172,6 @@ REDHAWK_PALEXPORT UInt32 REDHAWK_PALAPI PalCompatibleWaitAny(UInt32_BOOL alertab
}
}
-REDHAWK_PALIMPORT UInt32_BOOL REDHAWK_PALAPI PalGlobalMemoryStatusEx(_Out_ GCMemoryStatus* pGCMemStatus)
-{
- MEMORYSTATUSEX memStatus;
- memStatus.dwLength = sizeof(MEMORYSTATUSEX);
-
- UInt32_BOOL result = GlobalMemoryStatusEx(&memStatus);
-
- // Convert Windows struct to abstract struct
- pGCMemStatus->dwMemoryLoad = memStatus.dwMemoryLoad ;
- pGCMemStatus->ullTotalPhys = memStatus.ullTotalPhys ;
- pGCMemStatus->ullAvailPhys = memStatus.ullAvailPhys ;
- pGCMemStatus->ullTotalPageFile = memStatus.ullTotalPageFile ;
- pGCMemStatus->ullAvailPageFile = memStatus.ullAvailPageFile ;
- pGCMemStatus->ullTotalVirtual = memStatus.ullTotalVirtual ;
- pGCMemStatus->ullAvailVirtual = memStatus.ullAvailVirtual ;
-
- return result;
-}
-
REDHAWK_PALEXPORT void REDHAWK_PALAPI PalSleep(UInt32 milliseconds)
{
return Sleep(milliseconds);
@@ -183,11 +182,6 @@ REDHAWK_PALEXPORT UInt32_BOOL REDHAWK_PALAPI PalSwitchToThread()
return SwitchToThread();
}
-REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalCreateMutexW(_In_opt_ LPSECURITY_ATTRIBUTES pMutexAttributes, UInt32_BOOL initialOwner, _In_opt_z_ LPCWSTR pName)
-{
- return CreateMutexW(pMutexAttributes, initialOwner, pName);
-}
-
REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalCreateEventW(_In_opt_ LPSECURITY_ATTRIBUTES pEventAttributes, UInt32_BOOL manualReset, UInt32_BOOL initialState, _In_opt_z_ LPCWSTR pName)
{
return CreateEventW(pEventAttributes, manualReset, initialState, pName);
@@ -336,22 +330,6 @@ REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalCreateFileW(
creationDisposition, flagsAndAttributes, hTemplateFile);
}
-REDHAWK_PALEXPORT UInt32 REDHAWK_PALAPI PalGetWriteWatch(
- UInt32 flags,
- _In_ void* pBaseAddress,
- UIntNative regionSize,
- _Out_writes_to_opt_(*pCount, *pCount) void** pAddresses,
- _Inout_opt_ UIntNative* pCount,
- _Inout_opt_ UInt32* pGranularity)
-{
- return GetWriteWatch(flags, pBaseAddress, regionSize, pAddresses, (ULONG_PTR *)pCount, (LPDWORD)pGranularity);
-}
-
-REDHAWK_PALEXPORT UInt32 REDHAWK_PALAPI PalResetWriteWatch(_In_ void* pBaseAddress, UIntNative regionSize)
-{
- return ResetWriteWatch(pBaseAddress, regionSize);
-}
-
REDHAWK_PALEXPORT HANDLE REDHAWK_PALAPI PalCreateLowMemoryNotification()
{
return CreateMemoryResourceNotification(LowMemoryResourceNotification);
@@ -1232,18 +1210,6 @@ void PalDebugBreak()
__debugbreak();
}
-// Functions called by the GC to obtain our cached values for number of logical processors and cache size.
-REDHAWK_PALEXPORT UInt32 REDHAWK_PALAPI PalGetLogicalCpuCount()
-{
- return g_cLogicalCpus;
-}
-
-REDHAWK_PALEXPORT size_t REDHAWK_PALAPI PalGetLargestOnDieCacheSize(UInt32_BOOL bTrueSize)
-{
- return bTrueSize ? g_cbLargestOnDieCache
- : g_cbLargestOnDieCacheAdjusted;
-}
-
REDHAWK_PALEXPORT _Ret_maybenull_ _Post_writable_byte_size_(size) void* REDHAWK_PALAPI PalVirtualAlloc(_In_opt_ void* pAddress, UIntNative size, UInt32 allocationType, UInt32 protect)
{
return VirtualAlloc(pAddress, size, allocationType, protect);
@@ -1267,99 +1233,451 @@ REDHAWK_PALEXPORT _Ret_maybenull_ void* REDHAWK_PALAPI PalSetWerDataBuffer(_In_
return InterlockedExchangePointer(&pBuffer, pNewBuffer);
}
+typedef uint32_t (WINAPI *GetCurrentProcessorNumber_t)(void);
+static GetCurrentProcessorNumber_t g_GetCurrentProcessorNumber = NULL;
+static LARGE_INTEGER performanceFrequency;
-//
-// Code seeded from gcenv.windows.cpp
-//
+// Initialize the interface implementation
+bool GCToOSInterface::Initialize()
+{
+ if (!::QueryPerformanceFrequency(&performanceFrequency))
+ {
+ return false;
+ }
+ if (PalHasCapability(GetCurrentProcessorNumberCapability))
+ {
+ g_GetCurrentProcessorNumber = PalGetCurrentProcessorNumber;
+ }
-#ifdef _X86_
-EXTERN_C long _InterlockedOr(long volatile *, long);
-#pragma intrinsic (_InterlockedOr)
-#define InterlockedOr _InterlockedOr
+ return true;
+}
-EXTERN_C long _InterlockedAnd(long volatile *, long);
-#pragma intrinsic(_InterlockedAnd)
-#define InterlockedAnd _InterlockedAnd
-#endif // _X86_
+// Shutdown the interface implementation
+void GCToOSInterface::Shutdown()
+{
+}
-int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
+// Get numeric id of the current thread if possible on the
+// current platform. It is indended for logging purposes only.
+// Return:
+// Numeric id of the current thread or 0 if the
+uint32_t GCToOSInterface::GetCurrentThreadIdForLogging()
{
- return InterlockedIncrement((LONG *)lpAddend);
+ return ::GetCurrentThreadId();
}
-int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
+// Get id of the process
+uint32_t GCToOSInterface::GetCurrentProcessId()
{
- return InterlockedDecrement((LONG *)lpAddend);
+ return ::GetCurrentProcessId();
}
-int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
+// Set ideal affinity for the current thread
+// Parameters:
+// affinity - ideal processor affinity for the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
{
- return InterlockedExchange((LONG *)Target, Value);
+ bool success = true;
+
+ PROCESSOR_NUMBER proc;
+
+ if (affinity->Group != -1)
+ {
+ proc.Group = (WORD)affinity->Group;
+ proc.Number = (BYTE)affinity->Processor;
+ proc.Reserved = 0;
+
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ else
+ {
+ if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
+ {
+ proc.Number = affinity->Processor;
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ }
+
+ return success;
}
-int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand)
+// Get the number of the current processor
+uint32_t GCToOSInterface::GetCurrentProcessorNumber()
{
- return InterlockedCompareExchange((LONG *)Destination, Exchange, Comperand);
+ _ASSERTE(GCToOSInterface::CanGetCurrentProcessorNumber());
+ return g_GetCurrentProcessorNumber();
}
-int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value)
+// Check if the OS supports getting current processor number
+bool GCToOSInterface::CanGetCurrentProcessorNumber()
{
- return InterlockedExchangeAdd((LONG *)Addend, Value);
+ return g_GetCurrentProcessorNumber != NULL;
}
-void * _FastInterlockExchangePointer(void * volatile *Target, void * Value)
+// Flush write buffers of processors that are executing threads of the current process
+void GCToOSInterface::FlushProcessWriteBuffers()
{
- return InterlockedExchangePointer(Target, Value);
+ ::FlushProcessWriteBuffers();
}
-void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand)
+// Break into a debugger
+void GCToOSInterface::DebugBreak()
{
- return InterlockedCompareExchangePointer(Destination, Exchange, Comperand);
+ ::DebugBreak();
}
-void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
+// Get number of logical processors
+uint32_t GCToOSInterface::GetLogicalCpuCount()
{
- InterlockedOr((LONG volatile *)p, msk);
+ return g_cLogicalCpus;
}
-void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
+bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount);
+
+// Causes the calling thread to sleep for the specified number of milliseconds
+// Parameters:
+// sleepMSec - time to sleep before switching to another thread
+void GCToOSInterface::Sleep(uint32_t sleepMSec)
{
- InterlockedAnd((LONG volatile *)p, msk);
+ __SwitchToThread(sleepMSec, 0);
}
+// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+// Parameters:
+// switchCount - number of times the YieldThread was called in a loop
+void GCToOSInterface::YieldThread(uint32_t switchCount)
+{
+ __SwitchToThread(0, switchCount);
+}
-void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection)
+// Reserve virtual memory range.
+// Parameters:
+// address - starting virtual address, it can be NULL to let the function choose the starting address
+// size - size of the virtual memory range
+// alignment - requested memory alignment
+// flags - flags to control special settings like write watching
+// Return:
+// Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
{
- InitializeCriticalSection(lpCriticalSection);
+ DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE;
+ return ::VirtualAlloc(0, size, memFlags, PAGE_READWRITE);
}
-void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection)
+// Release virtual memory range previously reserved using VirtualReserve
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualRelease(void* address, size_t size)
{
- EnterCriticalSection(lpCriticalSection);
+ UNREFERENCED_PARAMETER(size);
+ return !!::VirtualFree(address, 0, MEM_RELEASE);
}
-void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection)
+// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualCommit(void* address, size_t size)
{
- LeaveCriticalSection(lpCriticalSection);
+ return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
}
-void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
+// Decommit virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
{
- DeleteCriticalSection(lpCriticalSection);
+ return !!::VirtualFree(address, size, MEM_DECOMMIT);
}
+// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+// longer of interest, but it should not be decommitted.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// unlock - true if the memory range should also be unlocked
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
+{
+ bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL;
+ if (success && unlock)
+ {
+ // Remove the page range from the working set
+ ::VirtualUnlock(address, size);
+ }
-GCSystemInfo g_SystemInfo;
+ return success;
+}
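
Note that this Windows implementation ignores the address and alignment arguments to VirtualReserve and always lets VirtualAlloc choose the base, which matches how the GC currently calls it. The four functions form a simple lifecycle; a minimal sketch, with the 64 KB size chosen purely for illustration:

// Reserve a range, commit it, then tear everything down again.
void* base = GCToOSInterface::VirtualReserve(NULL, 64 * 1024, 0, VirtualReserveFlags::None);
if (base != NULL)
{
    if (GCToOSInterface::VirtualCommit(base, 64 * 1024))
    {
        // ... use the memory ...
        GCToOSInterface::VirtualDecommit(base, 64 * 1024);
    }
    GCToOSInterface::VirtualRelease(base, 64 * 1024);
}
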
-void InitializeSystemInfo()
+// Check if the OS supports write watching
+bool GCToOSInterface::SupportsWriteWatch()
{
- SYSTEM_INFO systemInfo;
- GetSystemInfo(&systemInfo);
+ return PalHasCapability(WriteWatchCapability);
+}
- g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
- g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
- g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
+// Reset the write tracking state for the specified virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
+{
+ ::ResetWriteWatch(address, size);
}
+// Retrieve addresses of the pages that are written to in a region of virtual memory
+// Parameters:
+// resetState - true indicates to reset the write tracking state
+// address - starting virtual address
+// size - size of the virtual memory range
+// pageAddresses - buffer that receives an array of page addresses in the memory region
+// pageAddressesCount - on input, size of the lpAddresses array, in array elements
+// on output, the number of page addresses that are returned in the array.
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
+{
+ uint32_t flags = resetState ? 1 : 0;
+ ULONG granularity;
+
+ bool success = ::GetWriteWatch(flags, address, size, pageAddresses, (ULONG_PTR*)pageAddressesCount, &granularity) == 0;
+ _ASSERTE (granularity == OS_PAGE_SIZE);
+
+ return success;
+}
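
The write-watch functions are used as a trio: reserve with VirtualReserveFlags::WriteWatch, harvest dirty pages with GetWriteWatch, and clear state with ResetWriteWatch (or by passing resetState = true). A sketch, assuming the region was reserved with write watching and ignoring the case where more pages are dirty than the buffer holds:

// Visit each OS page dirtied in [region, region + regionSize) since the last reset.
void VisitDirtyPages(void* region, size_t regionSize)
{
    void* pages[256];
    uintptr_t count = sizeof(pages) / sizeof(pages[0]); // in: capacity; out: pages returned
    if (GCToOSInterface::GetWriteWatch(true /* resetState */, region, regionSize, pages, &count))
    {
        for (uintptr_t i = 0; i < count; i++)
        {
            // pages[i] is the base address of one dirtied page
        }
    }
}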
+
+// Get size of the largest cache on the processor die
+// Parameters:
+// trueSize - true to return true cache size, false to return scaled up size based on
+// the processor architecture
+// Return:
+// Size of the cache
+size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
+{
+ return trueSize ? g_cbLargestOnDieCache : g_cbLargestOnDieCacheAdjusted;
+}
+
+// Get affinity mask of the current process
+// Parameters:
+// processMask - affinity mask for the specified process
+// systemMask - affinity mask for the system
+// Return:
+// true if it has succeeded, false if it has failed
+// Remarks:
+// A process affinity mask is a bit vector in which each bit represents the processors that
+// a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+// represents the processors that are configured into a system.
+// A process affinity mask is a subset of the system affinity mask. A process is only allowed
+// to run on the processors configured into a system. Therefore, the process affinity mask cannot
+// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask)
+{
+ return false;
+}
+
+// Get number of processors assigned to the current process
+// Return:
+// The number of processors
+uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
+{
+ static int cCPUs = 0;
+
+ if (cCPUs != 0)
+ return cCPUs;
+
+ DWORD_PTR pmask, smask;
+
+ if (!GetProcessAffinityMask(GetCurrentProcess(), &pmask, &smask))
+ return 1;
+
+ if (pmask == 1)
+ return 1;
+
+ pmask &= smask;
+
+ int count = 0;
+ while (pmask)
+ {
+ if (pmask & 1)
+ count++;
+
+ pmask >>= 1;
+ }
+
+ // GetProcessAffinityMask can return pmask=0 and smask=0 on systems with more
+ // than 64 processors, which would leave us with a count of 0. Since the GC
+ // expects there to be at least one processor to run on (and thus at least one
+ // heap), we'll return 64 here if count is 0, since there are likely a ton of
+ // processors available in that case. The GC also cannot (currently) handle
+ // the case where there are more than 64 processors, so we will return a
+ // maximum of 64 here.
+ if (count == 0 || count > 64)
+ count = 64;
+
+ cCPUs = count;
+
+ return count;
+}
+
+// Get global memory status
+// Parameters:
+// ms - pointer to the structure that will be filled in with the memory status
+void GCToOSInterface::GetMemoryStatus(GCMemoryStatus* ms)
+{
+ MEMORYSTATUSEX memStatus;
+ memStatus.dwLength = sizeof(MEMORYSTATUSEX);
+
+ // TODO: fail fast if the function call fails?
+ GlobalMemoryStatusEx(&memStatus);
+
+ // Convert Windows struct to abstract struct
+ ms->dwMemoryLoad = memStatus.dwMemoryLoad;
+ ms->ullTotalPhys = memStatus.ullTotalPhys;
+ ms->ullAvailPhys = memStatus.ullAvailPhys;
+ ms->ullTotalPageFile = memStatus.ullTotalPageFile;
+ ms->ullAvailPageFile = memStatus.ullAvailPageFile;
+ ms->ullTotalVirtual = memStatus.ullTotalVirtual;
+ ms->ullAvailVirtual = memStatus.ullAvailVirtual;
+}
+
+// Get a high precision performance counter
+// Return:
+// The counter value
+int64_t GCToOSInterface::QueryPerformanceCounter()
+{
+ LARGE_INTEGER ts;
+ if (!::QueryPerformanceCounter(&ts))
+ {
+ ASSERT_UNCONDITIONALLY("Fatal Error - cannot query performance counter.");
+ RhFailFast();
+ }
+
+ return ts.QuadPart;
+}
+
+// Get the frequency of the high precision performance counter
+// Return:
+// The counter frequency
+int64_t GCToOSInterface::QueryPerformanceFrequency()
+{
+ LARGE_INTEGER frequency;
+ if (!::QueryPerformanceFrequency(&frequency))
+ {
+        ASSERT_UNCONDITIONALLY("Fatal Error - cannot query performance frequency.");
+ RhFailFast();
+ }
+
+ return frequency.QuadPart;
+}
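
Together these two calls give elapsed wall-clock time: counter deltas divided by the frequency yield seconds. A minimal sketch (the multiply-before-divide can overflow for very long intervals):

// Measure an interval in milliseconds with the high precision counter.
int64_t freq  = GCToOSInterface::QueryPerformanceFrequency();
int64_t start = GCToOSInterface::QueryPerformanceCounter();
// ... work being timed ...
int64_t elapsedMs = (GCToOSInterface::QueryPerformanceCounter() - start) * 1000 / freq;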
+
+// Get a time stamp with a low precision
+// Return:
+// Time stamp in milliseconds
+uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
+{
+ return ::GetTickCount();
+}
+
+// Parameters of the GC thread stub
+struct GCThreadStubParam
+{
+ GCThreadFunction GCThreadFunction;
+ void* GCThreadParam;
+};
+
+// GC thread stub to convert GC thread function to an OS specific thread function
+static DWORD WINAPI GCThreadStub(void* param)
+{
+ GCThreadStubParam *stubParam = (GCThreadStubParam*)param;
+ GCThreadFunction function = stubParam->GCThreadFunction;
+ void* threadParam = stubParam->GCThreadParam;
+
+ delete stubParam;
+
+ function(threadParam);
+
+ return 0;
+}
+
+// Create a new thread for GC use
+// Parameters:
+// function - the function to be executed by the thread
+// param - parameters of the thread
+// affinity - processor affinity of the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity)
+{
+ NewHolder<GCThreadStubParam> stubParam = new (nothrow) GCThreadStubParam();
+ if (stubParam == NULL)
+ {
+ return false;
+ }
+
+ stubParam->GCThreadFunction = function;
+ stubParam->GCThreadParam = param;
+
+ DWORD thread_id;
+    HANDLE gc_thread = ::CreateThread(0, 4096, GCThreadStub, stubParam, CREATE_SUSPENDED, &thread_id);
+
+ if (!gc_thread)
+ {
+ return false;
+ }
+
+ stubParam.SuppressRelease();
+
+    SetThreadPriority(gc_thread, THREAD_PRIORITY_HIGHEST); // alternative: THREAD_PRIORITY_ABOVE_NORMAL
+
+ ResumeThread(gc_thread);
+ CloseHandle(gc_thread);
+
+ return true;
+}
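
From the caller's side the stub is invisible: the GC hands over a plain function and a context pointer. A hedged usage sketch; MyGCWorkerLoop is hypothetical:

// Hypothetical GC worker entry point and its launch.
static void MyGCWorkerLoop(void* param)
{
    // param is whatever was passed to CreateThread below
}

GCThreadAffinity affinity;
affinity.Group = GCThreadAffinity::None;
affinity.Processor = GCThreadAffinity::None;
bool started = GCToOSInterface::CreateThread(MyGCWorkerLoop, NULL /* param */, &affinity);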
+
+// Open a file
+// Parameters:
+// filename - name of the file to open
+// mode - mode to open the file in (like in the CRT fopen)
+// Return:
+// FILE* of the opened file
+FILE* GCToOSInterface::OpenFile(const WCHAR* filename, const WCHAR* mode)
+{
+ return _wfopen(filename, mode);
+}
+
+// Initialize the critical section
+void CLRCriticalSection::Initialize()
+{
+ InitializeCriticalSection(&m_cs);
+}
+
+// Destroy the critical section
+void CLRCriticalSection::Destroy()
+{
+ DeleteCriticalSection(&m_cs);
+}
+
+// Enter the critical section. Blocks until the section can be entered.
+void CLRCriticalSection::Enter()
+{
+ EnterCriticalSection(&m_cs);
+}
+
+// Leave the critical section
+void CLRCriticalSection::Leave()
+{
+ LeaveCriticalSection(&m_cs);
+}
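
CLRCriticalSection is deliberately bare; callers that want exception-safe locking can layer a scoped holder on top. A minimal RAII sketch (this holder class is an assumption, not part of this change):

// Scoped lock: enters on construction, leaves on destruction.
class CLRCriticalSectionHolder
{
    CLRCriticalSection& m_cs;
public:
    explicit CLRCriticalSectionHolder(CLRCriticalSection& cs) : m_cs(cs) { m_cs.Enter(); }
    ~CLRCriticalSectionHolder() { m_cs.Leave(); }
};
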
diff --git a/src/Native/gc/env/common.h b/src/Native/gc/env/common.h
index 3e982f8f6..39e97b3e7 100644
--- a/src/Native/gc/env/common.h
+++ b/src/Native/gc/env/common.h
@@ -22,7 +22,7 @@
#include <new>
-#ifndef WIN32
+#ifdef PLATFORM_UNIX
#include <pthread.h>
#endif
diff --git a/src/Native/gc/env/gcenv.base.h b/src/Native/gc/env/gcenv.base.h
index 5b8f5f7dd..d1b6d505a 100644
--- a/src/Native/gc/env/gcenv.base.h
+++ b/src/Native/gc/env/gcenv.base.h
@@ -16,11 +16,17 @@
#define REDHAWK_PALIMPORT extern "C"
#define REDHAWK_PALAPI __stdcall
-
#ifndef _MSC_VER
#define __stdcall
+#ifdef __clang__
+#define __forceinline __attribute__((always_inline)) inline
+#else // __clang__
#define __forceinline inline
-#endif
+#endif // __clang__
+#endif // !_MSC_VER
+
+#define SIZE_T_MAX ((size_t)-1)
+#define SSIZE_T_MAX ((ptrdiff_t)(SIZE_T_MAX / 2))
#ifndef _INC_WINDOWS
// -----------------------------------------------------------------------------------------------------------
@@ -44,17 +50,14 @@ typedef size_t SIZE_T;
typedef void * HANDLE;
-#define SIZE_T_MAX ((size_t)-1)
-#define SSIZE_T_MAX ((ptrdiff_t)(SIZE_T_MAX / 2))
-
// -----------------------------------------------------------------------------------------------------------
// HRESULT subset.
-#ifdef WIN32
+#ifdef PLATFORM_UNIX
+typedef int32_t HRESULT;
+#else
// this must exactly match the typedef used by windows.h
typedef long HRESULT;
-#else
-typedef int32_t HRESULT;
#endif
#define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0)
@@ -79,7 +82,7 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x)
#define TRUE true
#define FALSE false
-#define CALLBACK
+#define CALLBACK __stdcall
#define FORCEINLINE inline
#define INFINITE 0xFFFFFFFF
@@ -104,122 +107,20 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x)
#define INVALID_HANDLE_VALUE ((HANDLE)-1)
-#ifndef WIN32
+#ifdef PLATFORM_UNIX
#define _vsnprintf vsnprintf
#define sprintf_s snprintf
+#define swprintf_s swprintf
#endif
-#define WINBASEAPI extern "C"
#define WINAPI __stdcall
typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter);
-WINBASEAPI
-void
-WINAPI
-DebugBreak();
-
-WINBASEAPI
-BOOL
-WINAPI
-VirtualUnlock(
- LPVOID lpAddress,
- SIZE_T dwSize
- );
-
-WINBASEAPI
-DWORD
-WINAPI
-GetLastError();
-
-WINBASEAPI
-UINT
-WINAPI
-GetWriteWatch(
- DWORD dwFlags,
- PVOID lpBaseAddress,
- SIZE_T dwRegionSize,
- PVOID *lpAddresses,
- ULONG_PTR * lpdwCount,
- DWORD * lpdwGranularity
-);
-
-WINBASEAPI
-UINT
-WINAPI
-ResetWriteWatch(
- LPVOID lpBaseAddress,
- SIZE_T dwRegionSize
-);
-
-WINBASEAPI
-VOID
-WINAPI
-FlushProcessWriteBuffers();
-
-WINBASEAPI
-DWORD
-WINAPI
-GetTickCount();
-
-WINBASEAPI
-BOOL
-WINAPI
-QueryPerformanceCounter(LARGE_INTEGER *lpPerformanceCount);
-
-WINBASEAPI
-BOOL
-WINAPI
-QueryPerformanceFrequency(LARGE_INTEGER *lpFrequency);
-
-WINBASEAPI
-DWORD
-WINAPI
-GetCurrentThreadId(
- VOID);
-
-WINBASEAPI
-BOOL
-WINAPI
-CloseHandle(
- HANDLE hObject);
-
#define WAIT_OBJECT_0 0
#define WAIT_TIMEOUT 258
#define WAIT_FAILED 0xFFFFFFFF
-#define GENERIC_WRITE 0x40000000
-#define FILE_SHARE_READ 0x00000001
-#define CREATE_ALWAYS 2
-#define FILE_ATTRIBUTE_NORMAL 0x00000080
-
-WINBASEAPI
-BOOL
-WINAPI
-WriteFile(
- HANDLE hFile,
- LPCVOID lpBuffer,
- DWORD nNumberOfBytesToWrite,
- DWORD * lpNumberOfBytesWritten,
- PVOID lpOverlapped);
-
-#define FILE_BEGIN 0
-
-WINBASEAPI
-DWORD
-WINAPI
-SetFilePointer(
- HANDLE hFile,
- int32_t lDistanceToMove,
- int32_t * lpDistanceToMoveHigh,
- DWORD dwMoveMethod);
-
-WINBASEAPI
-BOOL
-WINAPI
-FlushFileBuffers(
- HANDLE hFile);
-
#if defined(_MSC_VER)
#if defined(_ARM_)
@@ -228,6 +129,16 @@ FlushFileBuffers(
#pragma intrinsic(__emit)
#define MemoryBarrier() { __emit(0xF3BF); __emit(0x8F5F); }
+ #elif defined(_ARM64_)
+
+ extern "C" void __yield(void);
+ #pragma intrinsic(__yield)
+ __forceinline void YieldProcessor() { __yield();}
+
+ extern "C" void __dmb(const unsigned __int32 _Type);
+ #pragma intrinsic(__dmb)
+ #define MemoryBarrier() { __dmb(_ARM64_BARRIER_SY); }
+
#elif defined(_AMD64_)
extern "C" VOID
@@ -263,24 +174,8 @@ FlushFileBuffers(
#endif
#else // _MSC_VER
-WINBASEAPI
-VOID
-WINAPI
-YieldProcessor();
-
-WINBASEAPI
-VOID
-WINAPI
-MemoryBarrier();
-
#endif // _MSC_VER
-typedef struct _GUID {
- unsigned long Data1;
- unsigned short Data2;
- unsigned short Data3;
- unsigned char Data4[8];
-} GUID;
#endif // _INC_WINDOWS
// -----------------------------------------------------------------------------------------------------------
@@ -410,56 +305,6 @@ typedef DPTR(uint8_t) PTR_uint8_t;
#define UI64(_literal) _literal##ULL
-int32_t FastInterlockIncrement(int32_t volatile *lpAddend);
-int32_t FastInterlockDecrement(int32_t volatile *lpAddend);
-int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value);
-int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand);
-int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value);
-
-void * _FastInterlockExchangePointer(void * volatile *Target, void * Value);
-void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand);
-
-template <typename T>
-inline T FastInterlockExchangePointer(
- T volatile * target,
- T value)
-{
- return (T)((TADDR)_FastInterlockExchangePointer((void **)target, value));
-}
-
-template <typename T>
-inline T FastInterlockExchangePointer(
- T volatile * target,
- nullptr_t value)
-{
- return (T)((TADDR)_FastInterlockExchangePointer((void **)target, value));
-}
-
-template <typename T>
-inline T FastInterlockCompareExchangePointer(
- T volatile * destination,
- T exchange,
- T comparand)
-{
- return (T)((TADDR)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand));
-}
-
-template <typename T>
-inline T FastInterlockCompareExchangePointer(
- T volatile * destination,
- T exchange,
- nullptr_t comparand)
-{
- return (T)((TADDR)_FastInterlockCompareExchangePointer((void **)destination, exchange, comparand));
-}
-
-
-void FastInterlockOr(uint32_t volatile *p, uint32_t msk);
-void FastInterlockAnd(uint32_t volatile *p, uint32_t msk);
-
-#define CALLER_LIMITS_SPINNING 0
-bool __SwitchToThread (uint32_t dwSleepMSec, uint32_t dwSwitchCount);
-
class ObjHeader;
class MethodTable;
class Object;
@@ -493,7 +338,51 @@ typedef TADDR OBJECTHANDLE;
#define VOLATILE(T) T volatile
+//
+// This code is extremely compiler- and CPU-specific, and will need to be altered to
+// support new compilers and/or CPUs. Here we enforce that we can only compile using
+// VC++, or Clang on x86, AMD64, ARM and ARM64.
+//
+#if !defined(_MSC_VER) && !defined(__clang__)
+#error The Volatile type is currently only defined for Visual C++ and Clang
+#endif
+
+#if defined(__clang__) && !defined(_X86_) && !defined(_AMD64_) && !defined(_ARM_) && !defined(_ARM64_)
+#error The Volatile type is currently only defined for Clang when targeting x86, AMD64, ARM or ARM64 CPUs
+#endif
+
+#if defined(__clang__)
+#if defined(_ARM_) || defined(_ARM64_)
+// This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows.
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb sy" : : : "memory")
+#else
+//
+// For Clang, we prevent reordering by the compiler by inserting the following after a volatile
+// load (to prevent subsequent operations from moving before the read), and before a volatile
+// write (to prevent prior operations from moving past the write). We don't need to do anything
+// special to prevent CPU reorderings, because the x86 and AMD64 architectures are already
+// sufficiently constrained for our purposes. If we ever need to run on weaker CPU architectures
+// (such as PowerPC), then we will need to do more work.
+//
+// Please do not use this macro outside of this file. It is subject to change or removal without
+// notice.
+//
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("" : : : "memory")
+#endif // !_ARM_
+#elif defined(_ARM_) && _ISO_VOLATILE
+// ARM has a very weak memory model and very few tools to control that model. We're forced to perform a full
+// memory barrier to preserve the volatile semantics. Technically this is only necessary on MP systems but we
+// currently don't have a cheap way to determine the number of CPUs from this header file. Revisit this if it
+// turns out to be a performance issue for the uni-proc case.
+#define VOLATILE_MEMORY_BARRIER() MemoryBarrier()
+#else
+//
+// On VC++, reorderings at the compiler and machine level are prevented by the use of the
+// "volatile" keyword in VolatileLoad and VolatileStore. This should work on any CPU architecture
+// targeted by VC++ with /iso_volatile-.
+//
#define VOLATILE_MEMORY_BARRIER()
+#endif
//
// VolatileLoad loads a T from a pointer to T. It is guaranteed that this load will not be optimized
@@ -539,11 +428,6 @@ void VolatileStore(T* pt, T val)
}
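
For reference, the load half of this pattern pairs a volatile read with the barrier defined above, so later operations cannot be hoisted above the load. A paraphrase of the shape this header uses (a sketch, not a new API):

// Shape of VolatileLoad: volatile read, then a compiler/CPU fence.
template <typename T>
inline T VolatileLoadSketch(T const volatile* pt)
{
    T val = *pt;               // the volatile read itself cannot be elided or reordered by the compiler
    VOLATILE_MEMORY_BARRIER(); // keeps subsequent operations from moving above the load
    return val;
}
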
extern GCSystemInfo g_SystemInfo;
-void InitializeSystemInfo();
-
-void
-GetProcessMemoryLoad(
- GCMemoryStatus* lpBuffer);
extern MethodTable * g_pFreeObjectMethodTable;
@@ -552,43 +436,6 @@ extern int32_t g_TrapReturningThreads;
extern bool g_fFinalizerRunOnShutDown;
//
-// Memory allocation
-//
-#define MEM_COMMIT 0x1000
-#define MEM_RESERVE 0x2000
-#define MEM_DECOMMIT 0x4000
-#define MEM_RELEASE 0x8000
-#define MEM_RESET 0x80000
-
-#define PAGE_NOACCESS 0x01
-#define PAGE_READWRITE 0x04
-
-void * ClrVirtualAlloc(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect);
-
-void * ClrVirtualAllocAligned(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect,
- size_t dwAlignment);
-
-bool ClrVirtualFree(
- void * lpAddress,
- size_t dwSize,
- uint32_t dwFreeType);
-
-bool
-ClrVirtualProtect(
- void * lpAddress,
- size_t dwSize,
- uint32_t flNewProtect,
- uint32_t * lpflOldProtect);
-
-//
// Locks
//
@@ -597,71 +444,8 @@ class Thread;
Thread * GetThread();
-struct ScanContext;
-typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
-
typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2);
-typedef void enum_alloc_context_func(alloc_context*, void*);
-
-class GCToEEInterface
-{
-public:
- //
- // Suspend/Resume callbacks
- //
- typedef enum
- {
- SUSPEND_FOR_GC,
- SUSPEND_FOR_GC_PREP
- } SUSPEND_REASON;
-
- static void SuspendEE(SUSPEND_REASON reason);
- static void RestartEE(bool bFinishedGC); //resume threads.
-
- //
- // The stack roots enumeration callback
- //
- static void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
-
- //
- // Callbacks issues during GC that the execution engine can do its own bookeeping
- //
-
- // start of GC call back - single threaded
- static void GcStartWork(int condemned, int max_gen);
-
- //EE can perform post stack scanning action, while the
- // user threads are still suspended
- static void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
-
- // Called before BGC starts sweeping, the heap is walkable
- static void GcBeforeBGCSweepWork();
-
- // post-gc callback.
- static void GcDone(int condemned);
-
- // Promote refcounted handle callback
- static bool RefCountedHandleCallbacks(Object * pObject);
-
- // Sync block cache management
- static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
- static void SyncBlockCacheDemote(int max_gen);
- static void SyncBlockCachePromotionsGranted(int max_gen);
-
- // Thread functions
- static bool IsPreemptiveGCDisabled(Thread * pThread);
- static void EnablePreemptiveGC(Thread * pThread);
- static void DisablePreemptiveGC(Thread * pThread);
- static void SetGCSpecial(Thread * pThread);
- static bool CatchAtSafePoint(Thread * pThread);
- static alloc_context * GetAllocContext(Thread * pThread);
-
- // ThreadStore functions
- static void AttachCurrentThread(); // does not acquire thread store lock
- static void GcEnumAllocContexts (enum_alloc_context_func* fn, void* param);
-};
-
class FinalizerThread
{
public:
@@ -678,9 +462,20 @@ public:
static HANDLE GetFinalizerEvent();
};
+#ifdef FEATURE_REDHAWK
typedef uint32_t (__stdcall *BackgroundCallback)(void* pCallbackContext);
REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext);
+enum PalCapability
+{
+ WriteWatchCapability = 0x00000001, // GetWriteWatch() and friends
+ LowMemoryNotificationCapability = 0x00000002, // CreateMemoryResourceNotification() and friends
+ GetCurrentProcessorNumberCapability = 0x00000004, // GetCurrentProcessorNumber()
+};
+
+REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalHasCapability(PalCapability capability);
+#endif // FEATURE_REDHAWK
+
void DestroyThread(Thread * pThread);
bool IsGCSpecialThread();
@@ -692,12 +487,6 @@ inline bool dbgOnly_IsSpecialEEThread()
#define ClrFlsSetThreadType(type)
-void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection);
-void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection);
-void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection);
-void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection);
-
-
//
// Performance logging
//
@@ -763,29 +552,10 @@ VOID LogSpewAlways(const char *fmt, ...);
#define STRESS_LOG_RESERVE_MEM(numChunks) do {} while (0)
#define STRESS_LOG_GC_STACK
-typedef void* CLR_MUTEX_ATTRIBUTES;
-typedef void* CLR_MUTEX_COOKIE;
-
-CLR_MUTEX_COOKIE ClrCreateMutex(CLR_MUTEX_ATTRIBUTES lpMutexAttributes, bool bInitialOwner, LPCWSTR lpName);
-void ClrCloseMutex(CLR_MUTEX_COOKIE mutex);
-bool ClrReleaseMutex(CLR_MUTEX_COOKIE mutex);
-uint32_t ClrWaitForMutex(CLR_MUTEX_COOKIE mutex, uint32_t dwMilliseconds, bool bAlertable);
-
-REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateFileW(_In_z_ LPCWSTR pFileName, uint32_t desiredAccess, uint32_t shareMode, _In_opt_ void* pSecurityAttributes, uint32_t creationDisposition, uint32_t flagsAndAttributes, HANDLE hTemplateFile);
-
#define DEFAULT_GC_PRN_LVL 3
// -----------------------------------------------------------------------------------------------------------
-enum PalCapability
-{
- WriteWatchCapability = 0x00000001, // GetWriteWatch() and friends
- LowMemoryNotificationCapability = 0x00000002, // CreateMemoryResourceNotification() and friends
- GetCurrentProcessorNumberCapability = 0x00000004, // GetCurrentProcessorNumber()
-};
-
-REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalHasCapability(PalCapability capability);
-
void StompWriteBarrierEphemeral();
void StompWriteBarrierResize(bool bReqUpperBoundsCheck);
@@ -862,8 +632,8 @@ namespace GCStressPolicy
static volatile int32_t s_cGcStressDisables;
inline bool IsEnabled() { return s_cGcStressDisables == 0; }
- inline void GlobalDisable() { FastInterlockIncrement(&s_cGcStressDisables); }
- inline void GlobalEnable() { FastInterlockDecrement(&s_cGcStressDisables); }
+ inline void GlobalDisable() { Interlocked::Increment(&s_cGcStressDisables); }
+ inline void GlobalEnable() { Interlocked::Decrement(&s_cGcStressDisables); }
}
enum gcs_trigger_points
diff --git a/src/Native/gc/env/gcenv.ee.h b/src/Native/gc/env/gcenv.ee.h
new file mode 100644
index 000000000..741337fbb
--- /dev/null
+++ b/src/Native/gc/env/gcenv.ee.h
@@ -0,0 +1,85 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// Interface between the GC and EE
+//
+
+#ifndef __GCENV_EE_H__
+#define __GCENV_EE_H__
+
+struct ScanContext;
+class CrawlFrame;
+
+typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);
+
+typedef void enum_alloc_context_func(alloc_context*, void*);
+
+typedef struct
+{
+ promote_func* f;
+ ScanContext* sc;
+ CrawlFrame * cf;
+} GCCONTEXT;
+
+
+class GCToEEInterface
+{
+public:
+ //
+ // Suspend/Resume callbacks
+ //
+ typedef enum
+ {
+ SUSPEND_FOR_GC = 1,
+ SUSPEND_FOR_GC_PREP = 6
+ } SUSPEND_REASON;
+
+ static void SuspendEE(SUSPEND_REASON reason);
+ static void RestartEE(bool bFinishedGC); //resume threads.
+
+ //
+ // The GC roots enumeration callback
+ //
+ static void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
+
+ //
+    // Callbacks issued during GC so that the execution engine can do its own bookkeeping
+ //
+
+ // start of GC call back - single threaded
+ static void GcStartWork(int condemned, int max_gen);
+
+ //EE can perform post stack scanning action, while the
+ // user threads are still suspended
+ static void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
+
+ // Called before BGC starts sweeping, the heap is walkable
+ static void GcBeforeBGCSweepWork();
+
+ // post-gc callback.
+ static void GcDone(int condemned);
+
+ // Promote refcounted handle callback
+ static bool RefCountedHandleCallbacks(Object * pObject);
+
+ // Sync block cache management
+ static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
+ static void SyncBlockCacheDemote(int max_gen);
+ static void SyncBlockCachePromotionsGranted(int max_gen);
+
+ // Thread functions
+ static bool IsPreemptiveGCDisabled(Thread * pThread);
+ static void EnablePreemptiveGC(Thread * pThread);
+ static void DisablePreemptiveGC(Thread * pThread);
+
+ static void SetGCSpecial(Thread * pThread);
+ static alloc_context * GetAllocContext(Thread * pThread);
+ static bool CatchAtSafePoint(Thread * pThread);
+
+ static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
+
+ static void AttachCurrentThread(); // does not acquire thread store lock
+};
+
+#endif // __GCENV_EE_H__
diff --git a/src/Native/gc/env/gcenv.interlocked.h b/src/Native/gc/env/gcenv.interlocked.h
new file mode 100644
index 000000000..1d6cc8424
--- /dev/null
+++ b/src/Native/gc/env/gcenv.interlocked.h
@@ -0,0 +1,102 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// Interlocked operations
+//
+
+#ifndef __GCENV_INTERLOCKED_H__
+#define __GCENV_INTERLOCKED_H__
+
+// Interlocked operations
+class Interlocked
+{
+public:
+
+ // Increment the value of the specified 32-bit variable as an atomic operation.
+ // Parameters:
+ // addend - variable to be incremented
+ // Return:
+ // The resulting incremented value
+ template<typename T>
+ static T Increment(T volatile *addend);
+
+ // Decrement the value of the specified 32-bit variable as an atomic operation.
+ // Parameters:
+ // addend - variable to be decremented
+ // Return:
+ // The resulting decremented value
+ template<typename T>
+ static T Decrement(T volatile *addend);
+
+    // Perform an atomic AND operation on the specified values
+ // Parameters:
+ // destination - the first operand and the destination
+ // value - second operand
+ template<typename T>
+ static void And(T volatile *destination, T value);
+
+    // Perform an atomic OR operation on the specified values
+ // Parameters:
+ // destination - the first operand and the destination
+ // value - second operand
+ template<typename T>
+ static void Or(T volatile *destination, T value);
+
+ // Set a 32-bit variable to the specified value as an atomic operation.
+ // Parameters:
+ // destination - value to be exchanged
+ // value - value to set the destination to
+ // Return:
+ // The previous value of the destination
+ template<typename T>
+ static T Exchange(T volatile *destination, T value);
+
+ // Set a pointer variable to the specified value as an atomic operation.
+ // Parameters:
+ // destination - value to be exchanged
+ // value - value to set the destination to
+ // Return:
+ // The previous value of the destination
+ template <typename T>
+ static T ExchangePointer(T volatile * destination, T value);
+
+ template <typename T>
+ static T ExchangePointer(T volatile * destination, std::nullptr_t value);
+
+ // Perform an atomic addition of two 32-bit values and return the original value of the addend.
+ // Parameters:
+ // addend - variable to be added to
+ // value - value to add
+ // Return:
+ // The previous value of the addend
+ template<typename T>
+ static T ExchangeAdd(T volatile *addend, T value);
+
+ // Performs an atomic compare-and-exchange operation on the specified values.
+ // Parameters:
+ // destination - value to be exchanged
+ // exchange - value to set the destination to
+ // comparand - value to compare the destination to before setting it to the exchange.
+ // The destination is set only if the destination is equal to the comparand.
+ // Return:
+ // The original value of the destination
+ template<typename T>
+ static T CompareExchange(T volatile *destination, T exchange, T comparand);
+
+ // Performs an atomic compare-and-exchange operation on the specified pointers.
+ // Parameters:
+ // destination - value to be exchanged
+ // exchange - value to set the destination to
+ // comparand - value to compare the destination to before setting it to the exchange.
+ // The destination is set only if the destination is equal to the comparand.
+ // Return:
+ // The original value of the destination
+ template <typename T>
+ static T CompareExchangePointer(T volatile *destination, T exchange, T comparand);
+
+ template <typename T>
+ static T CompareExchangePointer(T volatile *destination, T exchange, std::nullptr_t comparand);
+};
+
+#endif // __GCENV_INTERLOCKED_H__
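
CompareExchange is the building block for lock-free read-modify-write loops: read, compute, try to publish, retry if another thread raced in. A minimal sketch that atomically raises a shared 32-bit value to a maximum:

// Atomically raise *shared to at least candidate.
void InterlockedMax(int32_t volatile* shared, int32_t candidate)
{
    int32_t observed = *shared;
    while (observed < candidate)
    {
        int32_t prior = Interlocked::CompareExchange(shared, candidate, observed);
        if (prior == observed)
            break;          // we published the new maximum
        observed = prior;   // lost the race; re-check against the winner's value
    }
}
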
diff --git a/src/Native/gc/env/gcenv.interlocked.inl b/src/Native/gc/env/gcenv.interlocked.inl
new file mode 100644
index 000000000..62e171cad
--- /dev/null
+++ b/src/Native/gc/env/gcenv.interlocked.inl
@@ -0,0 +1,200 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// __forceinline implementation of the Interlocked class methods
+//
+
+#ifndef __GCENV_INTERLOCKED_INL__
+#define __GCENV_INTERLOCKED_INL__
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif // _MSC_VER
+
+// Increment the value of the specified 32-bit variable as an atomic operation.
+// Parameters:
+// addend - variable to be incremented
+// Return:
+// The resulting incremented value
+template <typename T>
+__forceinline T Interlocked::Increment(T volatile *addend)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedIncrement((long*)addend);
+#else
+ return __sync_add_and_fetch(addend, 1);
+#endif
+}
+
+// Decrement the value of the specified 32-bit variable as an atomic operation.
+// Parameters:
+// addend - variable to be decremented
+// Return:
+// The resulting decremented value
+template <typename T>
+__forceinline T Interlocked::Decrement(T volatile *addend)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedDecrement((long*)addend);
+#else
+ return __sync_sub_and_fetch(addend, 1);
+#endif
+}
+
+// Set a 32-bit variable to the specified value as an atomic operation.
+// Parameters:
+// destination - value to be exchanged
+// value - value to set the destination to
+// Return:
+// The previous value of the destination
+template <typename T>
+__forceinline T Interlocked::Exchange(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedExchange((long*)destination, value);
+#else
+ return __sync_swap(destination, value);
+#endif
+}
+
+// Performs an atomic compare-and-exchange operation on the specified values.
+// Parameters:
+// destination - value to be exchanged
+//  exchange - value to set the destination to
+// comparand - value to compare the destination to before setting it to the exchange.
+// The destination is set only if the destination is equal to the comparand.
+// Return:
+// The original value of the destination
+template <typename T>
+__forceinline T Interlocked::CompareExchange(T volatile *destination, T exchange, T comparand)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedCompareExchange((long*)destination, exchange, comparand);
+#else
+ return __sync_val_compare_and_swap(destination, comparand, exchange);
+#endif
+}
+
+// Perform an atomic addition of two 32-bit values and return the original value of the addend.
+// Parameters:
+// addend - variable to be added to
+// value - value to add
+// Return:
+// The previous value of the addend
+template <typename T>
+__forceinline T Interlocked::ExchangeAdd(T volatile *addend, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ return _InterlockedExchangeAdd((long*)addend, value);
+#else
+ return __sync_fetch_and_add(addend, value);
+#endif
+}
+
+// Perform an atomic AND operation on the specified values
+// Parameters:
+// destination - the first operand and the destination
+// value - second operand
+template <typename T>
+__forceinline void Interlocked::And(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ _InterlockedAnd((long*)destination, value);
+#else
+ __sync_and_and_fetch(destination, value);
+#endif
+}
+
+// Perform an atomic OR operation on the specified values
+// Parameters:
+// destination - the first operand and the destination
+// value - second operand
+template <typename T>
+__forceinline void Interlocked::Or(T volatile *destination, T value)
+{
+#ifdef _MSC_VER
+ static_assert(sizeof(long) == sizeof(T), "Size of long must be the same as size of T");
+ _InterlockedOr((long*)destination, value);
+#else
+ __sync_or_and_fetch(destination, value);
+#endif
+}
+
+// Set a pointer variable to the specified value as an atomic operation.
+// Parameters:
+// destination - value to be exchanged
+// value - value to set the destination to
+// Return:
+// The previous value of the destination
+template <typename T>
+__forceinline T Interlocked::ExchangePointer(T volatile * destination, T value)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value);
+#else
+ return (T)(TADDR)_InterlockedExchange((long volatile *)(void* volatile *)destination, (long)(void*)value);
+#endif
+#else
+ return (T)(TADDR)__sync_swap((void* volatile *)destination, value);
+#endif
+}
+
+template <typename T>
+__forceinline T Interlocked::ExchangePointer(T volatile * destination, std::nullptr_t value)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ return (T)(TADDR)_InterlockedExchangePointer((void* volatile *)destination, value);
+#else
+ return (T)(TADDR)_InterlockedExchange((long volatile *)(void* volatile *)destination, (long)(void*)value);
+#endif
+#else
+ return (T)(TADDR)__sync_swap((void* volatile *)destination, value);
+#endif
+}
+
+// Performs an atomic compare-and-exchange operation on the specified pointers.
+// Parameters:
+// destination - value to be exchanged
+//  exchange - value to set the destination to
+// comparand - value to compare the destination to before setting it to the exchange.
+// The destination is set only if the destination is equal to the comparand.
+// Return:
+// The original value of the destination
+template <typename T>
+__forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, T comparand)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, exchange, comparand);
+#else
+ return (T)(TADDR)_InterlockedCompareExchange((long volatile *)(void* volatile *)destination, (long)(void*)exchange, (long)(void*)comparand);
+#endif
+#else
+ return (T)(TADDR)__sync_val_compare_and_swap((void* volatile *)destination, comparand, exchange);
+#endif
+}
+
+template <typename T>
+__forceinline T Interlocked::CompareExchangePointer(T volatile *destination, T exchange, std::nullptr_t comparand)
+{
+#ifdef _MSC_VER
+#ifdef BIT64
+ return (T)(TADDR)_InterlockedCompareExchangePointer((void* volatile *)destination, exchange, comparand);
+#else
+ return (T)(TADDR)_InterlockedCompareExchange((long volatile *)(void* volatile *)destination, (long)(void*)exchange, (long)(void*)comparand);
+#endif
+#else
+ return (T)(TADDR)__sync_val_compare_and_swap((void* volatile *)destination, comparand, exchange);
+#endif
+}
+
+#endif // __GCENV_INTERLOCKED_INL__
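
The std::nullptr_t overloads above exist because template argument deduction cannot unify T with a bare nullptr literal: a call like CompareExchangePointer(&p, q, nullptr) would otherwise fail to compile. A small sketch of the case they enable (Object is the forward-declared GC type; g_slot and TryClaimSlot are illustrative):

// Claim a slot exactly once: succeed only while it is still null.
Object* volatile g_slot = nullptr;

bool TryClaimSlot(Object* obj)
{
    // Without the nullptr_t overload, T could not be deduced here.
    return Interlocked::CompareExchangePointer(&g_slot, obj, nullptr) == nullptr;
}
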
diff --git a/src/Native/gc/env/gcenv.object.h b/src/Native/gc/env/gcenv.object.h
index 31dfe838d..d3660173c 100644
--- a/src/Native/gc/env/gcenv.object.h
+++ b/src/Native/gc/env/gcenv.object.h
@@ -26,8 +26,8 @@ private:
public:
uint32_t GetBits() { return m_uSyncBlockValue; }
- void SetBit(uint32_t uBit) { FastInterlockOr(&m_uSyncBlockValue, uBit); }
- void ClrBit(uint32_t uBit) { FastInterlockAnd(&m_uSyncBlockValue, ~uBit); }
+ void SetBit(uint32_t uBit) { Interlocked::Or(&m_uSyncBlockValue, uBit); }
+ void ClrBit(uint32_t uBit) { Interlocked::And(&m_uSyncBlockValue, ~uBit); }
void SetGCBit() { m_uSyncBlockValue |= BIT_SBLK_GC_RESERVE; }
void ClrGCBit() { m_uSyncBlockValue &= ~BIT_SBLK_GC_RESERVE; }
};
diff --git a/src/Native/gc/env/gcenv.os.h b/src/Native/gc/env/gcenv.os.h
new file mode 100644
index 000000000..8dda900dc
--- /dev/null
+++ b/src/Native/gc/env/gcenv.os.h
@@ -0,0 +1,274 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// Interface between GC and the OS specific functionality
+//
+
+#ifndef __GCENV_OS_H__
+#define __GCENV_OS_H__
+
+// Critical section used by the GC
+class CLRCriticalSection
+{
+ CRITICAL_SECTION m_cs;
+
+public:
+ // Initialize the critical section
+ void Initialize();
+
+ // Destroy the critical section
+ void Destroy();
+
+ // Enter the critical section. Blocks until the section can be entered.
+ void Enter();
+
+ // Leave the critical section
+ void Leave();
+};
+
+// Flags for the GCToOSInterface::VirtualReserve method
+struct VirtualReserveFlags
+{
+ enum
+ {
+ None = 0,
+ WriteWatch = 1,
+ };
+};
+
+// Affinity of a GC thread
+struct GCThreadAffinity
+{
+ static const int None = -1;
+
+ // Processor group index, None if no group is specified
+ int Group;
+ // Processor index, None if no affinity is specified
+ int Processor;
+};
+
+// GC thread function prototype
+typedef void (*GCThreadFunction)(void* param);
+
+// Interface that the GC uses to invoke OS specific functionality
+class GCToOSInterface
+{
+public:
+
+ //
+ // Initialization and shutdown of the interface
+ //
+
+ // Initialize the interface implementation
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool Initialize();
+
+ // Shutdown the interface implementation
+ static void Shutdown();
+
+ //
+ // Virtual memory management
+ //
+
+ // Reserve virtual memory range.
+ // Parameters:
+ // address - starting virtual address, it can be NULL to let the function choose the starting address
+ // size - size of the virtual memory range
+ // alignment - requested memory alignment
+ // flags - flags to control special settings like write watching
+ // Return:
+ // Starting virtual address of the reserved range
+ static void* VirtualReserve(void *address, size_t size, size_t alignment, uint32_t flags);
+
+ // Release virtual memory range previously reserved using VirtualReserve
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualRelease(void *address, size_t size);
+
+ // Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualCommit(void *address, size_t size);
+
+    // Decommit virtual memory range.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualDecommit(void *address, size_t size);
+
+ // Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+ // longer of interest, but it should not be decommitted.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // unlock - true if the memory range should also be unlocked
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool VirtualReset(void *address, size_t size, bool unlock);
+
+ //
+ // Write watching
+ //
+
+ // Check if the OS supports write watching
+ static bool SupportsWriteWatch();
+
+ // Reset the write tracking state for the specified virtual memory range.
+ // Parameters:
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ static void ResetWriteWatch(void *address, size_t size);
+
+ // Retrieve addresses of the pages that are written to in a region of virtual memory
+ // Parameters:
+ // resetState - true indicates to reset the write tracking state
+ // address - starting virtual address
+ // size - size of the virtual memory range
+ // pageAddresses - buffer that receives an array of page addresses in the memory region
+ // pageAddressesCount - on input, size of the lpAddresses array, in array elements
+ // on output, the number of page addresses that are returned in the array.
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount);
+
+ //
+ // Thread and process
+ //
+
+ // Create a new thread
+ // Parameters:
+ // function - the function to be executed by the thread
+ // param - parameters of the thread
+ // affinity - processor affinity of the thread
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity);
+
+ // Causes the calling thread to sleep for the specified number of milliseconds
+ // Parameters:
+ // sleepMSec - time to sleep before switching to another thread
+ static void Sleep(uint32_t sleepMSec);
+
+ // Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+ // Parameters:
+ // switchCount - number of times the YieldThread was called in a loop
+ static void YieldThread(uint32_t switchCount);
+
+ // Get the number of the current processor
+ static uint32_t GetCurrentProcessorNumber();
+
+ // Check if the OS supports getting current processor number
+ static bool CanGetCurrentProcessorNumber();
+
+ // Set ideal processor for the current thread
+ // Parameters:
+ // processorIndex - index of the processor in the group
+ // affinity - ideal processor affinity for the thread
+ // Return:
+ // true if it has succeeded, false if it has failed
+ static bool SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity);
+
+ // Get numeric id of the current thread if possible on the
+    // current platform. It is intended for logging purposes only.
+ // Return:
+    //  Numeric id of the current thread, or 0 if it cannot be obtained
+ static uint32_t GetCurrentThreadIdForLogging();
+
+ // Get id of the current process
+ // Return:
+ // Id of the current process
+ static uint32_t GetCurrentProcessId();
+
+ //
+ // Processor topology
+ //
+
+ // Get number of logical processors
+ static uint32_t GetLogicalCpuCount();
+
+ // Get size of the largest cache on the processor die
+ // Parameters:
+ // trueSize - true to return true cache size, false to return scaled up size based on
+ // the processor architecture
+ // Return:
+ // Size of the cache
+ static size_t GetLargestOnDieCacheSize(bool trueSize = true);
+
+ // Get number of processors assigned to the current process
+ // Return:
+ // The number of processors
+ static uint32_t GetCurrentProcessCpuCount();
+
+ // Get affinity mask of the current process
+ // Parameters:
+ // processMask - affinity mask for the specified process
+ // systemMask - affinity mask for the system
+ // Return:
+ // true if it has succeeded, false if it has failed
+ // Remarks:
+ // A process affinity mask is a bit vector in which each bit represents the processors that
+ // a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+ // represents the processors that are configured into a system.
+ // A process affinity mask is a subset of the system affinity mask. A process is only allowed
+ // to run on the processors configured into a system. Therefore, the process affinity mask cannot
+ // specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+ static bool GetCurrentProcessAffinityMask(uintptr_t *processMask, uintptr_t *systemMask);
+
+ //
+ // Misc
+ //
+
+ // Get global memory status
+ // Parameters:
+ // ms - pointer to the structure that will be filled in with the memory status
+ static void GetMemoryStatus(GCMemoryStatus* ms);
+
+ // Flush write buffers of processors that are executing threads of the current process
+ static void FlushProcessWriteBuffers();
+
+ // Break into a debugger
+ static void DebugBreak();
+
+ //
+ // Time
+ //
+
+ // Get a high precision performance counter
+ // Return:
+ // The counter value
+ static int64_t QueryPerformanceCounter();
+
+    // Get the frequency of the high precision performance counter
+ // Return:
+ // The counter frequency
+ static int64_t QueryPerformanceFrequency();
+
+ // Get a time stamp with a low precision
+ // Return:
+ // Time stamp in milliseconds
+ static uint32_t GetLowPrecisionTimeStamp();
+
+ //
+ // File
+ //
+
+ // Open a file
+ // Parameters:
+ // filename - name of the file to open
+ // mode - mode to open the file in (like in the CRT fopen)
+ // Return:
+ // FILE* of the opened file
+ static FILE* OpenFile(const WCHAR* filename, const WCHAR* mode);
+};
+
+#endif // __GCENV_OS_H__
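
A minimal bring-up sketch, assuming (as the header's ordering suggests) that Initialize must precede any other call on this interface and Shutdown comes last:

// Typical lifecycle of the OS interface.
if (!GCToOSInterface::Initialize())
    return false; // OS services unavailable; GC cannot start
// ... GC runs, calling the methods above ...
GCToOSInterface::Shutdown();
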
diff --git a/src/Native/gc/env/gcenv.structs.h b/src/Native/gc/env/gcenv.structs.h
index e3bfb17f5..cce073cdb 100644
--- a/src/Native/gc/env/gcenv.structs.h
+++ b/src/Native/gc/env/gcenv.structs.h
@@ -31,6 +31,62 @@ struct GCMemoryStatus
typedef void * HANDLE;
+#ifdef PLATFORM_UNIX
+
+class EEThreadId
+{
+ pthread_t m_id;
+ // Indicates whether the m_id is valid or not. pthread_t doesn't have any
+ // portable "invalid" value.
+ bool m_isValid;
+
+public:
+ bool IsCurrentThread()
+ {
+ return m_isValid && pthread_equal(m_id, pthread_self());
+ }
+
+ void SetToCurrentThread()
+ {
+ m_id = pthread_self();
+ m_isValid = true;
+ }
+
+ void Clear()
+ {
+ m_isValid = false;
+ }
+};
+
+#else // PLATFORM_UNIX
+
+#ifndef _INC_WINDOWS
+extern "C" uint32_t __stdcall GetCurrentThreadId();
+#endif
+
+class EEThreadId
+{
+ uint32_t m_uiId;
+public:
+
+ bool IsCurrentThread()
+ {
+ return m_uiId == ::GetCurrentThreadId();
+ }
+
+ void SetToCurrentThread()
+ {
+ m_uiId = ::GetCurrentThreadId();
+ }
+
+ void Clear()
+ {
+ m_uiId = 0;
+ }
+};
+
+#endif // PLATFORM_UNIX
+
#ifndef _INC_WINDOWS
typedef union _LARGE_INTEGER {
@@ -46,7 +102,13 @@ typedef union _LARGE_INTEGER {
int64_t QuadPart;
} LARGE_INTEGER, *PLARGE_INTEGER;
-#ifdef WIN32
+#ifdef PLATFORM_UNIX
+
+typedef struct _RTL_CRITICAL_SECTION {
+ pthread_mutex_t mutex;
+} CRITICAL_SECTION, RTL_CRITICAL_SECTION, *PRTL_CRITICAL_SECTION;
+
+#else
#pragma pack(push, 8)
@@ -67,12 +129,6 @@ typedef struct _RTL_CRITICAL_SECTION {
#pragma pack(pop)
-#else
-
-typedef struct _RTL_CRITICAL_SECTION {
- pthread_mutex_t mutex;
-} CRITICAL_SECTION, RTL_CRITICAL_SECTION, *PRTL_CRITICAL_SECTION;
-
#endif
#endif // _INC_WINDOWS
diff --git a/src/Native/gc/env/gcenv.sync.h b/src/Native/gc/env/gcenv.sync.h
index c3aea23fd..fe619cc69 100644
--- a/src/Native/gc/env/gcenv.sync.h
+++ b/src/Native/gc/env/gcenv.sync.h
@@ -7,19 +7,6 @@
//
// Helper classes expected by the GC
//
-class EEThreadId
-{
-public:
- EEThreadId(uint32_t uiId) : m_uiId(uiId) {}
- bool IsSameThread()
- {
- return m_uiId == GetCurrentThreadId();
- }
-
-private:
- uint32_t m_uiId;
-};
-
#define CRST_REENTRANCY 0
#define CRST_UNSAFE_SAMELEVEL 0
#define CRST_UNSAFE_ANYMODE 0
@@ -33,37 +20,37 @@ typedef int CrstType;
class CrstStatic
{
- CRITICAL_SECTION m_cs;
+ CLRCriticalSection m_cs;
#ifdef _DEBUG
- uint32_t m_holderThreadId;
+ EEThreadId m_holderThreadId;
#endif
public:
bool InitNoThrow(CrstType eType, CrstFlags eFlags = CRST_DEFAULT)
{
- UnsafeInitializeCriticalSection(&m_cs);
+ m_cs.Initialize();
return true;
}
void Destroy()
{
- UnsafeDeleteCriticalSection(&m_cs);
+ m_cs.Destroy();
}
void Enter()
{
- UnsafeEEEnterCriticalSection(&m_cs);
+ m_cs.Enter();
#ifdef _DEBUG
- m_holderThreadId = GetCurrentThreadId();
+ m_holderThreadId.SetToCurrentThread();
#endif
}
void Leave()
{
#ifdef _DEBUG
- m_holderThreadId = 0;
+ m_holderThreadId.Clear();
#endif
- UnsafeEELeaveCriticalSection(&m_cs);
+ m_cs.Leave();
}
#ifdef _DEBUG
@@ -74,7 +61,7 @@ public:
bool OwnedByCurrentThread()
{
- return GetHolderThreadId().IsSameThread();
+ return GetHolderThreadId().IsCurrentThread();
}
#endif
};
diff --git a/src/Native/gc/env/gcenv.windows.cpp b/src/Native/gc/env/gcenv.windows.cpp
deleted file mode 100644
index 806fbc8e6..000000000
--- a/src/Native/gc/env/gcenv.windows.cpp
+++ /dev/null
@@ -1,227 +0,0 @@
-//
-// Copyright (c) Microsoft. All rights reserved.
-// Licensed under the MIT license. See LICENSE file in the project root for full license information.
-//
-
-//
-// Implementation of the GC environment
-//
-#include "common.h"
-
-#include "windows.h"
-
-#include "gcenv.h"
-#include "gc.h"
-
-#ifdef _X86_
-EXTERN_C long _InterlockedOr(long volatile *, long);
-#pragma intrinsic (_InterlockedOr)
-#define InterlockedOr _InterlockedOr
-
-EXTERN_C long _InterlockedAnd(long volatile *, long);
-#pragma intrinsic(_InterlockedAnd)
-#define InterlockedAnd _InterlockedAnd
-#endif // _X86_
-
-int32_t FastInterlockIncrement(int32_t volatile *lpAddend)
-{
- return InterlockedIncrement((LONG *)lpAddend);
-}
-
-int32_t FastInterlockDecrement(int32_t volatile *lpAddend)
-{
- return InterlockedDecrement((LONG *)lpAddend);
-}
-
-int32_t FastInterlockExchange(int32_t volatile *Target, int32_t Value)
-{
- return InterlockedExchange((LONG *)Target, Value);
-}
-
-int32_t FastInterlockCompareExchange(int32_t volatile *Destination, int32_t Exchange, int32_t Comperand)
-{
- return InterlockedCompareExchange((LONG *)Destination, Exchange, Comperand);
-}
-
-int32_t FastInterlockExchangeAdd(int32_t volatile *Addend, int32_t Value)
-{
- return InterlockedExchangeAdd((LONG *)Addend, Value);
-}
-
-void * _FastInterlockExchangePointer(void * volatile *Target, void * Value)
-{
- return InterlockedExchangePointer(Target, Value);
-}
-
-void * _FastInterlockCompareExchangePointer(void * volatile *Destination, void * Exchange, void * Comperand)
-{
- return InterlockedCompareExchangePointer(Destination, Exchange, Comperand);
-}
-
-void FastInterlockOr(uint32_t volatile *p, uint32_t msk)
-{
- InterlockedOr((LONG volatile *)p, msk);
-}
-
-void FastInterlockAnd(uint32_t volatile *p, uint32_t msk)
-{
- InterlockedAnd((LONG volatile *)p, msk);
-}
-
-
-void UnsafeInitializeCriticalSection(CRITICAL_SECTION * lpCriticalSection)
-{
- InitializeCriticalSection(lpCriticalSection);
-}
-
-void UnsafeEEEnterCriticalSection(CRITICAL_SECTION *lpCriticalSection)
-{
- EnterCriticalSection(lpCriticalSection);
-}
-
-void UnsafeEELeaveCriticalSection(CRITICAL_SECTION * lpCriticalSection)
-{
- LeaveCriticalSection(lpCriticalSection);
-}
-
-void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
-{
- DeleteCriticalSection(lpCriticalSection);
-}
-
-
-void GetProcessMemoryLoad(GCMemoryStatus* pGCMemStatus)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- MEMORYSTATUSEX memStatus;
-
- memStatus.dwLength = sizeof(MEMORYSTATUSEX);
- BOOL fRet = GlobalMemoryStatusEx(&memStatus);
- _ASSERTE (fRet);
-
- // If the machine has more RAM than virtual address limit, let us cap it.
- // Our GC can never use more than virtual address limit.
- if (memStatus.ullAvailPhys > memStatus.ullTotalVirtual)
- {
- memStatus.ullAvailPhys = memStatus.ullAvailVirtual;
- }
-
- // Convert Windows struct to abstract struct
- pGCMemStatus->dwMemoryLoad = memStatus.dwMemoryLoad ;
- pGCMemStatus->ullTotalPhys = memStatus.ullTotalPhys ;
- pGCMemStatus->ullAvailPhys = memStatus.ullAvailPhys ;
- pGCMemStatus->ullTotalPageFile = memStatus.ullTotalPageFile ;
- pGCMemStatus->ullAvailPageFile = memStatus.ullAvailPageFile ;
- pGCMemStatus->ullTotalVirtual = memStatus.ullTotalVirtual ;
- pGCMemStatus->ullAvailVirtual = memStatus.ullAvailVirtual ;
-}
-
-bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
-{
- SwitchToThread();
- return true;
-}
-
-void * ClrVirtualAlloc(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect)
-{
- return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-}
-
-void * ClrVirtualAllocAligned(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect,
- size_t dwAlignment)
-{
- return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-}
-
-bool ClrVirtualFree(
- void * lpAddress,
- size_t dwSize,
- uint32_t dwFreeType)
-{
- return !!VirtualFree(lpAddress, dwSize, dwFreeType);
-}
-
-bool
-ClrVirtualProtect(
- void * lpAddress,
- size_t dwSize,
- uint32_t flNewProtect,
- uint32_t * lpflOldProtect)
-{
- return !!VirtualProtect(lpAddress, dwSize, flNewProtect, (DWORD *)lpflOldProtect);
-}
-
-MethodTable * g_pFreeObjectMethodTable;
-
-GCSystemInfo g_SystemInfo;
-
-void InitializeSystemInfo()
-{
- SYSTEM_INFO systemInfo;
- GetSystemInfo(&systemInfo);
-
- g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
- g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
- g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
-}
-
-int32_t g_TrapReturningThreads;
-
-bool g_fFinalizerRunOnShutDown;
-
-void DestroyThread(Thread * pThread)
-{
- // TODO: Implement
-}
-
-bool PalHasCapability(PalCapability capability)
-{
- // TODO: Implement for background GC
- return false;
-}
-
-REDHAWK_PALIMPORT HANDLE REDHAWK_PALAPI PalCreateFileW(_In_z_ LPCWSTR pFileName, uint32_t desiredAccess, uint32_t shareMode, _In_opt_ void* pSecurityAttributes, uint32_t creationDisposition, uint32_t flagsAndAttributes, HANDLE hTemplateFile)
-{
- return INVALID_HANDLE_VALUE;
-}
-
-
-
-CLR_MUTEX_COOKIE ClrCreateMutex(CLR_MUTEX_ATTRIBUTES lpMutexAttributes, bool bInitialOwner, LPCWSTR lpName)
-{
- _ASSERTE(!"ClrCreateMutex");
- return NULL;
-}
-
-void ClrCloseMutex(CLR_MUTEX_COOKIE mutex)
-{
- _ASSERTE(!"ClrCloseMutex");
-}
-
-bool ClrReleaseMutex(CLR_MUTEX_COOKIE mutex)
-{
- _ASSERTE(!"ClrReleaseMutex");
- return true;
-}
-
-uint32_t ClrWaitForMutex(CLR_MUTEX_COOKIE mutex, uint32_t dwMilliseconds, bool bAlertable)
-{
- _ASSERTE(!"ClrWaitForMutex");
- return WAIT_OBJECT_0;
-}
-
-
diff --git a/src/Native/gc/gc.cpp b/src/Native/gc/gc.cpp
index 30642686b..3a822cb6f 100644
--- a/src/Native/gc/gc.cpp
+++ b/src/Native/gc/gc.cpp
@@ -149,6 +149,17 @@ BOOL is_induced_blocking (gc_reason reason)
(reason == reason_induced_compacting));
}
+#ifndef DACCESS_COMPILE
+int64_t qpf;
+
+size_t GetHighPrecisionTimeStamp()
+{
+ int64_t ts = GCToOSInterface::QueryPerformanceCounter();
+
+ return (size_t)(ts / (qpf / 1000));
+}
+#endif
+
#ifdef GC_STATS
// There is a current and a prior copy of the statistics. This allows us to display deltas per reporting
// interval, as well as running totals. The 'min' and 'max' values require special treatment. They are
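The new helper folds the repeated QueryPerformanceCounter arithmetic into one place: dividing the tick count by ticks-per-millisecond (qpf / 1000) yields a millisecond timestamp. A minimal standalone sketch of that arithmetic, assuming qpf has been seeded once from the OS interface's performance-counter frequency (the seeding site is not in this hunk); qpf_hz and TicksToMilliseconds are illustrative names:

    #include <cstdint>
    #include <cstddef>

    int64_t qpf_hz = 10000000;   // assumed: counter frequency in ticks/second

    size_t TicksToMilliseconds(int64_t ts)
    {
        // qpf_hz / 1000 is ticks per millisecond; integer division
        // truncates, which is fine for coarse GC timing.
        return (size_t)(ts / (qpf_hz / 1000));
    }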
@@ -296,15 +307,7 @@ uint32_t bgc_alloc_spin = 2;
inline
void c_write (uint32_t& place, uint32_t value)
{
- FastInterlockExchange (&(LONG&)place, value);
- //place = value;
-}
-
-// TODO - can't make it work with the syntax for Volatile<T>
-inline
-void c_write_volatile (BOOL* place, uint32_t value)
-{
- FastInterlockExchange ((LONG*)place, value);
+ Interlocked::Exchange (&place, value);
//place = value;
}
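c_write now goes through the portable Interlocked wrapper instead of the Win32-specific FastInterlockExchange, and the separate c_write_volatile variant becomes redundant because the template needs no LONG casts. A sketch of the publish idiom, assuming the templated Interlocked::Exchange added in gcenv.interlocked.h; g_flag and publish_flag are hypothetical names:

    uint32_t g_flag = 0;

    inline void publish_flag(uint32_t value)
    {
        // Exchange returns the previous value and acts as a full memory
        // barrier, so other threads never observe a stale flag afterwards.
        Interlocked::Exchange(&g_flag, value);
    }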
@@ -368,15 +371,15 @@ void gc_heap::add_to_history()
#endif //DACCESS_COMPILE
#endif //BACKGROUND_GC
-#ifdef TRACE_GC
+#if defined(TRACE_GC) && !defined(DACCESS_COMPILE)
BOOL gc_log_on = TRUE;
-HANDLE gc_log = INVALID_HANDLE_VALUE;
+FILE* gc_log = NULL;
size_t gc_log_file_size = 0;
size_t gc_buffer_index = 0;
size_t max_gc_buffers = 0;
-static CLR_MUTEX_COOKIE gc_log_lock = 0;
+static CLRCriticalSection gc_log_lock;
// we keep this much in a buffer and only flush when the buffer is full
#define gc_log_buffer_size (1024*1024)
@@ -385,8 +388,7 @@ size_t gc_log_buffer_offset = 0;
void log_va_msg(const char *fmt, va_list args)
{
- uint32_t status = ClrWaitForMutex(gc_log_lock, INFINITE, FALSE);
- assert (WAIT_OBJECT_0 == status);
+ gc_log_lock.Enter();
const int BUFFERSIZE = 512;
static char rgchBuffer[BUFFERSIZE];
@@ -395,7 +397,7 @@ void log_va_msg(const char *fmt, va_list args)
pBuffer[0] = '\r';
pBuffer[1] = '\n';
int buffer_start = 2;
- int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", GetCurrentThreadId());
+ int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", GCToOSInterface::GetCurrentThreadIdForLogging());
buffer_start += pid_len;
memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
int msg_len = _vsnprintf(&pBuffer[buffer_start], BUFFERSIZE - buffer_start, fmt, args );
@@ -418,12 +420,11 @@ void log_va_msg(const char *fmt, va_list args)
gc_buffer_index++;
if (gc_buffer_index > max_gc_buffers)
{
- SetFilePointer (gc_log, 0, NULL, FILE_BEGIN);
+ fseek (gc_log, 0, SEEK_SET);
gc_buffer_index = 0;
}
- uint32_t written_to_log = 0;
- WriteFile (gc_log, gc_log_buffer, (uint32_t)gc_log_buffer_size, (DWORD*)&written_to_log, NULL);
- FlushFileBuffers (gc_log);
+ fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log);
+ fflush(gc_log);
memset (gc_log_buffer, '*', gc_log_buffer_size);
gc_log_buffer_offset = 0;
}
@@ -431,13 +432,12 @@ void log_va_msg(const char *fmt, va_list args)
memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len);
gc_log_buffer_offset += msg_len;
- status = ClrReleaseMutex(gc_log_lock);
- assert (status);
+ gc_log_lock.Leave();
}
void GCLog (const char *fmt, ... )
{
- if (gc_log_on && (gc_log != INVALID_HANDLE_VALUE))
+ if (gc_log_on && (gc_log != NULL))
{
va_list args;
va_start(args, fmt);
@@ -445,11 +445,12 @@ void GCLog (const char *fmt, ... )
va_end(args);
}
}
-#endif //TRACE_GC
+#endif // TRACE_GC && !DACCESS_COMPILE
+
+#if defined(GC_CONFIG_DRIVEN) && !defined(DACCESS_COMPILE)
-#ifdef GC_CONFIG_DRIVEN
BOOL gc_config_log_on = FALSE;
-HANDLE gc_config_log = INVALID_HANDLE_VALUE;
+FILE* gc_config_log = NULL;
// we keep this much in a buffer and only flush when the buffer is full
#define gc_config_log_buffer_size (1*1024) // TEMP
@@ -473,9 +474,8 @@ void log_va_msg_config(const char *fmt, va_list args)
if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size)
{
- uint32_t written_to_log = 0;
- WriteFile (gc_config_log, gc_config_log_buffer, (uint32_t)gc_config_log_buffer_offset, (DWORD*)&written_to_log, NULL);
- FlushFileBuffers (gc_config_log);
+ fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log);
+ fflush(gc_config_log);
gc_config_log_buffer_offset = 0;
}
@@ -485,14 +485,14 @@ void log_va_msg_config(const char *fmt, va_list args)
void GCLogConfig (const char *fmt, ... )
{
- if (gc_config_log_on && (gc_config_log != INVALID_HANDLE_VALUE))
+ if (gc_config_log_on && (gc_config_log != NULL))
{
va_list args;
va_start( args, fmt );
log_va_msg_config (fmt, args);
}
}
-#endif //GC_CONFIG_DRIVEN
+#endif // GC_CONFIG_DRIVEN && !DACCESS_COMPILE
#ifdef SYNCHRONIZATION_STATS
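Both the trace log and the config log now write through C runtime streams, which is what lets the same code build against the Unix PAL. The flush discipline is a bounded circular file: when the chunk count exceeds the cap, the file position rewinds and the oldest chunk is overwritten. A compact sketch under those assumptions; names and the cap value are illustrative:

    #include <cstdio>
    #include <cstring>

    const size_t kChunk = 1024 * 1024;            // mirrors gc_log_buffer_size
    char   g_buf[kChunk];
    size_t g_chunk_index = 0, g_max_chunks = 8;   // assumed cap

    void flush_chunk(FILE* log)
    {
        if (++g_chunk_index > g_max_chunks)
        {
            fseek(log, 0, SEEK_SET);   // wrap and overwrite the oldest chunk
            g_chunk_index = 0;
        }
        fwrite(g_buf, kChunk, 1, log);
        fflush(log);
        memset(g_buf, '*', kChunk);    // padding marker, as gc_log_buffer uses
    }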
@@ -523,7 +523,7 @@ init_sync_log_stats()
gc_during_log = 0;
gc_lock_contended = 0;
- log_start_tick = GetTickCount();
+ log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
}
gc_count_during_log++;
#endif //SYNCHRONIZATION_STATS
@@ -534,7 +534,7 @@ process_sync_log_stats()
{
#ifdef SYNCHRONIZATION_STATS
- unsigned int log_elapsed = GetTickCount() - log_start_tick;
+ unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick;
if (log_elapsed > log_interval)
{
@@ -700,7 +700,7 @@ public:
flavor = f;
#ifdef JOIN_STATS
- start_tick = GetTickCount();
+ start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //JOIN_STATS
return TRUE;
@@ -731,7 +731,7 @@ public:
assert (!join_struct.joined_p);
int color = join_struct.lock_color;
- if (FastInterlockDecrement((LONG*)&join_struct.join_lock) != 0)
+ if (Interlocked::Decrement(&join_struct.join_lock) != 0)
{
dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d",
flavor, join_id, (int32_t)(join_struct.join_lock)));
@@ -783,7 +783,7 @@ respin:
fire_event (gch->heap_number, time_end, type_join, join_id);
// last thread out should reset event
- if (FastInterlockDecrement((LONG*)&join_struct.join_restart) == 0)
+ if (Interlocked::Decrement(&join_struct.join_restart) == 0)
{
// the joined event must be set at this point, because the restarting must have done this
join_struct.join_restart = join_struct.n_threads - 1;
@@ -793,7 +793,7 @@ respin:
#ifdef JOIN_STATS
// parallel execution starts here
start[gch->heap_number] = GetCycleCount32();
- FastInterlockExchangeAdd((int*)&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
+ Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
#endif //JOIN_STATS
}
else
@@ -810,7 +810,7 @@ respin:
// and keep track of the cycles spent waiting in the join
thd = gch->heap_number;
start_seq = GetCycleCount32();
- FastInterlockExchangeAdd((int*)&in_join_total[join_id], (start_seq - end[gch->heap_number])/1000);
+ Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number])/1000);
#endif //JOIN_STATS
}
}
@@ -831,7 +831,7 @@ respin:
return TRUE;
}
- if (FastInterlockDecrement((LONG*)&join_struct.r_join_lock) != (join_struct.n_threads - 1))
+ if (Interlocked::Decrement(&join_struct.r_join_lock) != (join_struct.n_threads - 1))
{
if (!join_struct.wait_done)
{
@@ -879,7 +879,7 @@ respin:
#ifdef JOIN_STATS
// parallel execution starts here
start[gch->heap_number] = GetCycleCount32();
- FastInterlockExchangeAdd((volatile int *)&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
+ Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number])/1000);
#endif //JOIN_STATS
}
@@ -918,7 +918,7 @@ respin:
par_loss_total[join_id] += par_loss/1000;
// every 10 seconds, print a summary of the time spent in each type of join, in 1000's of clock cycles
- if (GetTickCount() - start_tick > 10*1000)
+ if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000)
{
printf("**** summary *****\n");
for (int i = 0; i < 16; i++)
@@ -926,7 +926,7 @@ respin:
printf("join #%3d seq_loss = %8u par_loss = %8u in_join_total = %8u\n", i, seq_loss_total[i], par_loss_total[i], in_join_total[i]);
elapsed_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0;
}
- start_tick = GetTickCount();
+ start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
}
#endif //JOIN_STATS
@@ -998,7 +998,7 @@ t_join bgc_t_join;
} \
if (!(expr)) \
{ \
- __SwitchToThread(0, CALLER_LIMITS_SPINNING); \
+ GCToOSInterface::YieldThread(0); \
} \
}
@@ -1051,7 +1051,7 @@ public:
{
if (alloc_objects [i] != (uint8_t*)0)
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
}
}
@@ -1060,7 +1060,7 @@ public:
{
dprintf (3, ("cm: probing %Ix", obj));
retry:
- if (FastInterlockExchange ((LONG*)&needs_checking, 1) == 0)
+ if (Interlocked::Exchange (&needs_checking, 1) == 0)
{
            // If we spend too much time scanning all the allocs,
// consider adding a high water mark and scan up
@@ -1099,7 +1099,7 @@ retry:
retry:
dprintf (3, ("loh alloc: probing %Ix", obj));
- if (FastInterlockExchange ((LONG*)&needs_checking, 1) == 0)
+ if (Interlocked::Exchange (&needs_checking, 1) == 0)
{
if (obj == rwp_object)
{
@@ -1117,7 +1117,7 @@ retry:
needs_checking = 0;
//if (cookie >= 4)
//{
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
dprintf (3, ("loh alloc: set %Ix at %d", obj, cookie));
@@ -1273,7 +1273,7 @@ void recursive_gc_sync::begin_foreground()
try_again_top:
- FastInterlockIncrement ((LONG*)&foreground_request_count);
+ Interlocked::Increment (&foreground_request_count);
try_again_no_inc:
dprintf(2, ("Waiting sync gc point"));
@@ -1291,7 +1291,7 @@ try_again_no_inc:
if (foreground_gate)
{
- FastInterlockIncrement ((LONG*)&foreground_count);
+ Interlocked::Increment (&foreground_count);
dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
if (foreground_gate)
{
@@ -1316,11 +1316,11 @@ void recursive_gc_sync::end_foreground()
dprintf (2, ("end_foreground"));
if (gc_background_running)
{
- FastInterlockDecrement ((LONG*)&foreground_request_count);
+ Interlocked::Decrement (&foreground_request_count);
dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
- if (FastInterlockDecrement ((LONG*)&foreground_count) == 0)
+ if (Interlocked::Decrement (&foreground_count) == 0)
{
- //c_write_volatile ((BOOL*)&foreground_gate, 0);
+ //c_write ((BOOL*)&foreground_gate, 0);
// TODO - couldn't make the syntax work with Volatile<T>
foreground_gate = 0;
if (foreground_count == 0)
@@ -1350,7 +1350,7 @@ BOOL recursive_gc_sync::allow_foreground()
//background and foreground
// gc_heap::disallow_new_allocation (0);
- //__SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ //GCToOSInterface::YieldThread(0);
//END of TODO
if (foreground_request_count != 0)
@@ -1362,7 +1362,7 @@ BOOL recursive_gc_sync::allow_foreground()
do
{
did_fgc = TRUE;
- //c_write_volatile ((BOOL*)&foreground_gate, 1);
+ //c_write ((BOOL*)&foreground_gate, 1);
// TODO - couldn't make the syntax work with Volatile<T>
foreground_gate = 1;
foreground_allowed.Set ();
@@ -1411,9 +1411,6 @@ __asm pop EDX
#endif //COUNT_CYCLES || JOIN_STATS || SYNCHRONIZATION_STATS
-LARGE_INTEGER qpf;
-
-
#ifdef TIME_GC
int mark_time, plan_time, sweep_time, reloc_time, compact_time;
#endif //TIME_GC
@@ -1438,13 +1435,9 @@ void reset_memory (uint8_t* o, size_t sizeo);
#ifdef WRITE_WATCH
-#define MEM_WRITE_WATCH 0x200000
-
-static uint32_t mem_reserve = MEM_RESERVE;
+static bool virtual_alloc_write_watch = false;
-#ifndef FEATURE_REDHAWK
-BOOL write_watch_capability = FALSE;
-#endif
+static bool write_watch_capability = false;
#ifndef DACCESS_COMPILE
@@ -1452,34 +1445,22 @@ BOOL write_watch_capability = FALSE;
void write_watch_api_supported()
{
-#ifndef FEATURE_REDHAWK
- // check if the OS will accept the MEM_WRITE_WATCH flag at runtime.
- // Drawbridge does not support write-watch so we still need to do the runtime detection for them.
- // Otherwise, all currently supported OSes do support write-watch.
- void* mem = VirtualAlloc (0, g_SystemInfo.dwAllocationGranularity, MEM_WRITE_WATCH|MEM_RESERVE,
- PAGE_READWRITE);
- if (mem == 0)
+ if (GCToOSInterface::SupportsWriteWatch())
{
- dprintf (2,("WriteWatch not supported"));
+ write_watch_capability = true;
+ dprintf (2, ("WriteWatch supported"));
}
else
{
- write_watch_capability = TRUE;
- dprintf (2, ("WriteWatch supported"));
- VirtualFree (mem, 0, MEM_RELEASE);
+ dprintf (2,("WriteWatch not supported"));
}
-#endif //FEATURE_REDHAWK
}
#endif //!DACCESS_COMPILE
inline BOOL can_use_write_watch()
{
-#ifdef FEATURE_REDHAWK
- return PalHasCapability(WriteWatchCapability);
-#else //FEATURE_REDHAWK
return write_watch_capability;
-#endif //FEATURE_REDHAWK
}
#else
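The runtime probe that used to live here inline is what GCToOSInterface::SupportsWriteWatch presumably wraps on Windows: reserve a region with MEM_WRITE_WATCH and see whether the OS accepts the flag. A sketch of that detection idiom, lifted from the deleted code; ProbeWriteWatchSupport is a hypothetical name:

    #include <windows.h>

    bool ProbeWriteWatchSupport(size_t granularity)
    {
        void* mem = VirtualAlloc(nullptr, granularity,
                                 MEM_WRITE_WATCH | MEM_RESERVE, PAGE_READWRITE);
        if (mem == nullptr)
            return false;                  // the OS rejected MEM_WRITE_WATCH
        VirtualFree(mem, 0, MEM_RELEASE);  // probe only; release immediately
        return true;
    }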
@@ -1509,12 +1490,12 @@ void WaitLongerNoInstru (int i)
{
        YieldProcessor(); // indicate to the processor that we are spinning
if (i & 0x01f)
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
// If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
@@ -1544,7 +1525,7 @@ static void safe_switch_to_thread()
Thread* current_thread = GetThread();
BOOL cooperative_mode = gc_heap::enable_preemptive(current_thread);
- __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread(0);
gc_heap::disable_preemptive(current_thread, cooperative_mode);
}
@@ -1558,7 +1539,7 @@ static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
{
retry:
- if (FastInterlockExchange ((LONG*)lock, 0) >= 0)
+ if (Interlocked::Exchange (lock, 0) >= 0)
{
unsigned int i = 0;
while (VolatileLoad(lock) >= 0)
@@ -1600,7 +1581,7 @@ retry:
inline
static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock)
{
- return (FastInterlockExchange ((LONG*)&*lock, 0) < 0);
+ return (Interlocked::Exchange (&*lock, 0) < 0);
}
inline
@@ -1685,12 +1666,12 @@ void WaitLonger (int i
{
        YieldProcessor(); // indicate to the processor that we are spinning
if (i & 0x01f)
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
// If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
@@ -1719,7 +1700,7 @@ static void enter_spin_lock (GCSpinLock* spin_lock)
{
retry:
- if (FastInterlockExchange ((LONG*)&spin_lock->lock, 0) >= 0)
+ if (Interlocked::Exchange (&spin_lock->lock, 0) >= 0)
{
unsigned int i = 0;
while (spin_lock->lock >= 0)
@@ -1747,13 +1728,13 @@ retry:
Thread* current_thread = GetThread();
BOOL cooperative_mode = gc_heap::enable_preemptive (current_thread);
- __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread(0);
gc_heap::disable_preemptive (current_thread, cooperative_mode);
}
}
else
- __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread(0);
}
else
{
@@ -1770,7 +1751,7 @@ retry:
inline BOOL try_enter_spin_lock(GCSpinLock* spin_lock)
{
- return (FastInterlockExchange ((LONG*)&spin_lock->lock, 0) < 0);
+ return (Interlocked::Exchange (&spin_lock->lock, 0) < 0);
}
inline
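The spin-lock word keeps its sign convention through the port: negative means free, and exchanging in 0 both claims the lock and reports the prior state in one atomic step. A distilled sketch of the protocol, assuming the same Interlocked wrapper; lock_word, try_take, and release are illustrative names:

    volatile int32_t lock_word = -1;   // negative == unowned

    bool try_take()
    {
        // A previous value below zero means we made the free -> taken transition.
        return Interlocked::Exchange(&lock_word, 0) < 0;
    }

    void release()
    {
        lock_word = -1;   // volatile store; contenders re-fence on their Exchange
    }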
@@ -1783,8 +1764,6 @@ static void leave_spin_lock (GCSpinLock * spin_lock)
#endif //_DEBUG
-#endif // !DACCESS_COMPILE
-
BOOL gc_heap::enable_preemptive (Thread* current_thread)
{
bool cooperative_mode = false;
@@ -1811,6 +1790,8 @@ void gc_heap::disable_preemptive (Thread* current_thread, BOOL restore_cooperati
}
}
+#endif // !DACCESS_COMPILE
+
typedef void ** PTR_PTR;
//This function clears a piece of memory
// size has to be Dword aligned
@@ -2256,8 +2237,6 @@ CLREvent gc_heap::gc_start_event;
SVAL_IMPL_NS(int, SVR, gc_heap, n_heaps);
SPTR_IMPL_NS(PTR_gc_heap, SVR, gc_heap, g_heaps);
-HANDLE* gc_heap::g_gc_threads;
-
size_t* gc_heap::g_promoted;
#ifdef MH_SC_MARK
@@ -2308,13 +2287,15 @@ float gc_heap::short_plugs_pad_ratio = 0;
#define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024)
size_t gc_heap::youngest_gen_desired_th;
+#endif //BIT64
+
+uint64_t gc_heap::mem_one_percent;
-size_t gc_heap::mem_one_percent;
+uint32_t gc_heap::high_memory_load_th;
uint64_t gc_heap::total_physical_mem;
uint64_t gc_heap::available_physical_mem;
-#endif // BIT64
#ifdef BACKGROUND_GC
CLREvent gc_heap::bgc_start_event;
@@ -2458,7 +2439,7 @@ BOOL gc_heap::loh_compacted_p = FALSE;
#ifdef BACKGROUND_GC
-uint32_t gc_heap::bgc_thread_id = 0;
+EEThreadId gc_heap::bgc_thread_id;
uint8_t* gc_heap::background_written_addresses [array_size+2];
@@ -2516,7 +2497,7 @@ BOOL gc_heap::bgc_thread_running;
CLREvent gc_heap::background_gc_create_event;
-CRITICAL_SECTION gc_heap::bgc_threads_timeout_cs;
+CLRCriticalSection gc_heap::bgc_threads_timeout_cs;
CLREvent gc_heap::gc_lh_block_event;
@@ -2967,7 +2948,7 @@ gc_heap::dt_high_frag_p (gc_tuning_point tp,
}
inline BOOL
-gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number, uint64_t total_mem)
+gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number)
{
BOOL ret = FALSE;
@@ -2983,24 +2964,21 @@ gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number, uint64
size_t est_maxgen_surv = (size_t)((float) (maxgen_total_size) * dd_surv (dd));
size_t est_maxgen_free = maxgen_total_size - est_maxgen_surv + dd_fragmentation (dd);
-#ifdef SIMPLE_DPRINTF
- dprintf (GTC_LOG, ("h%d: Total gen2 size: %Id(s: %d%%), est gen2 dead space: %Id (s: %d, allocated: %Id), frag: %Id, 3%% of physical mem is %Id bytes",
+ dprintf (GTC_LOG, ("h%d: Total gen2 size: %Id, est gen2 dead space: %Id (s: %d, allocated: %Id), frag: %Id",
heap_number,
maxgen_total_size,
- (int)(100*dd_surv (dd)),
est_maxgen_free,
(int)(dd_surv (dd) * 100),
maxgen_allocated,
- dd_fragmentation (dd),
- (size_t)((float)total_mem * 0.03)));
-#endif //SIMPLE_DPRINTF
+ dd_fragmentation (dd)));
+
uint32_t num_heaps = 1;
#ifdef MULTIPLE_HEAPS
num_heaps = gc_heap::n_heaps;
#endif //MULTIPLE_HEAPS
- size_t min_frag_th = min_reclaim_fragmentation_threshold(total_mem, num_heaps);
+ size_t min_frag_th = min_reclaim_fragmentation_threshold (num_heaps);
dprintf (GTC_LOG, ("h%d, min frag is %Id", heap_number, min_frag_th));
ret = (est_maxgen_free >= min_frag_th);
}
@@ -4256,14 +4234,15 @@ void* virtual_alloc (size_t size)
if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
{
gc_heap::reserved_memory_limit =
- CNameSpace::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
+ GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
{
return 0;
}
}
- void* prgmem = ClrVirtualAllocAligned (0, requested_size, mem_reserve, PAGE_READWRITE, card_size * card_word_width);
+ uint32_t flags = virtual_alloc_write_watch ? VirtualReserveFlags::WriteWatch : VirtualReserveFlags::None;
+ void* prgmem = GCToOSInterface::VirtualReserve (0, requested_size, card_size * card_word_width, flags);
void *aligned_mem = prgmem;
// We don't want (prgmem + size) to be right at the end of the address space
@@ -4277,7 +4256,7 @@ void* virtual_alloc (size_t size)
if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC))
{
- VirtualFree (prgmem, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease (prgmem, requested_size);
dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding",
requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
prgmem = 0;
@@ -4298,7 +4277,7 @@ void* virtual_alloc (size_t size)
void virtual_free (void* add, size_t size)
{
- VirtualFree (add, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease (add, size);
gc_heap::reserved_memory -= size;
dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[",
size, (size_t)add, (size_t)((uint8_t*)add+size)));
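Note the contract change hiding in these two hunks: VirtualFree(addr, 0, MEM_RELEASE) let the OS recover the region size, but GCToOSInterface::VirtualRelease takes the size explicitly, so every reservation's length must now be remembered by the caller; the size field added to card_table_info further down exists for exactly this reason. A sketch of the full lifecycle against the interface surface shown in this patch:

    size_t len = 16 * 1024 * 1024;
    void* base = GCToOSInterface::VirtualReserve(0, len, 0, VirtualReserveFlags::None);
    if (base != nullptr)
    {
        if (GCToOSInterface::VirtualCommit(base, len))
        {
            // ... use the memory ...
            GCToOSInterface::VirtualDecommit(base, len);
        }
        GCToOSInterface::VirtualRelease(base, len);  // size must match the reserve
    }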
@@ -4758,6 +4737,7 @@ gc_heap::get_large_segment (size_t size, BOOL* did_full_compact_gc)
return res;
}
+#if 0
BOOL gc_heap::unprotect_segment (heap_segment* seg)
{
uint8_t* start = align_lower_page (heap_segment_mem (seg));
@@ -4765,16 +4745,15 @@ BOOL gc_heap::unprotect_segment (heap_segment* seg)
if (region_size != 0 )
{
- uint32_t old_protection;
dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));
- BOOL status = VirtualProtect (start, region_size,
- PAGE_READWRITE, (DWORD*)&old_protection);
+ BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
assert (status);
return status;
}
return FALSE;
}
+#endif
#ifdef MULTIPLE_HEAPS
#ifdef _X86_
@@ -4837,10 +4816,6 @@ extern "C" uint64_t __rdtsc();
#error NYI platform: get_cycle_count
#endif //_TARGET_X86_
-// The purpose of this whole class is to guess the right heap to use for a given thread.
-typedef
-uint32_t (WINAPI *GetCurrentProcessorNumber_t)(void);
-
class heap_select
{
heap_select() {}
@@ -4866,31 +4841,11 @@ class heap_select
return (int) elapsed_cycles;
}
- static
- GetCurrentProcessorNumber_t GCGetCurrentProcessorNumber;
-
- //check if the new APIs are supported.
- static
- BOOL api_supported()
- {
-#ifdef FEATURE_REDHAWK
- BOOL fSupported = PalHasCapability(GetCurrentProcessorNumberCapability);
- GCGetCurrentProcessorNumber = fSupported ? PalGetCurrentProcessorNumber : NULL;
- return fSupported;
-#elif !defined(FEATURE_PAL)
- // on all platforms we support this API exists.
- GCGetCurrentProcessorNumber = (GetCurrentProcessorNumber_t)&GetCurrentProcessorNumber;
- return TRUE;
-#else
- return FALSE;
-#endif //FEATURE_REDHAWK
- }
-
public:
static BOOL init(int n_heaps)
{
assert (sniff_buffer == NULL && n_sniff_buffers == 0);
- if (!api_supported())
+ if (!GCToOSInterface::CanGetCurrentProcessorNumber())
{
n_sniff_buffers = n_heaps*2+1;
size_t sniff_buf_size = 0;
@@ -4922,10 +4877,10 @@ public:
static void init_cpu_mapping(gc_heap *heap, int heap_number)
{
- if (GCGetCurrentProcessorNumber != 0)
+ if (GCToOSInterface::CanGetCurrentProcessorNumber())
{
- uint32_t proc_no = GCGetCurrentProcessorNumber() % gc_heap::n_heaps;
- // We can safely cast heap_number to a uint8_t 'cause GetCurrentProcessCpuCount
+ uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps;
+        // We can safely cast heap_number to a uint8_t 'cause GetCurrentProcessCpuCount
// only returns up to MAX_SUPPORTED_CPUS procs right now. We only ever create at most
// MAX_SUPPORTED_CPUS GC threads.
proc_no_to_heap_no[proc_no] = (uint8_t)heap_number;
@@ -4934,7 +4889,7 @@ public:
static void mark_heap(int heap_number)
{
- if (GCGetCurrentProcessorNumber != 0)
+ if (GCToOSInterface::CanGetCurrentProcessorNumber())
return;
for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++)
@@ -4943,10 +4898,10 @@ public:
static int select_heap(alloc_context* acontext, int hint)
{
- if (GCGetCurrentProcessorNumber)
- return proc_no_to_heap_no[GCGetCurrentProcessorNumber() % gc_heap::n_heaps];
+ if (GCToOSInterface::CanGetCurrentProcessorNumber())
+ return proc_no_to_heap_no[GCToOSInterface::GetCurrentProcessorNumber() % gc_heap::n_heaps];
- unsigned sniff_index = FastInterlockIncrement((LONG *)&cur_sniff_index);
+ unsigned sniff_index = Interlocked::Increment(&cur_sniff_index);
sniff_index %= n_sniff_buffers;
int best_heap = 0;
@@ -4986,10 +4941,7 @@ public:
static BOOL can_find_heap_fast()
{
- if (GCGetCurrentProcessorNumber)
- return TRUE;
- else
- return FALSE;
+ return GCToOSInterface::CanGetCurrentProcessorNumber();
}
static uint8_t find_proc_no_from_heap_no(int heap_number)
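heap_select's function-pointer gymnastics collapse into two interface calls: when the current processor number is cheap to read, heap selection is a direct table lookup, and the sniff buffers are only allocated when it is not (hence the negated check in init above). The fast path, assuming proc_no_to_heap_no was populated by init_cpu_mapping; select_heap_fast is an illustrative name:

    int select_heap_fast(const uint8_t* proc_no_to_heap_no, int n_heaps)
    {
        uint32_t proc = GCToOSInterface::GetCurrentProcessorNumber() % n_heaps;
        return proc_no_to_heap_no[proc];
    }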
@@ -5058,7 +5010,6 @@ public:
uint8_t* heap_select::sniff_buffer;
unsigned heap_select::n_sniff_buffers;
unsigned heap_select::cur_sniff_index;
-GetCurrentProcessorNumber_t heap_select::GCGetCurrentProcessorNumber;
uint8_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
uint8_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
uint8_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
@@ -5108,17 +5059,14 @@ void gc_heap::destroy_thread_support ()
}
}
-void set_thread_group_affinity_for_heap(HANDLE gc_thread, int heap_number)
-{
#if !defined(FEATURE_REDHAWK) && !defined(FEATURE_CORECLR)
- GROUP_AFFINITY ga;
- uint16_t gn, gpn;
+void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affinity)
+{
+ affinity->Group = GCThreadAffinity::None;
+ affinity->Processor = GCThreadAffinity::None;
+ uint16_t gn, gpn;
CPUGroupInfo::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
- ga.Group = gn;
- ga.Reserved[0] = 0; // reserve must be filled with zero
- ga.Reserved[1] = 0; // otherwise call may fail
- ga.Reserved[2] = 0;
int bit_number = 0;
for (uintptr_t mask = 1; mask !=0; mask <<=1)
@@ -5126,8 +5074,8 @@ void set_thread_group_affinity_for_heap(HANDLE gc_thread, int heap_number)
if (bit_number == gpn)
{
dprintf(3, ("using processor group %d, mask %x%Ix for heap %d\n", gn, mask, heap_number));
- ga.Mask = mask;
- CPUGroupInfo::SetThreadGroupAffinity(gc_thread, &ga, NULL);
+ affinity->Processor = gpn;
+ affinity->Group = gn;
heap_select::set_cpu_group_for_heap(heap_number, (uint8_t)gn);
heap_select::set_group_proc_for_heap(heap_number, (uint8_t)gpn);
if (NumaNodeInfo::CanEnableGCNumaAware())
@@ -5149,15 +5097,15 @@ void set_thread_group_affinity_for_heap(HANDLE gc_thread, int heap_number)
}
bit_number++;
}
-#endif
}
-void set_thread_affinity_mask_for_heap(HANDLE gc_thread, int heap_number)
+void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affinity)
{
-#if !defined(FEATURE_REDHAWK) && !defined(FEATURE_CORECLR)
- DWORD_PTR pmask, smask;
+ affinity->Group = GCThreadAffinity::None;
+ affinity->Processor = GCThreadAffinity::None;
- if (GetProcessAffinityMask(GetCurrentProcess(), &pmask, &smask))
+ uintptr_t pmask, smask;
+ if (GCToOSInterface::GetCurrentProcessAffinityMask(&pmask, &smask))
{
pmask &= smask;
int bit_number = 0;
@@ -5168,8 +5116,8 @@ void set_thread_affinity_mask_for_heap(HANDLE gc_thread, int heap_number)
{
if (bit_number == heap_number)
{
- dprintf (3, ("Using processor mask 0x%Ix for heap %d\n", mask, heap_number));
- SetThreadAffinityMask(gc_thread, mask);
+ dprintf (3, ("Using processor %d for heap %d\n", proc_number, heap_number));
+ affinity->Processor = proc_number;
heap_select::set_proc_no_for_heap(heap_number, proc_number);
if (NumaNodeInfo::CanEnableGCNumaAware())
{ // have the processor number, find the numa node
@@ -5199,42 +5147,35 @@ void set_thread_affinity_mask_for_heap(HANDLE gc_thread, int heap_number)
proc_number++;
}
}
-#endif
}
+#endif // !FEATURE_REDHAWK && !FEATURE_CORECLR
-HANDLE gc_heap::create_gc_thread ()
+bool gc_heap::create_gc_thread ()
{
- uint32_t thread_id;
dprintf (3, ("Creating gc thread\n"));
-#ifdef FEATURE_REDHAWK
- HANDLE gc_thread = CreateThread(0, 4096, gc_thread_stub,this, CREATE_SUSPENDED, &thread_id);
-#else //FEATURE_REDHAWK
- HANDLE gc_thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, (DWORD (*)(void*))gc_thread_stub, this, CREATE_SUSPENDED, (DWORD*)&thread_id);
-#endif //FEATURE_REDHAWK
-
- if (!gc_thread)
- {
- return 0;;
- }
- SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST );
+ GCThreadAffinity affinity;
+ affinity.Group = GCThreadAffinity::None;
+ affinity.Processor = GCThreadAffinity::None;
+#if !defined(FEATURE_REDHAWK) && !defined(FEATURE_CORECLR)
    //We are about to set affinity for GC threads; it is a good place to set up NUMA and
    //CPU groups, because the process mask, processor number, and group number are all
    //readily available.
if (CPUGroupInfo::CanEnableGCCPUGroups())
- set_thread_group_affinity_for_heap(gc_thread, heap_number);
+ set_thread_group_affinity_for_heap(heap_number, &affinity);
else
- set_thread_affinity_mask_for_heap(gc_thread, heap_number);
+ set_thread_affinity_mask_for_heap(heap_number, &affinity);
+
+#endif // !FEATURE_REDHAWK && !FEATURE_CORECLR
- ResumeThread(gc_thread);
- return gc_thread;
+ return GCToOSInterface::CreateThread(gc_thread_stub, this, &affinity);
}
#ifdef _MSC_VER
#pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
#endif //_MSC_VER
-uint32_t gc_heap::gc_thread_function ()
+void gc_heap::gc_thread_function ()
{
assert (gc_done_event.IsValid());
assert (gc_start_event.IsValid());
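Creating a GC thread shrinks to a single call: the desired affinity travels in a GCThreadAffinity struct (Group/Processor, with None meaning unconstrained), the CreateThread/SetThreadPriority/ResumeThread sequence moves behind GCToOSInterface::CreateThread, and the stub's return value disappears along with gc_thread_function's. A sketch of the call shape, with the entry signature assumed to be void(*)(void*); gc_entry and start_gc_thread are stand-in names:

    void gc_entry(void* param);   // stands in for gc_thread_stub

    bool start_gc_thread(void* heap)
    {
        GCThreadAffinity affinity;
        affinity.Group = GCThreadAffinity::None;       // no group pinning
        affinity.Processor = GCThreadAffinity::None;   // no processor pinning
        return GCToOSInterface::CreateThread(gc_entry, heap, &affinity);
    }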
@@ -5322,7 +5263,6 @@ uint32_t gc_heap::gc_thread_function ()
set_gc_done();
}
}
- return 0;
}
#ifdef _MSC_VER
#pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
@@ -5330,8 +5270,7 @@ uint32_t gc_heap::gc_thread_function ()
#endif //MULTIPLE_HEAPS
-void* virtual_alloc_commit_for_heap(void* addr, size_t size, uint32_t type,
- uint32_t prot, int h_number)
+bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
{
#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL)
// Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
@@ -5342,17 +5281,17 @@ void* virtual_alloc_commit_for_heap(void* addr, size_t size, uint32_t type,
{
uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
void * ret = NumaNodeInfo::VirtualAllocExNuma(GetCurrentProcess(), addr, size,
- type, prot, numa_node);
+ MEM_COMMIT, PAGE_READWRITE, numa_node);
if (ret != NULL)
- return ret;
+ return true;
}
}
#else
UNREFERENCED_PARAMETER(h_number);
#endif
- //numa aware not enabled, or call failed --> fallback to VirtualAlloc()
- return VirtualAlloc(addr, size, type, prot);
+ //numa aware not enabled, or call failed --> fallback to VirtualCommit()
+ return GCToOSInterface::VirtualCommit(addr, size);
}
#ifndef SEG_MAPPING_TABLE
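virtual_alloc_commit_for_heap keeps its NUMA-first policy but now reports plain success or failure: try the node-local commit, and only fall back to the portable commit when NUMA is off or the node-specific call fails. A condensed restatement of the control flow above; commit_for_heap is an illustrative name:

    bool commit_for_heap(void* addr, size_t size, uint32_t numa_node, bool numa_enabled)
    {
        if (numa_enabled)
        {
            void* ret = NumaNodeInfo::VirtualAllocExNuma(GetCurrentProcess(), addr, size,
                                                         MEM_COMMIT, PAGE_READWRITE, numa_node);
            if (ret != nullptr)
                return true;
        }
        // NUMA not enabled, or the node-local commit failed.
        return GCToOSInterface::VirtualCommit(addr, size);
    }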
@@ -5973,7 +5912,7 @@ bool gc_heap::new_allocation_allowed (int gen_number)
if ((allocation_running_amount - dd_new_allocation (dd0)) >
dd_min_gc_size (dd0))
{
- uint32_t ctime = GetTickCount();
+ uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp();
if ((ctime - allocation_running_time) > 1000)
{
dprintf (2, (">1s since last gen0 gc"));
@@ -6573,6 +6512,7 @@ public:
uint32_t* mark_array;
#endif //MARK_ARRAY
+ size_t size;
uint32_t* next_card_table;
};
@@ -6719,7 +6659,7 @@ void gc_heap::mark_array_set_marked (uint8_t* add)
size_t index = mark_word_of (add);
uint32_t val = (1 << mark_bit_bit_of (add));
#ifdef MULTIPLE_HEAPS
- InterlockedOr ((LONG*)&(mark_array [index]), val);
+ Interlocked::Or (&(mark_array [index]), val);
#else
mark_array [index] |= val;
#endif
@@ -6830,6 +6770,12 @@ uint32_t*& card_table_next (uint32_t* c_table)
return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table;
}
+inline
+size_t& card_table_size (uint32_t* c_table)
+{
+ return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size;
+}
+
void own_card_table (uint32_t* c_table)
{
card_table_refcount (c_table) += 1;
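card_table_size joins the accessor family that reads fields out of a card_table_info header stored in the bytes immediately below the table pointer; the recorded size is what destroy_card_table now hands to VirtualRelease. The layout idiom, with a simplified struct standing in for the real header:

    #include <cstdint>
    #include <cstddef>

    struct table_header { size_t refcount; size_t size; };   // simplified stand-in

    inline size_t& table_size(uint32_t* table)
    {
        // The header sits just below the table, so step back by its size.
        return ((table_header*)((uint8_t*)table - sizeof(table_header)))->size;
    }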
@@ -6881,7 +6827,8 @@ void release_card_table (uint32_t* c_table)
void destroy_card_table (uint32_t* c_table)
{
// delete (uint32_t*)&card_table_refcount(c_table);
- VirtualFree (&card_table_refcount(c_table), 0, MEM_RELEASE);
+
+ GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table));
dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table)));
}
@@ -6890,7 +6837,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
assert (g_lowest_address == start);
assert (g_highest_address == end);
- uint32_t mem_flags = MEM_RESERVE;
+ uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
size_t bs = size_brick_of (start, end);
size_t cs = size_card_of (start, end);
@@ -6907,7 +6854,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
#ifdef CARD_BUNDLE
if (can_use_write_watch())
{
- mem_flags |= MEM_WRITE_WATCH;
+ virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
cb = size_card_bundle_of (g_lowest_address, g_highest_address);
}
#endif //CARD_BUNDLE
@@ -6923,8 +6870,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
size_t alloc_size = sizeof (uint8_t)*(bs + cs + cb + ms + st + sizeof (card_table_info));
size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1);
- uint32_t* ct = (uint32_t*)VirtualAlloc (0, alloc_size_aligned,
- mem_flags, PAGE_READWRITE);
+ uint32_t* ct = (uint32_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
if (!ct)
return 0;
@@ -6935,10 +6881,10 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
// mark array will be committed separately (per segment).
size_t commit_size = alloc_size - ms;
- if (!VirtualAlloc ((uint8_t*)ct, commit_size, MEM_COMMIT, PAGE_READWRITE))
+ if (!GCToOSInterface::VirtualCommit ((uint8_t*)ct, commit_size))
{
- dprintf (2, ("Table commit failed: %d", GetLastError()));
- VirtualFree ((uint8_t*)ct, 0, MEM_RELEASE);
+ dprintf (2, ("Table commit failed"));
+ GCToOSInterface::VirtualRelease ((uint8_t*)ct, alloc_size_aligned);
return 0;
}
@@ -6948,6 +6894,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
card_table_lowest_address (ct) = start;
card_table_highest_address (ct) = end;
card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs);
+ card_table_size (ct) = alloc_size_aligned;
card_table_next (ct) = 0;
#ifdef CARD_BUNDLE
@@ -7013,7 +6960,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
//modify the higest address so the span covered
//is twice the previous one.
GCMemoryStatus st;
- GetProcessMemoryLoad (&st);
+ GCToOSInterface::GetMemoryStatus (&st);
uint8_t* top = (uint8_t*)0 + Align ((size_t)(st.ullTotalVirtual));
// On non-Windows systems, we get only an approximate ullTotalVirtual
// value that can possibly be slightly lower than the saved_g_highest_address.
@@ -7053,7 +7000,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
(size_t)saved_g_lowest_address,
(size_t)saved_g_highest_address));
- uint32_t mem_flags = MEM_RESERVE;
+ uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
uint32_t* saved_g_card_table = g_card_table;
uint32_t* ct = 0;
short* bt = 0;
@@ -7074,7 +7021,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
#ifdef CARD_BUNDLE
if (can_use_write_watch())
{
- mem_flags |= MEM_WRITE_WATCH;
+ virtual_reserve_flags = VirtualReserveFlags::WriteWatch;
cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address);
}
#endif //CARD_BUNDLE
@@ -7092,7 +7039,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
dprintf (GC_TABLE_LOG, ("brick table: %Id; card table: %Id; mark array: %Id, card bundle: %Id, seg table: %Id",
bs, cs, ms, cb, st));
- uint8_t* mem = (uint8_t*)VirtualAlloc (0, alloc_size_aligned, mem_flags, PAGE_READWRITE);
+ uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags);
if (!mem)
{
@@ -7107,7 +7054,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// mark array will be committed separately (per segment).
size_t commit_size = alloc_size - ms;
- if (!VirtualAlloc (mem, commit_size, MEM_COMMIT, PAGE_READWRITE))
+ if (!GCToOSInterface::VirtualCommit (mem, commit_size))
{
dprintf (GC_TABLE_LOG, ("Table commit failed"));
set_fgm_result (fgm_commit_table, commit_size, loh_p);
@@ -7206,7 +7153,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
// with address that it does not cover. Write barriers access card table
// without memory barriers for performance reasons, so we need to flush
// the store buffers here.
- FlushProcessWriteBuffers();
+ GCToOSInterface::FlushProcessWriteBuffers();
g_lowest_address = saved_g_lowest_address;
VolatileStore(&g_highest_address, saved_g_highest_address);
@@ -7224,9 +7171,9 @@ fail:
}
//delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
- if (!VirtualFree (mem, 0, MEM_RELEASE))
+ if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned))
{
- dprintf (GC_TABLE_LOG, ("VirtualFree failed: %d", GetLastError()));
+ dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed"));
assert (!"release failed");
}
}
@@ -8951,12 +8898,10 @@ int gc_heap::object_gennum_plan (uint8_t* o)
heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h_number)
{
- void * res;
size_t initial_commit = SEGMENT_INITIAL_COMMIT;
//Commit the first page
- if ((res = virtual_alloc_commit_for_heap (new_pages, initial_commit,
- MEM_COMMIT, PAGE_READWRITE, h_number)) == 0)
+ if (!virtual_alloc_commit_for_heap (new_pages, initial_commit, h_number))
{
return 0;
}
@@ -9063,7 +9008,7 @@ void gc_heap::reset_heap_segment_pages (heap_segment* seg)
size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg));
size_t size = (size_t)heap_segment_committed (seg) - page_start;
if (size != 0)
- VirtualAlloc ((char*)page_start, size, MEM_RESET, PAGE_READWRITE);
+ GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */);
#endif //!FEATURE_PAL
}
@@ -9078,7 +9023,7 @@ void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
page_start += max(extra_space, 32*OS_PAGE_SIZE);
size -= max (extra_space, 32*OS_PAGE_SIZE);
- VirtualFree (page_start, size, MEM_DECOMMIT);
+ GCToOSInterface::VirtualDecommit (page_start, size);
dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)",
(size_t)page_start,
(size_t)(page_start + size),
@@ -9103,7 +9048,7 @@ void gc_heap::decommit_heap_segment (heap_segment* seg)
#endif //BACKGROUND_GC
size_t size = heap_segment_committed (seg) - page_start;
- VirtualFree (page_start, size, MEM_DECOMMIT);
+ GCToOSInterface::VirtualDecommit (page_start, size);
//re-init the segment object
heap_segment_committed (seg) = page_start;
@@ -9240,7 +9185,6 @@ void gc_heap::update_card_table_bundle()
uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);
uint8_t* saved_base_address = base_address;
uintptr_t bcount = array_size;
- uint32_t granularity = 0;
uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);
size_t saved_region_size = align_on_page (high_address) - saved_base_address;
@@ -9248,16 +9192,15 @@ void gc_heap::update_card_table_bundle()
{
size_t region_size = align_on_page (high_address) - base_address;
dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)base_address+region_size));
- uint32_t status = GetWriteWatch (0, base_address, region_size,
+        bool success = GCToOSInterface::GetWriteWatch (false /* resetState */, base_address, region_size,
(void**)g_addresses,
- (ULONG_PTR*)&bcount, (DWORD*)&granularity);
- assert (status == 0);
- assert (granularity == OS_PAGE_SIZE);
+ &bcount);
+ assert (success);
dprintf (3,("Found %d pages written", bcount));
for (unsigned i = 0; i < bcount; i++)
{
size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];
- size_t ecardw = (uint32_t*)(min(g_addresses[i]+granularity, high_address)) - &card_table[0];
+ size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
assert (bcardw >= card_word (card_of (g_lowest_address)));
card_bundles_set (cardw_card_bundle (bcardw),
@@ -9284,7 +9227,7 @@ void gc_heap::update_card_table_bundle()
}
} while ((bcount >= array_size) && (base_address < high_address));
- ResetWriteWatch (saved_base_address, saved_region_size);
+ GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size);
#ifdef _DEBUG
@@ -9329,7 +9272,7 @@ void gc_heap::switch_one_quantum()
{
Thread* current_thread = GetThread();
enable_preemptive (current_thread);
- __SwitchToThread (1, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (1);
disable_preemptive (current_thread, TRUE);
}
@@ -9345,7 +9288,7 @@ void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size
next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size);
if (next_reset_size)
{
- ResetWriteWatch (start_address, next_reset_size);
+ GCToOSInterface::ResetWriteWatch (start_address, next_reset_size);
reset_size += next_reset_size;
switch_one_quantum();
@@ -9355,7 +9298,7 @@ void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size
assert (reset_size == total_reset_size);
}
-// This does a __SwitchToThread for every reset ww_reset_quantum bytes of reset
+// This does a Sleep(1) for every ww_reset_quantum bytes of reset
// we do concurrently.
void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size)
{
@@ -9414,7 +9357,7 @@ void gc_heap::reset_write_watch (BOOL concurrent_p)
#endif //TIME_WRITE_WATCH
dprintf (3, ("h%d: soh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
//reset_ww_by_chunk (base_address, region_size);
- ResetWriteWatch (base_address, region_size);
+ GCToOSInterface::ResetWriteWatch (base_address, region_size);
#ifdef TIME_WRITE_WATCH
unsigned int time_stop = GetCycleCount32();
@@ -9457,7 +9400,7 @@ void gc_heap::reset_write_watch (BOOL concurrent_p)
#endif //TIME_WRITE_WATCH
dprintf (3, ("h%d: loh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
//reset_ww_by_chunk (base_address, region_size);
- ResetWriteWatch (base_address, region_size);
+ GCToOSInterface::ResetWriteWatch (base_address, region_size);
#ifdef TIME_WRITE_WATCH
unsigned int time_stop = GetCycleCount32();
@@ -9561,45 +9504,28 @@ void gc_heap::adjust_ephemeral_limits ()
}
#if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
-HANDLE CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config)
+FILE* CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config)
{
+ FILE* logFile;
LPWSTR temp_logfile_name = NULL;
CLRConfig::GetConfigValue(info, &temp_logfile_name);
-#ifdef FEATURE_REDHAWK
- UNREFERENCED_PARAMETER(is_config);
- return PalCreateFileW(
- temp_logfile_name,
- GENERIC_WRITE,
- FILE_SHARE_READ,
- NULL,
- CREATE_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
-#else // FEATURE_REDHAWK
- char logfile_name[MAX_LONGPATH+1];
+ WCHAR logfile_name[MAX_LONGPATH+1];
if (temp_logfile_name != 0)
{
- int ret;
- ret = WszWideCharToMultiByte(CP_ACP, 0, temp_logfile_name, -1, logfile_name, sizeof(logfile_name)-1, NULL, NULL);
- _ASSERTE(ret != 0);
- delete temp_logfile_name;
- }
-
- char szPid[20];
- sprintf_s(szPid, _countof(szPid), ".%d", GetCurrentProcessId());
- strcat_s(logfile_name, _countof(logfile_name), szPid);
- strcat_s(logfile_name, _countof(logfile_name), (is_config ? ".config.log" : ".log"));
-
- return CreateFileA(
- logfile_name,
- GENERIC_WRITE,
- FILE_SHARE_READ,
- NULL,
- CREATE_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
-#endif //FEATURE_REDHAWK
+ wcscpy(logfile_name, temp_logfile_name);
+ }
+
+ size_t logfile_name_len = wcslen(logfile_name);
+ WCHAR* szPid = logfile_name + logfile_name_len;
+ size_t remaining_space = MAX_LONGPATH + 1 - logfile_name_len;
+ swprintf_s(szPid, remaining_space, W(".%d%s"), GCToOSInterface::GetCurrentProcessId(), (is_config ? W(".config.log") : W(".log")));
+
+ logFile = GCToOSInterface::OpenFile(logfile_name, W("wb"));
+
+ delete temp_logfile_name;
+
+ return logFile;
}
#endif //TRACE_GC || GC_CONFIG_DRIVEN
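The rewritten CreateLogFile stays in wide characters end to end: the pid and extension are printed straight into the unused tail of the buffer rather than via a narrow-string round trip. A sketch of the suffix arithmetic, assuming MAX_LONGPATH-sized storage, the W() literal macro, and a placeholder pid; the base name is made up:

    WCHAR name[MAX_LONGPATH + 1] = W("gclog");       // assumed base name
    size_t len = wcslen(name);
    // Append ".<pid>.log" in place; swprintf_s bounds-checks the remaining space.
    swprintf_s(name + len, MAX_LONGPATH + 1 - len, W(".%d%s"), 1234 /* pid */, W(".log"));
    // name is now L"gclog.1234.log", ready for GCToOSInterface::OpenFile(name, W("wb")).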
@@ -9616,7 +9542,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
{
gc_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCLogFile, FALSE);
- if (gc_log == INVALID_HANDLE_VALUE)
+ if (gc_log == NULL)
return E_FAIL;
// GCLogFileSize in MBs.
@@ -9624,15 +9550,15 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
if (gc_log_file_size > 500)
{
- CloseHandle (gc_log);
+ fclose (gc_log);
return E_FAIL;
}
- gc_log_lock = ClrCreateMutex(NULL, FALSE, NULL);
+ gc_log_lock.Initialize();
gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size];
if (!gc_log_buffer)
{
- CloseHandle(gc_log);
+ fclose(gc_log);
return E_FAIL;
}
@@ -9648,13 +9574,13 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
{
gc_config_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCConfigLogFile, TRUE);
- if (gc_config_log == INVALID_HANDLE_VALUE)
+ if (gc_config_log == NULL)
return E_FAIL;
gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size];
if (!gc_config_log_buffer)
{
- CloseHandle(gc_config_log);
+ fclose(gc_config_log);
return E_FAIL;
}
@@ -9688,7 +9614,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
GCStatistics::logFileName = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCMixLog);
if (GCStatistics::logFileName != NULL)
{
- GCStatistics::logFile = _wfopen((LPCWSTR)GCStatistics::logFileName, W("a"));
+ GCStatistics::logFile = GCToOSInterface::OpenFile((LPCWSTR)GCStatistics::logFileName, W("a"));
}
#endif // GC_STATS
@@ -9701,7 +9627,7 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
if (can_use_write_watch () && g_pConfig->GetGCconcurrent()!=0)
{
gc_can_use_concurrent = true;
- mem_reserve = MEM_WRITE_WATCH | MEM_RESERVE;
+ virtual_alloc_write_watch = true;
}
else
{
@@ -9778,10 +9704,6 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
return E_OUTOFMEMORY;
#endif //MH_SC_MARK
- g_gc_threads = new (nothrow) HANDLE [number_of_heaps];
- if (!g_gc_threads)
- return E_OUTOFMEMORY;
-
if (!create_thread_support (number_of_heaps))
return E_OUTOFMEMORY;
@@ -10054,7 +9976,7 @@ gc_heap::enter_gc_done_event_lock()
uint32_t dwSwitchCount = 0;
retry:
- if (FastInterlockExchange ((LONG*)&gc_done_event_lock, 0) >= 0)
+ if (Interlocked::Exchange (&gc_done_event_lock, 0) >= 0)
{
while (gc_done_event_lock >= 0)
{
@@ -10068,10 +9990,10 @@ retry:
                YieldProcessor(); // indicate to the processor that we are spinning
}
if (gc_done_event_lock >= 0)
- __SwitchToThread(0, ++dwSwitchCount);
+ GCToOSInterface::YieldThread(++dwSwitchCount);
}
else
- __SwitchToThread(0, ++dwSwitchCount);
+ GCToOSInterface::YieldThread(++dwSwitchCount);
}
goto retry;
}
@@ -10106,7 +10028,7 @@ void gc_heap::add_saved_spinlock_info (
current->enter_state = enter_state;
current->take_state = take_state;
- current->thread_id = GetCurrentThreadId();
+ current->thread_id.SetToCurrentThread();
spinlock_info_index++;
@@ -10389,8 +10311,7 @@ gc_heap::init_gc_heap (int h_number)
#ifdef MULTIPLE_HEAPS
//register the heap in the heaps array
- g_gc_threads [heap_number] = create_gc_thread ();
- if (!g_gc_threads [heap_number])
+ if (!create_gc_thread ())
return 0;
g_heaps [heap_number] = this;
@@ -10440,7 +10361,7 @@ gc_heap::init_gc_heap (int h_number)
#endif // MULTIPLE_HEAPS
#ifdef BACKGROUND_GC
- bgc_thread_id = 0;
+ bgc_thread_id.Clear();
if (!create_bgc_thread_support())
{
@@ -10463,7 +10384,7 @@ gc_heap::init_gc_heap (int h_number)
bgc_thread_running = 0;
bgc_thread = 0;
- InitializeCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Initialize();
expanded_in_fgc = 0;
current_bgc_state = bgc_not_in_process;
background_soh_alloc_count = 0;
@@ -10571,17 +10492,14 @@ void gc_heap::shutdown_gc()
#ifdef MULTIPLE_HEAPS
//delete the heaps array
delete g_heaps;
- for (int i = 0; i < n_heaps; i++)
- {
- CloseHandle (g_gc_threads [i]);
- }
- delete g_gc_threads;
destroy_thread_support();
n_heaps = 0;
#endif //MULTIPLE_HEAPS
//destroy seg_manager
destroy_initial_memory();
+
+ GCToOSInterface::Shutdown();
}
inline
@@ -10668,8 +10586,7 @@ BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address)
dprintf(3, ("Growing segment allocation %Ix %Ix", (size_t)heap_segment_committed(seg),c_size));
- if (!virtual_alloc_commit_for_heap(heap_segment_committed (seg), c_size,
- MEM_COMMIT, PAGE_READWRITE, heap_number))
+ if (!virtual_alloc_commit_for_heap(heap_segment_committed (seg), c_size, heap_number))
{
dprintf(3, ("Cannot grow heap segment"));
return FALSE;
@@ -11320,7 +11237,7 @@ void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
// could have allocated on the same heap when OOM happened.
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
}
@@ -12000,7 +11917,7 @@ void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr)
{
GCMemoryStatus ms;
memset (&ms, 0, sizeof(ms));
- GetProcessMemoryLoad(&ms);
+ GCToOSInterface::GetMemoryStatus(&ms);
if (ms.dwMemoryLoad >= 95)
{
dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr));
@@ -12090,7 +12007,7 @@ BOOL gc_heap::allocate_small (int gen_number,
dprintf (SPINLOCK_LOG, ("[%d]spin Lmsl", heap_number));
leave_spin_lock (&more_space_lock);
BOOL cooperative_mode = enable_preemptive (current_thread);
- __SwitchToThread (bgc_alloc_spin, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (bgc_alloc_spin);
disable_preemptive (current_thread, cooperative_mode);
enter_spin_lock (&more_space_lock);
add_saved_spinlock_info (me_acquire, mt_alloc_small);
@@ -12098,7 +12015,7 @@ BOOL gc_heap::allocate_small (int gen_number,
}
else
{
- //__SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ //GCToOSInterface::YieldThread (0);
}
}
#endif //BACKGROUND_GC && !MULTIPLE_HEAPS
@@ -12578,7 +12495,7 @@ exit:
}
#ifdef RECORD_LOH_STATE
-void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, uint32_t thread_id)
+void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id)
{
// When the state is can_allocate we already have released the more
// space lock. So we are not logging states here since this code
@@ -12619,7 +12536,7 @@ BOOL gc_heap::allocate_large (int gen_number,
dprintf (SPINLOCK_LOG, ("[%d]spin Lmsl loh", heap_number));
leave_spin_lock (&more_space_lock);
BOOL cooperative_mode = enable_preemptive (current_thread);
- __SwitchToThread (bgc_alloc_spin_loh, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (bgc_alloc_spin_loh);
disable_preemptive (current_thread, cooperative_mode);
enter_spin_lock (&more_space_lock);
add_saved_spinlock_info (me_acquire, mt_alloc_large);
@@ -12643,7 +12560,8 @@ BOOL gc_heap::allocate_large (int gen_number,
// That's why there are local variable for each state
allocation_state loh_alloc_state = a_state_start;
#ifdef RECORD_LOH_STATE
- uint32_t current_thread_id = GetCurrentThreadId();
+ EEThreadId current_thread_id;
+ current_thread_id.SetToCurrentThread();
#endif //RECORD_LOH_STATE
// If we can get a new seg it means allocation will succeed.
@@ -13127,42 +13045,30 @@ try_again:
{
uint8_t group_proc_no = heap_select::find_group_proc_from_heap_no(max_hp->heap_number);
-#if !defined(FEATURE_CORESYSTEM)
- SetThreadIdealProcessor(GetCurrentThread(), (uint32_t)group_proc_no);
-#else
- PROCESSOR_NUMBER proc;
- proc.Group = org_gn;
- proc.Number = group_proc_no;
- proc.Reserved = 0;
-
- if(!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL))
+ GCThreadAffinity affinity;
+ affinity.Processor = group_proc_no;
+ affinity.Group = org_gn;
+ if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
{
dprintf (3, ("Failed to set the ideal processor and group for heap %d.",
org_hp->heap_number));
}
-#endif
}
}
else
{
uint8_t proc_no = heap_select::find_proc_no_from_heap_no(max_hp->heap_number);
-#if !defined(FEATURE_CORESYSTEM)
- SetThreadIdealProcessor(GetCurrentThread(), (uint32_t)proc_no);
-#else
- PROCESSOR_NUMBER proc;
- if(GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
- {
- proc.Number = proc_no;
- BOOL result;
- if(!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL))
+ GCThreadAffinity affinity;
+ affinity.Processor = proc_no;
+ affinity.Group = GCThreadAffinity::None;
+
+ if (!GCToOSInterface::SetCurrentThreadIdealAffinity(&affinity))
{
dprintf (3, ("Failed to set the ideal processor for heap %d.",
org_hp->heap_number));
}
}
-#endif
- }
#endif // !FEATURE_REDHAWK && !FEATURE_PAL
dprintf (3, ("Switching context %p (home heap %d) ",
acontext,
@@ -14533,11 +14439,7 @@ int gc_heap::generation_to_condemn (int n_initial,
(local_settings->pause_mode == pause_sustained_low_latency))
{
dynamic_data* dd0 = dynamic_data_of (0);
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter(&ts))
- FATAL_GC_ERROR();
-
- size_t now = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
+ size_t now = GetHighPrecisionTimeStamp();
temp_gen = n;
for (i = (temp_gen+1); i <= n_time_max; i++)
{
@@ -14656,22 +14558,18 @@ int gc_heap::generation_to_condemn (int n_initial,
if (check_memory)
{
//find out if we are short on memory
- GetProcessMemoryLoad(&ms);
+ GCToOSInterface::GetMemoryStatus(&ms);
if (heap_number == 0)
{
dprintf (GTC_LOG, ("ml: %d", ms.dwMemoryLoad));
}
- if (heap_number == 0)
- {
-#ifdef BIT64
+ // Need to get it early enough for all heaps to use.
available_physical_mem = ms.ullAvailPhys;
-#endif // BIT64
local_settings->entry_memory_load = ms.dwMemoryLoad;
- }
// @TODO: Force compaction more often under GCSTRESS
- if (ms.dwMemoryLoad >= 90 || low_memory_detected)
+ if (ms.dwMemoryLoad >= high_memory_load_th || low_memory_detected)
{
#ifdef SIMPLE_DPRINTF
// stress log can't handle any parameter that's bigger than a void*.
@@ -14684,13 +14582,13 @@ int gc_heap::generation_to_condemn (int n_initial,
high_memory_load = TRUE;
- if (ms.dwMemoryLoad >= 97 || low_memory_detected)
+ if (ms.dwMemoryLoad >= v_high_memory_load_th || low_memory_detected)
{
// TODO: Perhaps in 64-bit we should be estimating gen1's fragmentation as well since
// gen1/gen0 may take a lot more memory than gen2.
if (!high_fragmentation)
{
- high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation, ms.ullTotalPhys);
+ high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation);
}
v_high_memory_load = TRUE;
}
@@ -14954,9 +14852,19 @@ exit:
#endif //_PREFAST_
inline
-size_t gc_heap::min_reclaim_fragmentation_threshold(uint64_t total_mem, uint32_t num_heaps)
+size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps)
{
- return min ((size_t)((float)total_mem * 0.03), (100*1024*1024)) / num_heaps;
+    // if the memory load is higher, the fragmentation threshold at which we'd collect gets lower.
+ size_t min_mem_based_on_available =
+ (500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps;
+ size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10);
+ uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps;
+
+#ifdef SIMPLE_DPRINTF
+ dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d",
+ min_mem_based_on_available, ten_percent_size, three_percent_mem));
+#endif //SIMPLE_DPRINTF
+ return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem)));
}
inline
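The replacement threshold is the smallest of three candidates instead of a flat 3% of physical memory. Worked through with assumed inputs (entry_memory_load 94, high_memory_load_th 90, one heap): min_mem_based_on_available = (500 - (94 - 90) * 40) MB = 340 MB, which is then compared against 10% of the gen2 size and 3% of physical memory (mem_one_percent * 3); the minimum wins, so the hotter the memory load, the less fragmentation it takes to justify reclaiming.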
@@ -15031,10 +14939,7 @@ void fire_overflow_event (uint8_t* overflow_min,
void gc_heap::concurrent_print_time_delta (const char* msg)
{
#ifdef TRACE_GC
- LARGE_INTEGER ts;
- QueryPerformanceCounter (&ts);
-
- size_t current_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
+ size_t current_time = GetHighPrecisionTimeStamp();
size_t elapsed_time = current_time - time_bgc_last;
time_bgc_last = current_time;
@@ -15108,7 +15013,7 @@ BOOL gc_heap::should_proceed_with_gc()
void gc_heap::gc1()
{
#ifdef BACKGROUND_GC
- assert (settings.concurrent == (uint32_t)(GetCurrentThreadId() == bgc_thread_id));
+ assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#endif //BACKGROUND_GC
#ifdef TIME_GC
@@ -15146,10 +15051,7 @@ void gc_heap::gc1()
if (settings.concurrent)
{
#ifdef TRACE_GC
- LARGE_INTEGER ts;
- QueryPerformanceCounter (&ts);
-
- time_bgc_last = (size_t)(ts.QuadPart/(qpf.QuadPart/1000));
+ time_bgc_last = GetHighPrecisionTimeStamp();
#endif //TRACE_GC
fire_bgc_event (BGCBegin);
@@ -15172,23 +15074,13 @@ void gc_heap::gc1()
{
mark_phase (n, FALSE);
- CNameSpace::GcRuntimeStructuresValid (FALSE);
+ GCScan::GcRuntimeStructuresValid (FALSE);
plan_phase (n);
- CNameSpace::GcRuntimeStructuresValid (TRUE);
+ GCScan::GcRuntimeStructuresValid (TRUE);
}
}
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter(&ts))
- FATAL_GC_ERROR();
-
- size_t end_gc_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
-
-#ifdef GC_CONFIG_DRIVEN
- if (heap_number == 0)
- time_since_init = end_gc_time - time_init;
-#endif //GC_CONFIG_DRIVEN
-
+ size_t end_gc_time = GetHighPrecisionTimeStamp();
// printf ("generation: %d, elapsed time: %Id\n", n, end_gc_time - dd_time_clock (dynamic_data_of (0)));
//adjust the allocation size from the pinned quantities.
@@ -15388,7 +15280,7 @@ void gc_heap::gc1()
#endif //TIME_GC
#ifdef BACKGROUND_GC
- assert (settings.concurrent == (uint32_t)(GetCurrentThreadId() == bgc_thread_id));
+ assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#endif //BACKGROUND_GC
#if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
@@ -15438,7 +15330,7 @@ void gc_heap::gc1()
#endif //BACKGROUND_GC
#ifdef BACKGROUND_GC
- assert (settings.concurrent == (uint32_t)(GetCurrentThreadId() == bgc_thread_id));
+ assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#ifdef FEATURE_EVENT_TRACE
if (ETW::GCLog::ShouldTrackMovementForEtw() && settings.concurrent)
{
@@ -15542,7 +15434,7 @@ void gc_heap::gc1()
size_t min_gc_size = dd_min_gc_size(dd);
// if min GC size larger than true on die cache, then don't bother
// limiting the desired size
- if ((min_gc_size <= GetLargestOnDieCacheSize(TRUE) / GetLogicalCpuCount()) &&
+ if ((min_gc_size <= GCToOSInterface::GetLargestOnDieCacheSize(TRUE) / GCToOSInterface::GetLogicalCpuCount()) &&
desired_per_heap <= 2*min_gc_size)
{
desired_per_heap = min_gc_size;
@@ -16058,11 +15950,7 @@ void gc_heap::update_collection_counts ()
dynamic_data* dd0 = dynamic_data_of (0);
dd_gc_clock (dd0) += 1;
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter (&ts))
- FATAL_GC_ERROR();
-
- size_t now = (size_t)(ts.QuadPart/(qpf.QuadPart/1000));
+ size_t now = GetHighPrecisionTimeStamp();
for (int i = 0; i <= settings.condemned_generation;i++)
{
@@ -16714,7 +16602,7 @@ int gc_heap::garbage_collect (int n)
gc1();
}
#ifndef MULTIPLE_HEAPS
- allocation_running_time = (size_t)GetTickCount();
+ allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp();
allocation_running_amount = dd_new_allocation (dynamic_data_of (0));
fgn_last_alloc = dd_new_allocation (dynamic_data_of (0));
#endif //MULTIPLE_HEAPS
@@ -17716,7 +17604,7 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps));
- uint32_t begin_tick = GetTickCount();
+ uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //SNOOP_STATS
int idle_loop_count = 0;
@@ -17817,8 +17705,8 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms",
heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
- (GetTickCount()-begin_tick)));
- uint32_t start_tick = GetTickCount();
+ (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
+ uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //SNOOP_STATS
mark_object_simple1 (o, start, heap_number);
@@ -17826,7 +17714,7 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms",
heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
- (GetTickCount()-start_tick),(GetTickCount()-begin_tick)));
+ (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
#endif //SNOOP_STATS
mark_stack_busy() = 0;
@@ -17867,7 +17755,7 @@ gc_heap::mark_steal()
#ifdef SNOOP_STATS
snoop_stat.switch_to_thread_count++;
#endif //SNOOP_STATS
- __SwitchToThread(1,0);
+ GCToOSInterface::Sleep(1);
}
int free_count = 1;
#ifdef SNOOP_STATS
@@ -17980,7 +17868,7 @@ gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
size_t new_size = 2*internal_root_array_length;
GCMemoryStatus statex;
- GetProcessMemoryLoad(&statex);
+ GCToOSInterface::GetMemoryStatus(&statex);
if (new_size > (size_t)(statex.ullAvailPhys / 10))
{
heap_analyze_success = FALSE;
@@ -18505,11 +18393,10 @@ void gc_heap::fix_card_table ()
PREFIX_ASSUME(seg != NULL);
- uint32_t granularity;
#ifdef BACKGROUND_GC
- uint32_t mode = settings.concurrent ? 1 : 0;
+ bool reset_watch_state = !!settings.concurrent;
#else //BACKGROUND_GC
- uint32_t mode = 0;
+ bool reset_watch_state = false;
#endif //BACKGROUND_GC
BOOL small_object_segments = TRUE;
while (1)
@@ -18549,10 +18436,10 @@ void gc_heap::fix_card_table ()
#ifdef TIME_WRITE_WATCH
unsigned int time_start = GetCycleCount32();
#endif //TIME_WRITE_WATCH
- uint32_t status = GetWriteWatch (mode, base_address, region_size,
+ bool success = GCToOSInterface::GetWriteWatch(reset_watch_state, base_address, region_size,
(void**)g_addresses,
- (ULONG_PTR*)&bcount, (DWORD*)&granularity);
- assert (status == 0);
+ &bcount);
+ assert (success);
#ifdef TIME_WRITE_WATCH
unsigned int time_stop = GetCycleCount32();
@@ -18562,7 +18449,6 @@ void gc_heap::fix_card_table ()
#endif //TIME_WRITE_WATCH
assert( ((card_size * card_word_width)&(OS_PAGE_SIZE-1))==0 );
- assert (granularity == OS_PAGE_SIZE);
//printf ("%Ix written into\n", bcount);
dprintf (3,("Found %Id pages written", bcount));
for (unsigned i = 0; i < bcount; i++)
@@ -18590,7 +18476,7 @@ void gc_heap::fix_card_table ()
align_on_page (generation_allocation_start (generation_of (0)));
size_t region_size =
heap_segment_allocated (ephemeral_heap_segment) - base_address;
- ResetWriteWatch (base_address, region_size);
+ GCToOSInterface::ResetWriteWatch (base_address, region_size);
}
#endif //BACKGROUND_GC
#endif //WRITE_WATCH
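The write-watch hunks above drop the granularity out-parameter and its per-call assert; a plausible reading is that the page-granularity check moved into the OS wrapper. A Windows-flavored sketch under that assumption (the signature mirrors the call sites in this diff but is not confirmed against the committed PAL code):

    bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size,
                                        void** pageAddresses, uintptr_t* pageAddressesCount)
    {
        DWORD flags = resetState ? WRITE_WATCH_FLAG_RESET : 0;
        ULONG_PTR count = (ULONG_PTR)*pageAddressesCount;
        DWORD granularity;

        // The Win32 GetWriteWatch API returns 0 on success.
        bool success = ::GetWriteWatch(flags, address, size, pageAddresses, &count, &granularity) == 0;
        if (success)
        {
            *pageAddressesCount = (uintptr_t)count;
            assert(granularity == OS_PAGE_SIZE); // the check hoisted out of the GC callers
        }
        return success;
    }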
@@ -19140,7 +19026,7 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
// determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
// effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
// we're safely joined.
- if (CNameSpace::GcDhUnpromotedHandlesExist(sc))
+ if (GCScan::GcDhUnpromotedHandlesExist(sc))
s_fUnpromotedHandles = TRUE;
// Synchronize all the threads so we can read our state variables safely. The shared variable
@@ -19216,8 +19102,8 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
// If the portion of the dependent handle table managed by this worker has handles that could still be
// promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
// could require a rescan of handles on this or other workers.
- if (CNameSpace::GcDhUnpromotedHandlesExist(sc))
- if (CNameSpace::GcDhReScan(sc))
+ if (GCScan::GcDhUnpromotedHandlesExist(sc))
+ if (GCScan::GcDhReScan(sc))
s_fUnscannedPromotions = TRUE;
}
}
@@ -19235,7 +19121,7 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
// Loop until there are either no more dependent handles that can have their secondary promoted or we've
// managed to perform a scan without promoting anything new.
- while (CNameSpace::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
+ while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
{
// On each iteration of the loop start with the assumption that no further objects have been promoted.
fUnscannedPromotions = false;
@@ -19247,7 +19133,7 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
fUnscannedPromotions = true;
// Perform the scan and set the flag if any promotions resulted.
- if (CNameSpace::GcDhReScan(sc))
+ if (GCScan::GcDhReScan(sc))
fUnscannedPromotions = true;
}
@@ -19395,7 +19281,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
{
- CNameSpace::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
+ GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
fire_mark_event (heap_number, ETW::GC_ROOT_SIZEDREF, (promoted_bytes (heap_number) - last_promoted_bytes));
last_promoted_bytes = promoted_bytes (heap_number);
@@ -19411,7 +19297,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
dprintf(3,("Marking Roots"));
- CNameSpace::GcScanRoots(GCHeap::Promote,
+ GCScan::GcScanRoots(GCHeap::Promote,
condemned_gen_number, max_generation,
&sc);
@@ -19437,7 +19323,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
{
dprintf(3,("Marking handle table"));
- CNameSpace::GcScanHandles(GCHeap::Promote,
+ GCScan::GcScanHandles(GCHeap::Promote,
condemned_gen_number, max_generation,
&sc);
fire_mark_event (heap_number, ETW::GC_ROOT_HANDLES, (promoted_bytes (heap_number) - last_promoted_bytes));
@@ -19504,7 +19390,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
// to optimize away further scans. The call to scan_dependent_handles is what will cycle through more
// iterations if required and will also perform processing of any mark stack overflow once the dependent
// handle table has been fully promoted.
- CNameSpace::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
+ GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
scan_dependent_handles(condemned_gen_number, &sc, true);
#ifdef MULTIPLE_HEAPS
@@ -19534,7 +19420,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
}
// null out the targets of short weakrefs that were not promoted.
- CNameSpace::GcShortWeakPtrScan(GCHeap::Promote, condemned_gen_number, max_generation,&sc);
+ GCScan::GcShortWeakPtrScan(GCHeap::Promote, condemned_gen_number, max_generation,&sc);
// MTHTS: keep by single thread
#ifdef MULTIPLE_HEAPS
@@ -19582,7 +19468,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
#endif //MULTIPLE_HEAPS
// null out the targets of long weakrefs that were not promoted.
- CNameSpace::GcWeakPtrScan (GCHeap::Promote, condemned_gen_number, max_generation, &sc);
+ GCScan::GcWeakPtrScan (GCHeap::Promote, condemned_gen_number, max_generation, &sc);
// MTHTS: keep by single thread
#ifdef MULTIPLE_HEAPS
@@ -19600,7 +19486,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
#endif //MULTIPLE_HEAPS
{
// scan for deleted entries in the syncblk cache
- CNameSpace::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
+ GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
if (g_fEnableARM)
@@ -21132,7 +21018,7 @@ void gc_heap::store_plug_gap_info (uint8_t* plug_start,
//if (last_plug_len == Align (min_obj_size))
//{
// dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct"));
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
save_pre_plug_info_p = TRUE;
}
@@ -21165,7 +21051,7 @@ void gc_heap::store_plug_gap_info (uint8_t* plug_start,
//if (Align (last_plug_len) < min_pre_pin_obj_size)
//{
// dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct"));
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start);
@@ -21788,6 +21674,14 @@ void gc_heap::plan_phase (int condemned_gen_number)
(size_t)new_address + ps, ps,
(is_plug_padded (plug_start) ? 1 : 0)));
#endif //SIMPLE_DPRINTF
+
+#ifdef SHORT_PLUGS
+ if (is_plug_padded (plug_start))
+ {
+ dprintf (3, ("%Ix was padded", plug_start));
+ dd_padding_size (dd_active_old) += Align (min_obj_size);
+ }
+#endif //SHORT_PLUGS
}
}
}
@@ -22502,14 +22396,14 @@ void gc_heap::plan_phase (int condemned_gen_number)
{
dprintf (2, ("Promoting EE roots for gen %d",
condemned_gen_number));
- CNameSpace::GcPromotionsGranted(condemned_gen_number,
+ GCScan::GcPromotionsGranted(condemned_gen_number,
max_generation, &sc);
}
else if (settings.demotion)
{
dprintf (2, ("Demoting EE roots for gen %d",
condemned_gen_number));
- CNameSpace::GcDemote (condemned_gen_number, max_generation, &sc);
+ GCScan::GcDemote (condemned_gen_number, max_generation, &sc);
}
}
@@ -22664,7 +22558,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
{
- CNameSpace::GcPromotionsGranted(condemned_gen_number,
+ GCScan::GcPromotionsGranted(condemned_gen_number,
max_generation, &sc);
if (condemned_gen_number >= (max_generation -1))
{
@@ -23481,7 +23375,7 @@ void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end,
//{
// dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix",
// x, (x + s), (plug- (x + s)), plug));
- // DebugBreak();
+ // GCToOSInterface::DebugBreak();
//}
relocate_pre_plug_info (pinned_plug_entry);
@@ -24156,7 +24050,7 @@ void gc_heap::relocate_phase (int condemned_gen_number,
}
dprintf(3,("Relocating roots"));
- CNameSpace::GcScanRoots(GCHeap::Relocate,
+ GCScan::GcScanRoots(GCHeap::Relocate,
condemned_gen_number, max_generation, &sc);
verify_pins_with_post_plug_info("after reloc stack");
@@ -24209,7 +24103,7 @@ void gc_heap::relocate_phase (int condemned_gen_number,
// MTHTS
{
dprintf(3,("Relocating handle table"));
- CNameSpace::GcScanHandles(GCHeap::Relocate,
+ GCScan::GcScanHandles(GCHeap::Relocate,
condemned_gen_number, max_generation, &sc);
}
@@ -24587,13 +24481,9 @@ void gc_heap::compact_phase (int condemned_gen_number,
reset_pinned_queue_bos();
update_oldest_pinned_plug();
- BOOL reused_seg = FALSE;
- int heap_expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand);
- if ((heap_expand_mechanism == expand_reuse_bestfit) ||
- (heap_expand_mechanism == expand_reuse_normal))
+ BOOL reused_seg = expand_reused_seg_p();
+ if (reused_seg)
{
- reused_seg = TRUE;
-
for (int i = 1; i <= max_generation; i++)
{
generation_allocation_size (generation_of (i)) = 0;
@@ -24769,7 +24659,7 @@ inline int32_t GCUnhandledExceptionFilter(EXCEPTION_POINTERS* pExceptionPointers
#pragma warning(push)
#pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
#endif //_MSC_VER
-uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
+void __stdcall gc_heap::gc_thread_stub (void* arg)
{
ClrFlsSetThreadType (ThreadType_GC);
STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
@@ -24782,7 +24672,7 @@ uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
{
#ifdef BACKGROUND_GC
// For background GC we revert to doing a blocking GC.
- return 0;
+ return;
#else
STRESS_LOG0(LF_GC, LL_ALWAYS, "Thread::CommitThreadStack failed.");
_ASSERTE(!"Thread::CommitThreadStack failed.");
@@ -24796,7 +24686,7 @@ uint32_t __stdcall gc_heap::gc_thread_stub (void* arg)
#endif // NO_CATCH_HANDLERS
gc_heap* heap = (gc_heap*)arg;
_alloca (256*heap->heap_number);
- return heap->gc_thread_function();
+ heap->gc_thread_function();
#ifndef NO_CATCH_HANDLERS
}
@@ -24937,7 +24827,7 @@ void gc_heap::background_scan_dependent_handles (ScanContext *sc)
// determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
// effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
// we're safely joined.
- if (CNameSpace::GcDhUnpromotedHandlesExist(sc))
+ if (GCScan::GcDhUnpromotedHandlesExist(sc))
s_fUnpromotedHandles = TRUE;
// Synchronize all the threads so we can read our state variables safely. The following shared
@@ -25007,8 +24897,8 @@ void gc_heap::background_scan_dependent_handles (ScanContext *sc)
// If the portion of the dependent handle table managed by this worker has handles that could still be
// promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
// could require a rescan of handles on this or other workers.
- if (CNameSpace::GcDhUnpromotedHandlesExist(sc))
- if (CNameSpace::GcDhReScan(sc))
+ if (GCScan::GcDhUnpromotedHandlesExist(sc))
+ if (GCScan::GcDhReScan(sc))
s_fUnscannedPromotions = TRUE;
}
}
@@ -25022,7 +24912,7 @@ void gc_heap::background_scan_dependent_handles (ScanContext *sc)
// Scan dependent handles repeatedly until there are no further promotions that can be made or we made a
// scan without performing any new promotions.
- while (CNameSpace::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
+ while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
{
// On each iteration of the loop start with the assumption that no further objects have been promoted.
fUnscannedPromotions = false;
@@ -25034,7 +24924,7 @@ void gc_heap::background_scan_dependent_handles (ScanContext *sc)
fUnscannedPromotions = true;
// Perform the scan and set the flag if any promotions resulted.
- if (CNameSpace::GcDhReScan (sc))
+ if (GCScan::GcDhReScan (sc))
fUnscannedPromotions = true;
}
@@ -25228,7 +25118,7 @@ BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t
size));
#endif //SIMPLE_DPRINTF
- if (VirtualAlloc (commit_start, size, MEM_COMMIT, PAGE_READWRITE))
+ if (GCToOSInterface::VirtualCommit (commit_start, size))
{
// We can only verify the mark array is cleared from begin to end, the first and the last
// page aren't necessarily all cleared 'cause they could be used by other segments or
@@ -25473,10 +25363,10 @@ void gc_heap::decommit_mark_array_by_seg (heap_segment* seg)
if (decommit_start < decommit_end)
{
- if (!VirtualFree (decommit_start, size, MEM_DECOMMIT))
+ if (!GCToOSInterface::VirtualDecommit (decommit_start, size))
{
- dprintf (GC_TABLE_LOG, ("VirtualFree on %Ix for %Id bytes failed: %d",
- decommit_start, size, GetLastError()));
+ dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualDecommit on %Ix for %Id bytes failed",
+ decommit_start, size));
assert (!"decommit failed");
}
}
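commit_mark_array_by_range and decommit_mark_array_by_seg now go through commit/decommit wrappers instead of raw Win32 calls. Windows-flavored sketches, assuming the wrappers map directly onto the calls they replace:

    bool GCToOSInterface::VirtualCommit(void* address, size_t size)
    {
        return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
    }

    bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
    {
        return ::VirtualFree(address, size, MEM_DECOMMIT) != FALSE;
    }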
@@ -25549,7 +25439,7 @@ void gc_heap::background_mark_phase ()
dprintf(3,("BGC: stack marking"));
sc.concurrent = TRUE;
- CNameSpace::GcScanRoots(background_promote_callback,
+ GCScan::GcScanRoots(background_promote_callback,
max_generation, max_generation,
&sc);
}
@@ -25602,7 +25492,7 @@ void gc_heap::background_mark_phase ()
dont_restart_ee_p = FALSE;
restart_vm();
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
#ifdef MULTIPLE_HEAPS
dprintf(3, ("Starting all gc threads for gc"));
bgc_t_join.restart();
@@ -25666,7 +25556,7 @@ void gc_heap::background_mark_phase ()
if (num_sizedrefs > 0)
{
- CNameSpace::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc);
+ GCScan::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc);
enable_preemptive (current_thread);
@@ -25683,7 +25573,7 @@ void gc_heap::background_mark_phase ()
}
dprintf (3,("BGC: handle table marking"));
- CNameSpace::GcScanHandles(background_promote,
+ GCScan::GcScanHandles(background_promote,
max_generation, max_generation,
&sc);
//concurrent_print_time_delta ("concurrent marking handle table");
@@ -25815,7 +25705,7 @@ void gc_heap::background_mark_phase ()
dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));
dprintf (2, ("nonconcurrent marking stack roots"));
- CNameSpace::GcScanRoots(background_promote,
+ GCScan::GcScanRoots(background_promote,
max_generation, max_generation,
&sc);
//concurrent_print_time_delta ("nonconcurrent marking stack roots");
@@ -25826,7 +25716,7 @@ void gc_heap::background_mark_phase ()
// finalize_queue->LeaveFinalizeLock();
dprintf (2, ("nonconcurrent marking handle table"));
- CNameSpace::GcScanHandles(background_promote,
+ GCScan::GcScanHandles(background_promote,
max_generation, max_generation,
&sc);
//concurrent_print_time_delta ("nonconcurrent marking handle table");
@@ -25848,7 +25738,7 @@ void gc_heap::background_mark_phase ()
// required and will also perform processing of any mark stack overflow once the dependent handle
// table has been fully promoted.
dprintf (2, ("1st dependent handle scan and process mark overflow"));
- CNameSpace::GcDhInitialScan(background_promote, max_generation, max_generation, &sc);
+ GCScan::GcDhInitialScan(background_promote, max_generation, max_generation, &sc);
background_scan_dependent_handles (&sc);
//concurrent_print_time_delta ("1st nonconcurrent dependent handle scan and process mark overflow");
concurrent_print_time_delta ("NR 1st Hov");
@@ -25870,7 +25760,7 @@ void gc_heap::background_mark_phase ()
}
// null out the targets of short weakrefs that were not promoted.
- CNameSpace::GcShortWeakPtrScan(background_promote, max_generation, max_generation,&sc);
+ GCScan::GcShortWeakPtrScan(background_promote, max_generation, max_generation,&sc);
//concurrent_print_time_delta ("bgc GcShortWeakPtrScan");
concurrent_print_time_delta ("NR GcShortWeakPtrScan");
@@ -25919,7 +25809,7 @@ void gc_heap::background_mark_phase ()
#endif //MULTIPLE_HEAPS
// null out the targets of long weakrefs that were not promoted.
- CNameSpace::GcWeakPtrScan (background_promote, max_generation, max_generation, &sc);
+ GCScan::GcWeakPtrScan (background_promote, max_generation, max_generation, &sc);
concurrent_print_time_delta ("NR GcWeakPtrScan");
#ifdef MULTIPLE_HEAPS
@@ -25929,7 +25819,7 @@ void gc_heap::background_mark_phase ()
{
dprintf (2, ("calling GcWeakPtrScanBySingleThread"));
// scan for deleted entries in the syncblk cache
- CNameSpace::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc);
+ GCScan::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc);
concurrent_print_time_delta ("NR GcWeakPtrScanBySingleThread");
#ifdef MULTIPLE_HEAPS
dprintf(2, ("Starting BGC threads for end of background mark phase"));
@@ -26241,8 +26131,7 @@ void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
PREFIX_ASSUME(seg != NULL);
- uint32_t granularity;
- int mode = concurrent_p ? 1 : 0;
+ bool reset_watch_state = !!concurrent_p;
BOOL small_object_segments = TRUE;
int align_const = get_alignment_constant (small_object_segments);
@@ -26347,19 +26236,18 @@ void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
ptrdiff_t region_size = high_address - base_address;
dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size));
- uint32_t status = GetWriteWatch (mode, base_address, region_size,
+ bool success = GCToOSInterface::GetWriteWatch (reset_watch_state, base_address, region_size,
(void**)background_written_addresses,
- (ULONG_PTR*)&bcount, (DWORD*)&granularity);
+ &bcount);
//#ifdef _DEBUG
- if (status != 0)
+ if (!success)
{
printf ("GetWriteWatch Error ");
printf ("Probing pages [%Ix, %Ix[\n", (size_t)base_address, (size_t)high_address);
}
//#endif
- assert (status == 0);
- assert (granularity == OS_PAGE_SIZE);
+ assert (success);
if (bcount != 0)
{
@@ -26539,7 +26427,7 @@ BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
BOOL thread_created = FALSE;
dprintf (2, ("Preparing gc thread"));
- EnterCriticalSection (&(gh->bgc_threads_timeout_cs));
+ gh->bgc_threads_timeout_cs.Enter();
if (!(gh->bgc_thread_running))
{
dprintf (2, ("GC thread not runnning"));
@@ -26554,7 +26442,7 @@ BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
dprintf (3, ("GC thread already running"));
success = TRUE;
}
- LeaveCriticalSection (&(gh->bgc_threads_timeout_cs));
+ gh->bgc_threads_timeout_cs.Leave();
if(thread_created)
FireEtwGCCreateConcurrentThread_V1(GetClrInstanceId());
@@ -26580,7 +26468,7 @@ BOOL gc_heap::create_bgc_thread(gc_heap* gh)
// finished the event wait below.
rh_bgc_thread_ctx sContext;
- sContext.m_pRealStartRoutine = gh->bgc_thread_stub;
+ sContext.m_pRealStartRoutine = (PTHREAD_START_ROUTINE)gh->bgc_thread_stub;
sContext.m_pRealContext = gh;
if (!PalStartBackgroundGCThread(gh->rh_bgc_thread_stub, &sContext))
@@ -26832,7 +26720,7 @@ void gc_heap::kill_gc_thread()
background_gc_done_event.CloseEvent();
gc_lh_block_event.CloseEvent();
bgc_start_event.CloseEvent();
- DeleteCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Destroy();
bgc_thread = 0;
recursive_gc_sync::shutdown();
}
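bgc_threads_timeout_cs changes from a raw CRITICAL_SECTION manipulated with Enter/Leave/DeleteCriticalSection to a CLRCriticalSection object. A minimal Windows-flavored sketch of that wrapper, assuming a pthread_mutex_t variant exists under FEATURE_PAL:

    class CLRCriticalSection
    {
        CRITICAL_SECTION m_cs;

    public:
        void Initialize() { ::InitializeCriticalSection(&m_cs); }
        void Destroy()    { ::DeleteCriticalSection(&m_cs); }
        void Enter()      { ::EnterCriticalSection(&m_cs); }
        void Leave()      { ::LeaveCriticalSection(&m_cs); }
    };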
@@ -26863,8 +26751,8 @@ uint32_t gc_heap::bgc_thread_function()
bgc_thread_running = TRUE;
Thread* current_thread = GetThread();
BOOL cooperative_mode = TRUE;
- bgc_thread_id = GetCurrentThreadId();
- dprintf (1, ("bgc_thread_id is set to %Ix", bgc_thread_id));
+ bgc_thread_id.SetToCurrentThread();
+ dprintf (1, ("bgc_thread_id is set to %Ix", GCToOSInterface::GetCurrentThreadIdForLogging()));
//this also indicates that the thread is ready.
background_gc_create_event.Set();
while (1)
@@ -26901,7 +26789,7 @@ uint32_t gc_heap::bgc_thread_function()
// Should join the bgc threads and terminate all of them
// at once.
dprintf (1, ("GC thread timeout"));
- EnterCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Enter();
if (!keep_bgc_threads_p)
{
dprintf (2, ("GC thread exiting"));
@@ -26911,10 +26799,10 @@ uint32_t gc_heap::bgc_thread_function()
// assert if the lock count is not 0.
thread_to_destroy = bgc_thread;
bgc_thread = 0;
- bgc_thread_id = 0;
+ bgc_thread_id.Clear();
do_exit = TRUE;
}
- LeaveCriticalSection (&bgc_threads_timeout_cs);
+ bgc_threads_timeout_cs.Leave();
if (do_exit)
break;
else
@@ -28566,6 +28454,19 @@ BOOL gc_heap::process_free_space (heap_segment* seg,
return FALSE;
}
+BOOL gc_heap::expand_reused_seg_p()
+{
+ BOOL reused_seg = FALSE;
+ int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand);
+ if ((heap_expand_mechanism == expand_reuse_bestfit) ||
+ (heap_expand_mechanism == expand_reuse_normal))
+ {
+ reused_seg = TRUE;
+ }
+
+ return reused_seg;
+}
+
BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t min_cont_size,
allocator* gen_allocator)
{
@@ -29207,6 +29108,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
//reset the elevation state for next time.
dprintf (2, ("Elevation: elevation = el_none"));
+ if (settings.should_lock_elevation && !expand_reused_seg_p())
settings.should_lock_elevation = FALSE;
heap_segment* new_seg = new_heap_segment;
@@ -29388,18 +29290,9 @@ generation* gc_heap::expand_heap (int condemned_generation,
bool gc_heap::init_dynamic_data()
{
- LARGE_INTEGER ts;
- if (!QueryPerformanceFrequency(&qpf))
- {
- FATAL_GC_ERROR();
- }
-
- if (!QueryPerformanceCounter(&ts))
- {
- FATAL_GC_ERROR();
- }
+ qpf = GCToOSInterface::QueryPerformanceFrequency();
- uint32_t now = (uint32_t)(ts.QuadPart/(qpf.QuadPart/1000));
+ uint32_t now = (uint32_t)GetHighPrecisionTimeStamp();
//clear some fields
for (int i = 0; i < max_generation+1; i++)
@@ -29521,17 +29414,6 @@ bool gc_heap::init_dynamic_data()
return true;
}
-// This returns a time stamp in milliseconds that is used throughout GC.
-// TODO: Replace all calls to QueryPerformanceCounter with this function.
-size_t gc_heap::get_time_now()
-{
- LARGE_INTEGER ts;
- if (!QueryPerformanceCounter(&ts))
- FATAL_GC_ERROR();
-
- return (size_t)(ts.QuadPart/(qpf.QuadPart/1000));
-}
-
float gc_heap::surv_to_growth (float cst, float limit, float max_limit)
{
if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f))))
@@ -29628,7 +29510,7 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd,
else //large object heap
{
GCMemoryStatus ms;
- GetProcessMemoryLoad (&ms);
+ GCToOSInterface::GetMemoryStatus (&ms);
uint64_t available_ram = ms.ullAvailPhys;
if (ms.ullAvailPhys > 1024*1024)
@@ -29864,13 +29746,20 @@ size_t gc_heap::joined_youngest_desired (size_t new_allocation)
{
uint32_t dwMemoryLoad = 0;
GCMemoryStatus ms;
- GetProcessMemoryLoad(&ms);
+ GCToOSInterface::GetMemoryStatus(&ms);
dprintf (2, ("Current memory load: %d", ms.dwMemoryLoad));
dwMemoryLoad = ms.dwMemoryLoad;
size_t final_total =
trim_youngest_desired (dwMemoryLoad, total_new_allocation, total_min_allocation);
- final_new_allocation = Align ((final_total / num_heaps), get_alignment_constant (TRUE));
+ size_t max_new_allocation =
+#ifdef MULTIPLE_HEAPS
+ dd_max_size (g_heaps[0]->dynamic_data_of (0));
+#else //MULTIPLE_HEAPS
+ dd_max_size (dynamic_data_of (0));
+#endif //MULTIPLE_HEAPS
+
+ final_new_allocation = min (Align ((final_total / num_heaps), get_alignment_constant (TRUE)), max_new_allocation);
}
}
@@ -30289,9 +30178,9 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
#endif // MULTIPLE_HEAPS
ptrdiff_t reclaim_space = generation_size(max_generation) - generation_plan_size(max_generation);
- if((settings.entry_memory_load >= 90) && (settings.entry_memory_load < 97))
+ if((settings.entry_memory_load >= high_memory_load_th) && (settings.entry_memory_load < v_high_memory_load_th))
{
- if(reclaim_space > (int64_t)(min_high_fragmentation_threshold(available_physical_mem, num_heaps)))
+ if(reclaim_space > (int64_t)(min_high_fragmentation_threshold (available_physical_mem, num_heaps)))
{
dprintf(GTC_LOG,("compacting due to fragmentation in high memory"));
should_compact = TRUE;
@@ -30299,9 +30188,9 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
}
high_memory = TRUE;
}
- else if(settings.entry_memory_load >= 97)
+ else if(settings.entry_memory_load >= v_high_memory_load_th)
{
- if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold(total_physical_mem, num_heaps)))
+ if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold (num_heaps)))
{
dprintf(GTC_LOG,("compacting due to fragmentation in very high memory"));
should_compact = TRUE;
@@ -30330,7 +30219,8 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
#ifdef BIT64
(high_memory && !should_compact) ||
#endif // BIT64
- generation_size (max_generation) <= generation_plan_size (max_generation))
+ (generation_plan_allocation_start (generation_of (max_generation - 1)) >=
+ generation_allocation_start (generation_of (max_generation - 1))))
{
dprintf (2, (" Elevation: gen2 size: %d, gen2 plan size: %d, no progress, elevation = locked",
generation_size (max_generation),
@@ -30550,7 +30440,7 @@ CObjectHeader* gc_heap::allocate_large_object (size_t jsize, int64_t& alloc_byte
{
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
#ifndef FEATURE_REDHAWK
@@ -30643,10 +30533,7 @@ void reset_memory (uint8_t* o, size_t sizeo)
// on write watched memory.
if (reset_mm_p)
{
- if (VirtualAlloc ((char*)page_start, size, MEM_RESET, PAGE_READWRITE))
- VirtualUnlock ((char*)page_start, size);
- else
- reset_mm_p = FALSE;
+ reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, true /* unlock */);
}
}
#endif //!FEATURE_PAL
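reset_memory collapses the VirtualAlloc(MEM_RESET) plus VirtualUnlock pair into a single VirtualReset(address, size, unlock) call. A sketch that simply mirrors the removed Windows code (an assumption; the Unix side would need a different primitive such as madvise):

    bool GCToOSInterface::VirtualReset(void* address, size_t size, bool unlock)
    {
        bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL;
        if (success && unlock)
        {
            // Mirrors the old path: unlock only after a successful reset.
            ::VirtualUnlock(address, size);
        }
        return success;
    }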
@@ -32501,11 +32388,7 @@ void gc_heap::clear_all_mark_array()
{
#ifdef MARK_ARRAY
//size_t num_dwords_written = 0;
- //LARGE_INTEGER ts;
- //if (!QueryPerformanceCounter(&ts))
- // FATAL_GC_ERROR();
- //
- //size_t begin_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
+ //size_t begin_time = GetHighPrecisionTimeStamp();
generation* gen = generation_of (max_generation);
heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
@@ -32566,10 +32449,7 @@ void gc_heap::clear_all_mark_array()
seg = heap_segment_next_rw (seg);
}
- //if (!QueryPerformanceCounter(&ts))
- // FATAL_GC_ERROR();
- //
- //size_t end_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000)) - begin_time;
+ //size_t end_time = GetHighPrecisionTimeStamp() - begin_time;
//printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));
@@ -32909,7 +32789,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
#ifdef MULTIPLE_HEAPS
t_join* current_join = &gc_t_join;
#ifdef BACKGROUND_GC
- if (settings.concurrent && (GetCurrentThreadId() == bgc_thread_id))
+ if (settings.concurrent && (bgc_thread_id.IsCurrentThread()))
{
// We always call verify_heap on entry of GC on the SVR GC threads.
current_join = &bgc_t_join;
@@ -33328,7 +33208,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
// limit its scope to handle table verification.
ScanContext sc;
sc.thread_number = heap_number;
- CNameSpace::VerifyHandleTable(max_generation, max_generation, &sc);
+ GCScan::VerifyHandleTable(max_generation, max_generation, &sc);
}
#ifdef MULTIPLE_HEAPS
@@ -33398,7 +33278,7 @@ HRESULT GCHeap::Shutdown ()
{
deleteGCShadow();
- CNameSpace::GcRuntimeStructuresValid (FALSE);
+ GCScan::GcRuntimeStructuresValid (FALSE);
// Cannot assert this, since we use SuspendEE as the mechanism to quiesce all
// threads except the one performing the shutdown.
@@ -33536,6 +33416,11 @@ HRESULT GCHeap::Initialize ()
HRESULT hr = S_OK;
+ if (!GCToOSInterface::Initialize())
+ {
+ return E_FAIL;
+ }
+
//Initialize the static members.
#ifdef TRACE_GC
GcDuration = 0;
@@ -33549,7 +33434,7 @@ HRESULT GCHeap::Initialize ()
#ifdef MULTIPLE_HEAPS
// GetGCProcessCpuCount only returns up to 64 procs.
unsigned nhp = CPUGroupInfo::CanEnableGCCPUGroups() ? CPUGroupInfo::GetNumActiveProcessors():
- GetCurrentProcessCpuCount();
+ GCToOSInterface::GetCurrentProcessCpuCount();
hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/, nhp);
#else
hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/);
@@ -33558,11 +33443,30 @@ HRESULT GCHeap::Initialize ()
if (hr != S_OK)
return hr;
-#if defined(BIT64)
GCMemoryStatus ms;
- GetProcessMemoryLoad (&ms);
+ GCToOSInterface::GetMemoryStatus (&ms);
gc_heap::total_physical_mem = ms.ullTotalPhys;
gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
+#ifndef MULTIPLE_HEAPS
+ gc_heap::mem_one_percent /= g_SystemInfo.dwNumberOfProcessors;
+#endif //!MULTIPLE_HEAPS
+
+ // We should only use this if we are in the "many process" mode which really is only applicable
+ // to very powerful machines. Until that's implemented, I am temporarily enabling this only for 80GB+ memory.
+ // For now I am using an estimate to calculate these numbers but this should really be obtained
+ // programmatically going forward.
+ // I am assuming 47 processes using WKS GC and 3 using SVR GC.
+ // I am assuming 3 in part because the "very high memory load" threshold is 97%.
+ int available_mem_th = 10;
+ if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
+ {
+ int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(g_SystemInfo.dwNumberOfProcessors));
+ available_mem_th = min (available_mem_th, adjusted_available_mem_th);
+ }
+
+ gc_heap::high_memory_load_th = 100 - available_mem_th;
+
+#if defined(BIT64)
gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent;
#endif // BIT64
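To make the new threshold arithmetic concrete, a worked example with hypothetical hardware, 128 GB of RAM and 40 logical processors:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t total_physical_mem = 128ull * 1024 * 1024 * 1024; // hypothetical 128 GB
        uint32_t num_procs = 40;                                   // hypothetical

        int available_mem_th = 10; // default: "high memory load" starts at 90%
        if (total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
        {
            // (float)47 / 40 is 1.175, which truncates to 1, so adjusted = 3 + 1 = 4.
            int adjusted_available_mem_th = 3 + (int)((float)47 / (float)num_procs);
            available_mem_th = std::min(available_mem_th, adjusted_available_mem_th);
        }
        printf("high_memory_load_th = %d\n", 100 - available_mem_th); // prints 96 here
        return 0;
    }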
@@ -33610,7 +33514,7 @@ HRESULT GCHeap::Initialize ()
if (hr == S_OK)
{
- CNameSpace::GcRuntimeStructuresValid (TRUE);
+ GCScan::GcRuntimeStructuresValid (TRUE);
#ifdef GC_PROFILING
if (CORProfilerTrackGC())
@@ -34014,7 +33918,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
// Allow programmer to skip the first N Stress GCs so that you can
// get to the interesting ones faster.
- FastInterlockIncrement((LONG*)&GCStressCurCount);
+ Interlocked::Increment(&GCStressCurCount);
if (GCStressCurCount < GCStressStartCount)
return FALSE;
@@ -34060,7 +33964,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
// at a time. A secondary advantage is that we release part of our StressObjs
// buffer sparingly but just as effectively.
- if (FastInterlockIncrement((LONG *) &OneAtATime) == 0 &&
+ if (Interlocked::Increment(&OneAtATime) == 0 &&
!TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
{
StringObject* str;
@@ -34122,7 +34026,7 @@ BOOL GCHeap::StressHeap(alloc_context * acontext)
}
}
}
- FastInterlockDecrement((LONG *) &OneAtATime);
+ Interlocked::Decrement(&OneAtATime);
#endif // !MULTIPLE_HEAPS
if (IsConcurrentGCEnabled())
{
@@ -35705,18 +35609,18 @@ size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
#ifdef SERVER_GC
// performance data seems to indicate halving the size results
// in optimal perf. Ask for adjusted gen0 size.
- gen0size = max(GetLargestOnDieCacheSize(FALSE)/GetLogicalCpuCount(),(256*1024));
+ gen0size = max(GCToOSInterface::GetLargestOnDieCacheSize(FALSE)/GCToOSInterface::GetLogicalCpuCount(),(256*1024));
#if (defined(_TARGET_AMD64_))
// if gen0 size is too large given the available memory, reduce it.
// Get true cache size, as we don't want to reduce below this.
- size_t trueSize = max(GetLargestOnDieCacheSize(TRUE)/GetLogicalCpuCount(),(256*1024));
+ size_t trueSize = max(GCToOSInterface::GetLargestOnDieCacheSize(TRUE)/GCToOSInterface::GetLogicalCpuCount(),(256*1024));
dprintf (2, ("cache: %Id-%Id, cpu: %Id",
- GetLargestOnDieCacheSize(FALSE),
- GetLargestOnDieCacheSize(TRUE),
- GetLogicalCpuCount()));
+ GCToOSInterface::GetLargestOnDieCacheSize(FALSE),
+ GCToOSInterface::GetLargestOnDieCacheSize(TRUE),
+ GCToOSInterface::GetLogicalCpuCount()));
GCMemoryStatus ms;
- GetProcessMemoryLoad (&ms);
+ GCToOSInterface::GetMemoryStatus (&ms);
// if the total min GC across heaps will exceed 1/6th of available memory,
// then reduce the min GC size until it either fits or has been reduced to cache size.
while ((gen0size * gc_heap::n_heaps) > (ms.ullAvailPhys / 6))
@@ -35731,7 +35635,7 @@ size_t GCHeap::GetValidGen0MaxSize(size_t seg_size)
#endif //_TARGET_AMD64_
#else //SERVER_GC
- gen0size = max((4*GetLargestOnDieCacheSize(TRUE)/5),(256*1024));
+ gen0size = max((4*GCToOSInterface::GetLargestOnDieCacheSize(TRUE)/5),(256*1024));
#endif //SERVER_GC
#else //!FEATURE_REDHAWK
gen0size = (256*1024);
@@ -35940,8 +35844,7 @@ GCHeap::SetCardsAfterBulkCopy( Object **StartPoint, size_t len )
// Set Bit For Card and advance to next card
size_t card = gcard_of ((uint8_t*)rover);
- FastInterlockOr ((DWORD RAW_KEYWORD(volatile) *)&g_card_table[card/card_word_width],
- (1 << (uint32_t)(card % card_word_width)));
+ Interlocked::Or (&g_card_table[card/card_word_width], (1U << (card % card_word_width)));
// Skip to next card for the object
rover = (Object**)align_on_card ((uint8_t*)(rover+1));
}
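The FastInterlock* free functions used throughout this file become members of an Interlocked class with deduced operand types, which removes the (LONG*) casts. A compiler-intrinsic sketch of the operations used above; this is an assumed shape, with the real templates in the new gcenv.interlocked.h/.inl files listed in the diffstat:

    class Interlocked
    {
    public:
        template <typename T>
        static T Increment(T volatile* addend)
        {
            return __sync_add_and_fetch(addend, 1);   // InterlockedIncrement on MSVC
        }

        template <typename T>
        static T Decrement(T volatile* addend)
        {
            return __sync_sub_and_fetch(addend, 1);   // InterlockedDecrement on MSVC
        }

        template <typename T>
        static void Or(T volatile* destination, T value)
        {
            __sync_fetch_and_or(destination, value);  // used for the card-table bit set above
        }
    };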
@@ -35987,7 +35890,7 @@ bool CFinalize::Initialize()
STRESS_LOG_OOM_STACK(sizeof(Object*[100]));
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
return false;
}
@@ -36000,7 +35903,7 @@ bool CFinalize::Initialize()
m_PromotedCount = 0;
lock = -1;
#ifdef _DEBUG
- lockowner_threadid = (uint32_t) -1;
+ lockowner_threadid.Clear();
#endif // _DEBUG
return true;
@@ -36024,22 +35927,22 @@ void CFinalize::EnterFinalizeLock()
GCToEEInterface::IsPreemptiveGCDisabled(GetThread()));
retry:
- if (FastInterlockExchange ((LONG*)&lock, 0) >= 0)
+ if (Interlocked::Exchange (&lock, 0) >= 0)
{
unsigned int i = 0;
while (lock >= 0)
{
YieldProcessor(); // indicate to the processor that we are spinning
if (++i & 7)
- __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::YieldThread (0);
else
- __SwitchToThread (5, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep (5);
}
goto retry;
}
#ifdef _DEBUG
- lockowner_threadid = ::GetCurrentThreadId();
+ lockowner_threadid.SetToCurrentThread();
#endif // _DEBUG
}
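A note on the back-off cadence in EnterFinalizeLock above: ++i & 7 is zero only on every 8th pass, so the waiter yields its timeslice seven times out of eight and sleeps 5 ms on the eighth. A tiny standalone illustration of that cadence:

    #include <cstdio>

    int main()
    {
        unsigned int i = 0;
        for (int spin = 0; spin < 16; spin++)
        {
            if (++i & 7)
                printf("spin %2d: yield\n", spin);
            else
                printf("spin %2d: sleep 5ms\n", spin); // every 8th pass
        }
        return 0;
    }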
@@ -36051,7 +35954,7 @@ void CFinalize::LeaveFinalizeLock()
GCToEEInterface::IsPreemptiveGCDisabled(GetThread()));
#ifdef _DEBUG
- lockowner_threadid = (uint32_t) -1;
+ lockowner_threadid.Clear();
#endif // _DEBUG
lock = -1;
}
@@ -36101,7 +36004,7 @@ CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
STRESS_LOG_OOM_STACK(0);
if (g_pConfig->IsGCBreakOnOOMEnabled())
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
}
#ifdef FEATURE_REDHAWK
return false;
@@ -36753,7 +36656,7 @@ void TouchPages(LPVOID pStart, uint32_t cb)
void deleteGCShadow()
{
if (g_GCShadow != 0)
- VirtualFree (g_GCShadow, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow);
g_GCShadow = 0;
g_GCShadowEnd = 0;
}
@@ -36768,7 +36671,7 @@ void initGCShadow()
if (len > (size_t)(g_GCShadowEnd - g_GCShadow))
{
deleteGCShadow();
- g_GCShadowEnd = g_GCShadow = (uint8_t*) VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ g_GCShadowEnd = g_GCShadow = (uint8_t*) GCToOSInterface::VirtualCommit(0, len);
if (g_GCShadow)
{
g_GCShadowEnd += len;
diff --git a/src/Native/gc/gc.h b/src/Native/gc/gc.h
index 3421a48bd..94d2d5df2 100644
--- a/src/Native/gc/gc.h
+++ b/src/Native/gc/gc.h
@@ -204,6 +204,7 @@ struct ScanContext
{
Thread* thread_under_crawl;
int thread_number;
+ uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
BOOL promotion; //TRUE: Promotion, FALSE: Relocation.
BOOL concurrent; //TRUE: concurrent scanning
#if CHECK_APP_DOMAIN_LEAKS || defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
@@ -225,6 +226,7 @@ struct ScanContext
thread_under_crawl = 0;
thread_number = -1;
+ stack_limit = 0;
promotion = FALSE;
concurrent = FALSE;
#ifdef GC_PROFILING
@@ -254,7 +256,7 @@ struct ProfilingScanContext : ScanContext
fProfilerPinned = fProfilerPinnedParam;
pvEtwContext = NULL;
#ifdef FEATURE_CONSERVATIVE_GC
- // To not confuse CNameSpace::GcScanRoots
+ // To not confuse GCScan::GcScanRoots
promotion = g_pConfig->GetGCConservative();
#endif
}
diff --git a/src/Native/gc/gcee.cpp b/src/Native/gc/gcee.cpp
index ad20009f0..8e4e4480b 100644
--- a/src/Native/gc/gcee.cpp
+++ b/src/Native/gc/gcee.cpp
@@ -379,15 +379,11 @@ size_t GCHeap::GetLastGCDuration(int generation)
return dd_gc_elapsed_time (hp->dynamic_data_of (generation));
}
+size_t GetHighPrecisionTimeStamp();
+
size_t GCHeap::GetNow()
{
-#ifdef MULTIPLE_HEAPS
- gc_heap* hp = gc_heap::g_heaps[0];
-#else
- gc_heap* hp = pGenGCHeap;
-#endif //MULTIPLE_HEAPS
-
- return hp->get_time_now();
+ return GetHighPrecisionTimeStamp();
}
#if defined(GC_PROFILING) //UNIXTODO: Enable this for FEATURE_EVENT_TRACE
@@ -439,7 +435,7 @@ void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForE
// heap.
gc_heap* hp = gc_heap::g_heaps [hn];
SC.thread_number = hn;
- CNameSpace::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
+ GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
// The finalizer queue is also a source of roots
SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
@@ -447,7 +443,7 @@ void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForE
}
#else
// Ask the vm to go over all of the roots
- CNameSpace::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
+ GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC);
// The finalizer queue is also a source of roots
SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
@@ -456,7 +452,7 @@ void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForE
#endif // MULTIPLE_HEAPS
// Handles are kept independent of wks/svr/concurrent builds
SC.dwEtwRootKind = kEtwGCRootKindHandle;
- CNameSpace::GcScanHandlesForProfilerAndETW(max_generation, &SC);
+ GCScan::GcScanHandlesForProfilerAndETW(max_generation, &SC);
// indicate that regular handle scanning is over, so we can flush the buffered roots
// to the profiler. (This is for profapi only. ETW will flush after the
@@ -476,7 +472,7 @@ void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForE
// GcScanDependentHandlesForProfiler double-checks
// CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
- CNameSpace::GcScanDependentHandlesForProfilerAndETW(max_generation, &SC);
+ GCScan::GcScanDependentHandlesForProfilerAndETW(max_generation, &SC);
// indicate that dependent handle scanning is over, so we can flush the buffered roots
// to the profiler. (This is for profapi only. ETW will flush after the
diff --git a/src/Native/gc/gcpriv.h b/src/Native/gc/gcpriv.h
index 7f56b49b9..fe40c0ccd 100644
--- a/src/Native/gc/gcpriv.h
+++ b/src/Native/gc/gcpriv.h
@@ -25,7 +25,7 @@
inline void FATAL_GC_ERROR()
{
- DebugBreak();
+ GCToOSInterface::DebugBreak();
_ASSERTE(!"Fatal Error in GC.");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
@@ -143,12 +143,12 @@ inline void FATAL_GC_ERROR()
#if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS)
#define BEGIN_TIMING(x) \
- LARGE_INTEGER x##_start; \
+ int64_t x##_start; \
- QueryPerformanceCounter (&x##_start)
+ x##_start = GCToOSInterface::QueryPerformanceCounter ()
#define END_TIMING(x) \
- LARGE_INTEGER x##_end; \
+ int64_t x##_end; \
- QueryPerformanceCounter (&x##_end); \
- x += x##_end.QuadPart - x##_start.QuadPart
+ x##_end = GCToOSInterface::QueryPerformanceCounter (); \
+ x += x##_end - x##_start
#else
#define BEGIN_TIMING(x)
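Call sites for the reworked timing macros are unchanged; a usage sketch with a hypothetical accumulator name:

    int64_t mark_time = 0;     // accumulator owned by the caller

    BEGIN_TIMING(mark_time);   // snapshots GCToOSInterface::QueryPerformanceCounter()
    mark_phase (condemned_gen_number, FALSE);
    END_TIMING(mark_time);     // adds the elapsed raw ticks to mark_time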
@@ -204,70 +204,7 @@ void GCLogConfig (const char *fmt, ... );
#define CLREvent CLREventStatic
-#ifdef CreateFileMapping
-
-#undef CreateFileMapping
-
-#endif //CreateFileMapping
-
-#define CreateFileMapping WszCreateFileMapping
-
// hosted api
-#ifdef InitializeCriticalSection
-#undef InitializeCriticalSection
-#endif //ifdef InitializeCriticalSection
-#define InitializeCriticalSection UnsafeInitializeCriticalSection
-
-#ifdef DeleteCriticalSection
-#undef DeleteCriticalSection
-#endif //ifdef DeleteCriticalSection
-#define DeleteCriticalSection UnsafeDeleteCriticalSection
-
-#ifdef EnterCriticalSection
-#undef EnterCriticalSection
-#endif //ifdef EnterCriticalSection
-#define EnterCriticalSection UnsafeEEEnterCriticalSection
-
-#ifdef LeaveCriticalSection
-#undef LeaveCriticalSection
-#endif //ifdef LeaveCriticalSection
-#define LeaveCriticalSection UnsafeEELeaveCriticalSection
-
-#ifdef TryEnterCriticalSection
-#undef TryEnterCriticalSection
-#endif //ifdef TryEnterCriticalSection
-#define TryEnterCriticalSection UnsafeEETryEnterCriticalSection
-
-#ifdef CreateSemaphore
-#undef CreateSemaphore
-#endif //CreateSemaphore
-#define CreateSemaphore UnsafeCreateSemaphore
-
-#ifdef CreateEvent
-#undef CreateEvent
-#endif //ifdef CreateEvent
-#define CreateEvent UnsafeCreateEvent
-
-#ifdef VirtualAlloc
-#undef VirtualAlloc
-#endif //ifdef VirtualAlloc
-#define VirtualAlloc ClrVirtualAlloc
-
-#ifdef VirtualFree
-#undef VirtualFree
-#endif //ifdef VirtualFree
-#define VirtualFree ClrVirtualFree
-
-#ifdef VirtualQuery
-#undef VirtualQuery
-#endif //ifdef VirtualQuery
-#define VirtualQuery ClrVirtualQuery
-
-#ifdef VirtualProtect
-#undef VirtualProtect
-#endif //ifdef VirtualProtect
-#define VirtualProtect ClrVirtualProtect
-
#ifdef memcpy
#undef memcpy
#endif //memcpy
@@ -554,6 +491,7 @@ enum gc_type
gc_type_max = 3
};
+#define v_high_memory_load_th 97
//encapsulates the mechanism for the current gc
class gc_mechanisms
@@ -1044,7 +982,7 @@ struct spinlock_info
{
msl_enter_state enter_state;
msl_take_state take_state;
- uint32_t thread_id;
+ EEThreadId thread_id;
};
const unsigned HS_CACHE_LINE_SIZE = 128;
@@ -1292,7 +1230,7 @@ public:
static
gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size);
static
- uint32_t __stdcall gc_thread_stub (void* arg);
+ void __stdcall gc_thread_stub (void* arg);
#endif //MULTIPLE_HEAPS
CObjectHeader* try_fast_alloc (size_t jsize);
@@ -1389,11 +1327,11 @@ protected:
int joined_generation_to_condemn (BOOL should_evaluate_elevation, int n_initial, BOOL* blocking_collection
STRESS_HEAP_ARG(int n_original));
- PER_HEAP_ISOLATED
- size_t min_reclaim_fragmentation_threshold(uint64_t total_mem, uint32_t num_heaps);
+ PER_HEAP
+ size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps);
PER_HEAP_ISOLATED
- uint64_t min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps);
+ uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps);
PER_HEAP
void concurrent_print_time_delta (const char* msg);
@@ -1620,13 +1558,13 @@ protected:
struct loh_state_info
{
allocation_state alloc_state;
- uint32_t thread_id;
+ EEThreadId thread_id;
};
PER_HEAP
loh_state_info last_loh_states[max_saved_loh_states];
PER_HEAP
- void add_saved_loh_state (allocation_state loh_state_to_save, uint32_t thread_id);
+ void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id);
#endif //RECORD_LOH_STATE
PER_HEAP
BOOL allocate_large (int gen_number,
@@ -2469,6 +2407,8 @@ protected:
PER_HEAP
void compute_new_ephemeral_size();
PER_HEAP
+ BOOL expand_reused_seg_p();
+ PER_HEAP
BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size,
size_t min_cont_size, allocator* al);
PER_HEAP
@@ -2514,8 +2454,6 @@ protected:
PER_HEAP
void save_ephemeral_generation_starts();
- static size_t get_time_now();
-
PER_HEAP
bool init_dynamic_data ();
PER_HEAP
@@ -2609,9 +2547,9 @@ protected:
PER_HEAP_ISOLATED
void destroy_thread_support ();
PER_HEAP
- HANDLE create_gc_thread();
+ bool create_gc_thread();
PER_HEAP
- uint32_t gc_thread_function();
+ void gc_thread_function();
#ifdef MARK_LIST
#ifdef PARALLEL_MARK_LIST_SORT
PER_HEAP
@@ -2991,16 +2929,19 @@ public:
#ifdef BIT64
PER_HEAP_ISOLATED
size_t youngest_gen_desired_th;
+#endif //BIT64
+
+ PER_HEAP_ISOLATED
+ uint32_t high_memory_load_th;
PER_HEAP_ISOLATED
- size_t mem_one_percent;
+ uint64_t mem_one_percent;
PER_HEAP_ISOLATED
uint64_t total_physical_mem;
PER_HEAP_ISOLATED
uint64_t available_physical_mem;
-#endif // BIT64
PER_HEAP_ISOLATED
size_t last_gc_index;
@@ -3103,7 +3044,7 @@ protected:
#ifdef BACKGROUND_GC
PER_HEAP
- uint32_t bgc_thread_id;
+ EEThreadId bgc_thread_id;
#ifdef WRITE_WATCH
PER_HEAP
@@ -3146,7 +3087,7 @@ protected:
Thread* bgc_thread;
PER_HEAP
- CRITICAL_SECTION bgc_threads_timeout_cs;
+ CLRCriticalSection bgc_threads_timeout_cs;
PER_HEAP_ISOLATED
CLREvent background_gc_done_event;
@@ -3500,7 +3441,7 @@ protected:
BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE);
PER_HEAP
BOOL
- dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number, uint64_t total_mem);
+ dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number);
PER_HEAP
BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem);
PER_HEAP
@@ -3722,8 +3663,6 @@ public:
SPTR_DECL(PTR_gc_heap, g_heaps);
static
- HANDLE* g_gc_threads; // keep all of the gc threads.
- static
size_t* g_promoted;
#ifdef BACKGROUND_GC
static
@@ -3778,7 +3717,7 @@ private:
VOLATILE(int32_t) lock;
#ifdef _DEBUG
- uint32_t lockowner_threadid;
+ EEThreadId lockowner_threadid;
#endif // _DEBUG
BOOL GrowArray();
diff --git a/src/Native/gc/gcscan.cpp b/src/Native/gc/gcscan.cpp
index dd7b4c1be..78e0dd61f 100644
--- a/src/Native/gc/gcscan.cpp
+++ b/src/Native/gc/gcscan.cpp
@@ -22,12 +22,12 @@
//#define CATCH_GC //catches exception during GC
#ifdef DACCESS_COMPILE
-SVAL_IMPL_INIT(int32_t, CNameSpace, m_GcStructuresInvalidCnt, 1);
+SVAL_IMPL_INIT(int32_t, GCScan, m_GcStructuresInvalidCnt, 1);
#else //DACCESS_COMPILE
-VOLATILE(int32_t) CNameSpace::m_GcStructuresInvalidCnt = 1;
+VOLATILE(int32_t) GCScan::m_GcStructuresInvalidCnt = 1;
#endif //DACCESS_COMPILE
-bool CNameSpace::GetGcRuntimeStructuresValid ()
+bool GCScan::GetGcRuntimeStructuresValid ()
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
@@ -39,7 +39,7 @@ bool CNameSpace::GetGcRuntimeStructuresValid ()
#ifndef FEATURE_REDHAWK
void
-CNameSpace::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+GCScan::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
UNREFERENCED_PARAMETER(flags);
m_GcStructuresInvalidCnt.EnumMem();
@@ -62,7 +62,7 @@ CNameSpace::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
// will still be correct and this scan allows us to spot a common optimization where no dependent handles are
// due for retirement in this particular GC. This is an important optimization to take advantage of since
// synchronizing the GC to calculate complete results is a costly operation.
-void CNameSpace::GcDhInitialScan(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
+void GCScan::GcDhInitialScan(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
// We allocate space for dependent handle scanning context during Ref_Initialize. Under server GC there
// are actually as many contexts as heaps (and CPUs). Ref_GetDependentHandleContext() retrieves the
@@ -87,7 +87,7 @@ void CNameSpace::GcDhInitialScan(promote_func* fn, int condemned, int max_gen, S
// This method is called after GcDhInitialScan and before each subsequent scan (GcDhReScan below). It
// determines whether any handles are left that have unpromoted secondaries.
-bool CNameSpace::GcDhUnpromotedHandlesExist(ScanContext* sc)
+bool GCScan::GcDhUnpromotedHandlesExist(ScanContext* sc)
{
WRAPPER_NO_CONTRACT;
// Locate our dependent handle context based on the GC context.
@@ -103,7 +103,7 @@ bool CNameSpace::GcDhUnpromotedHandlesExist(ScanContext* sc)
// this method in a loop. The scan records state that lets us know when to terminate (no further handles to
// be promoted or no promotions in the last scan). Returns true if at least one object was promoted as a
// result of the scan.
-bool CNameSpace::GcDhReScan(ScanContext* sc)
+bool GCScan::GcDhReScan(ScanContext* sc)
{
// Locate our dependent handle context based on the GC context.
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
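Driver-side usage of the rescan protocol described above, simplified from gc_heap::scan_dependent_handles earlier in this diff (a sketch; the real loop also interleaves mark-stack overflow processing):

    GCScan::GcDhInitialScan(GCHeap::Promote, condemned, max_gen, &sc);
    while (GCScan::GcDhUnpromotedHandlesExist(&sc))
    {
        if (!GCScan::GcDhReScan(&sc))
            break; // a full rescan promoted nothing new; fixed point reached
    }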
@@ -115,7 +115,7 @@ bool CNameSpace::GcDhReScan(ScanContext* sc)
* Scan for dead weak pointers
*/
-void CNameSpace::GcWeakPtrScan( promote_func* fn, int condemned, int max_gen, ScanContext* sc )
+void GCScan::GcWeakPtrScan( promote_func* fn, int condemned, int max_gen, ScanContext* sc )
{
// Clear out weak pointers that are no longer live.
Ref_CheckReachable(condemned, max_gen, (uintptr_t)sc);
@@ -143,19 +143,19 @@ static void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t * /*
}
}
-void CNameSpace::GcWeakPtrScanBySingleThread( int condemned, int max_gen, ScanContext* sc )
+void GCScan::GcWeakPtrScanBySingleThread( int condemned, int max_gen, ScanContext* sc )
{
UNREFERENCED_PARAMETER(condemned);
UNREFERENCED_PARAMETER(max_gen);
GCToEEInterface::SyncBlockCacheWeakPtrScan(&CheckPromoted, (uintptr_t)sc, 0);
}
-void CNameSpace::GcScanSizedRefs(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
+void GCScan::GcScanSizedRefs(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
Ref_ScanSizedRefHandles(condemned, max_gen, sc, fn);
}
-void CNameSpace::GcShortWeakPtrScan(promote_func* fn, int condemned, int max_gen,
+void GCScan::GcShortWeakPtrScan(promote_func* fn, int condemned, int max_gen,
ScanContext* sc)
{
UNREFERENCED_PARAMETER(fn);
@@ -166,7 +166,7 @@ void CNameSpace::GcShortWeakPtrScan(promote_func* fn, int condemned, int max_ge
* Scan all stack roots in this 'namespace'
*/
-void CNameSpace::GcScanRoots(promote_func* fn, int condemned, int max_gen,
+void GCScan::GcScanRoots(promote_func* fn, int condemned, int max_gen,
ScanContext* sc)
{
#if defined ( _DEBUG) && defined (CATCH_GC)
@@ -190,7 +190,7 @@ void CNameSpace::GcScanRoots(promote_func* fn, int condemned, int max_gen,
*/
-void CNameSpace::GcScanHandles (promote_func* fn, int condemned, int max_gen,
+void GCScan::GcScanHandles (promote_func* fn, int condemned, int max_gen,
ScanContext* sc)
{
@@ -229,7 +229,7 @@ void CNameSpace::GcScanHandles (promote_func* fn, int condemned, int max_gen,
* Scan all handle roots in this 'namespace' for profiling
*/
-void CNameSpace::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc)
+void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc)
{
LIMITED_METHOD_CONTRACT;
@@ -254,7 +254,7 @@ void CNameSpace::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc)
/*
* Scan dependent handles in this 'namespace' for profiling
*/
-void CNameSpace::GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc)
+void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc)
{
LIMITED_METHOD_CONTRACT;
@@ -264,31 +264,31 @@ void CNameSpace::GcScanDependentHandlesForProfilerAndETW (int max_gen, Profiling
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
-void CNameSpace::GcRuntimeStructuresValid (BOOL bValid)
+void GCScan::GcRuntimeStructuresValid (BOOL bValid)
{
WRAPPER_NO_CONTRACT;
if (!bValid)
{
int32_t result;
- result = FastInterlockIncrement ((LONG*)&m_GcStructuresInvalidCnt);
+ result = Interlocked::Increment (&m_GcStructuresInvalidCnt);
_ASSERTE (result > 0);
}
else
{
int32_t result;
- result = FastInterlockDecrement ((LONG*)&m_GcStructuresInvalidCnt);
+ result = Interlocked::Decrement (&m_GcStructuresInvalidCnt);
_ASSERTE (result >= 0);
}
}
-void CNameSpace::GcDemote (int condemned, int max_gen, ScanContext* sc)
+void GCScan::GcDemote (int condemned, int max_gen, ScanContext* sc)
{
Ref_RejuvenateHandles (condemned, max_gen, (uintptr_t)sc);
if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
GCToEEInterface::SyncBlockCacheDemote(max_gen);
}
-void CNameSpace::GcPromotionsGranted (int condemned, int max_gen, ScanContext* sc)
+void GCScan::GcPromotionsGranted (int condemned, int max_gen, ScanContext* sc)
{
Ref_AgeHandles(condemned, max_gen, (uintptr_t)sc);
if (!GCHeap::IsServerHeap() || sc->thread_number == 0)
@@ -296,7 +296,7 @@ void CNameSpace::GcPromotionsGranted (int condemned, int max_gen, ScanContext* s
}
-size_t CNameSpace::AskForMoreReservedMemory (size_t old_size, size_t need_size)
+size_t GCScan::AskForMoreReservedMemory (size_t old_size, size_t need_size)
{
LIMITED_METHOD_CONTRACT;
@@ -317,7 +317,7 @@ size_t CNameSpace::AskForMoreReservedMemory (size_t old_size, size_t need_size)
return old_size + need_size;
}
-void CNameSpace::VerifyHandleTable(int condemned, int max_gen, ScanContext* sc)
+void GCScan::VerifyHandleTable(int condemned, int max_gen, ScanContext* sc)
{
LIMITED_METHOD_CONTRACT;
Ref_VerifyHandleTable(condemned, max_gen, sc);
diff --git a/src/Native/gc/gcscan.h b/src/Native/gc/gcscan.h
index 0fddf3540..502a04c3b 100644
--- a/src/Native/gc/gcscan.h
+++ b/src/Native/gc/gcscan.h
@@ -32,14 +32,7 @@ struct DhContext
ScanContext *m_pScanContext; // The GC's scan context for this phase
};
-
-// <TODO>
-// @TODO (JSW): For compatibility with the existing GC code we use CNamespace
-// as the name of this class. I'm planning on changing it to
-// something like GCDomain....
-// </TODO>
-
-class CNameSpace
+class GCScan
{
friend struct ::_DacGlobals;
diff --git a/src/Native/gc/handletable.cpp b/src/Native/gc/handletable.cpp
index e14316bd0..7f855bba2 100644
--- a/src/Native/gc/handletable.cpp
+++ b/src/Native/gc/handletable.cpp
@@ -688,7 +688,7 @@ uintptr_t HndCompareExchangeHandleExtraInfo(OBJECTHANDLE handle, uint32_t uType,
if (pUserData)
{
// yes - attempt to store the info
- return (uintptr_t)FastInterlockCompareExchangePointer((void**)pUserData, (void*)lNewExtraInfo, (void*)lOldExtraInfo);
+ return (uintptr_t)Interlocked::CompareExchangePointer((void**)pUserData, (void*)lNewExtraInfo, (void*)lOldExtraInfo);
}
_ASSERTE(!"Shouldn't be trying to call HndCompareExchangeHandleExtraInfo on handle types without extra info");
diff --git a/src/Native/gc/handletable.inl b/src/Native/gc/handletable.inl
index 29594d0a7..15c38fdd9 100644
--- a/src/Native/gc/handletable.inl
+++ b/src/Native/gc/handletable.inl
@@ -67,7 +67,7 @@ inline void* HndInterlockedCompareExchangeHandle(OBJECTHANDLE handle, OBJECTREF
// store the pointer
- void* ret = FastInterlockCompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle), value, oldValue);
+ void* ret = Interlocked::CompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle), value, oldValue);
if (ret == oldValue)
HndLogSetEvent(handle, value);
@@ -101,7 +101,7 @@ inline BOOL HndFirstAssignHandle(OBJECTHANDLE handle, OBJECTREF objref)
_UNCHECKED_OBJECTREF null = NULL;
// store the pointer if we are the first ones here
- BOOL success = (NULL == FastInterlockCompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle),
+ BOOL success = (NULL == Interlocked::CompareExchangePointer(reinterpret_cast<_UNCHECKED_OBJECTREF volatile*>(handle),
value,
null));
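Editor's note: the call sites in handletable.cpp and handletable.inl above all use the same publish-once idiom, a pointer compare-exchange against NULL that only the first writer can win. A hedged sketch, assuming CompareExchangePointer is templated over the destination type as in gcenv.interlocked.h:

    // Illustrative helper, not part of the change: returns true only for the
    // thread whose store actually landed in the slot.
    static void* volatile g_slot = NULL;

    bool PublishOnce(void* newValue)
    {
        // CAS returns the previous value; seeing NULL means our store won.
        return Interlocked::CompareExchangePointer(&g_slot, newValue, (void*)NULL) == NULL;
    }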
diff --git a/src/Native/gc/handletablecache.cpp b/src/Native/gc/handletablecache.cpp
index 717348fdb..33cc08e82 100644
--- a/src/Native/gc/handletablecache.cpp
+++ b/src/Native/gc/handletablecache.cpp
@@ -86,7 +86,7 @@ void SpinUntil(void *pCond, BOOL fNonZero)
#endif //_DEBUG
// sleep for a little while
- __SwitchToThread(dwThisSleepPeriod, CALLER_LIMITS_SPINNING);
+ GCToOSInterface::Sleep(dwThisSleepPeriod);
// now update our sleep period
dwThisSleepPeriod = dwNextSleepPeriod;
@@ -471,7 +471,7 @@ void TableFullRebalanceCache(HandleTable *pTable,
// update the write index for the free bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AFTER THIS POINT THE FREE BANK IS LIVE AND COULD RECEIVE NEW HANDLES
- FastInterlockExchange((LONG*)&pCache->lFreeIndex, lMinFreeIndex);
+ Interlocked::Exchange(&pCache->lFreeIndex, lMinFreeIndex);
// now if we have any handles left, store them in the reserve bank
if (uHandleCount)
@@ -488,7 +488,7 @@ void TableFullRebalanceCache(HandleTable *pTable,
// update the read index for the reserve bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AT THIS POINT THE RESERVE BANK IS LIVE AND HANDLES COULD BE ALLOCATED FROM IT
- FastInterlockExchange((LONG*)&pCache->lReserveIndex, lMinReserveIndex);
+ Interlocked::Exchange(&pCache->lReserveIndex, lMinReserveIndex);
}
@@ -599,12 +599,12 @@ void TableQuickRebalanceCache(HandleTable *pTable,
// update the write index for the free bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AFTER THIS POINT THE FREE BANK IS LIVE AND COULD RECEIVE NEW HANDLES
- FastInterlockExchange((LONG*)&pCache->lFreeIndex, lMinFreeIndex);
+ Interlocked::Exchange(&pCache->lFreeIndex, lMinFreeIndex);
// update the read index for the reserve bank
// NOTE: we use an interlocked exchange here to guarantee relative store order on MP
// AT THIS POINT THE RESERVE BANK IS LIVE AND HANDLES COULD BE ALLOCATED FROM IT
- FastInterlockExchange((LONG*)&pCache->lReserveIndex, lMinReserveIndex);
+ Interlocked::Exchange(&pCache->lReserveIndex, lMinReserveIndex);
}
@@ -630,13 +630,13 @@ OBJECTHANDLE TableCacheMissOnAlloc(HandleTable *pTable, HandleTypeCache *pCache,
CrstHolder ch(&pTable->Lock);
// try again to take a handle (somebody else may have rebalanced)
- int32_t lReserveIndex = FastInterlockDecrement((LONG*)&pCache->lReserveIndex);
+ int32_t lReserveIndex = Interlocked::Decrement(&pCache->lReserveIndex);
// are we still waiting for handles?
if (lReserveIndex < 0)
{
// yup, suspend free list usage...
- int32_t lFreeIndex = FastInterlockExchange((LONG*)&pCache->lFreeIndex, 0L);
+ int32_t lFreeIndex = Interlocked::Exchange(&pCache->lFreeIndex, 0);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, &handle, NULL);
@@ -680,13 +680,13 @@ void TableCacheMissOnFree(HandleTable *pTable, HandleTypeCache *pCache, uint32_t
CrstHolder ch(&pTable->Lock);
// try again to take a slot (somebody else may have rebalanced)
- int32_t lFreeIndex = FastInterlockDecrement((LONG*)&pCache->lFreeIndex);
+ int32_t lFreeIndex = Interlocked::Decrement(&pCache->lFreeIndex);
// are we still waiting for free slots?
if (lFreeIndex < 0)
{
// yup, suspend reserve list usage...
- int32_t lReserveIndex = FastInterlockExchange((LONG*)&pCache->lReserveIndex, 0L);
+ int32_t lReserveIndex = Interlocked::Exchange(&pCache->lReserveIndex, 0);
// ...and rebalance the cache...
TableQuickRebalanceCache(pTable, pCache, uType, lReserveIndex, lFreeIndex, NULL, handle);
@@ -718,7 +718,7 @@ OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, uint32_t uType
if (pTable->rgQuickCache[uType])
{
// try to grab the handle we saw
- handle = FastInterlockExchangePointer(pTable->rgQuickCache + uType, (OBJECTHANDLE)NULL);
+ handle = Interlocked::ExchangePointer(pTable->rgQuickCache + uType, (OBJECTHANDLE)NULL);
// if it worked then we're done
if (handle)
@@ -729,7 +729,7 @@ OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, uint32_t uType
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a handle from the main cache
- int32_t lReserveIndex = FastInterlockDecrement((LONG*)&pCache->lReserveIndex);
+ int32_t lReserveIndex = Interlocked::Decrement(&pCache->lReserveIndex);
// did we underflow?
if (lReserveIndex < 0)
@@ -787,7 +787,7 @@ void TableFreeSingleHandleToCache(HandleTable *pTable, uint32_t uType, OBJECTHAN
if (!pTable->rgQuickCache[uType])
{
// yup - try to stuff our handle in the slot we saw
- handle = FastInterlockExchangePointer(&pTable->rgQuickCache[uType], handle);
+ handle = Interlocked::ExchangePointer(&pTable->rgQuickCache[uType], handle);
// if we didn't end up with another handle then we're done
if (!handle)
@@ -798,7 +798,7 @@ void TableFreeSingleHandleToCache(HandleTable *pTable, uint32_t uType, OBJECTHAN
HandleTypeCache *pCache = pTable->rgMainCache + uType;
// try to take a free slot from the main cache
- int32_t lFreeIndex = FastInterlockDecrement((LONG*)&pCache->lFreeIndex);
+ int32_t lFreeIndex = Interlocked::Decrement(&pCache->lFreeIndex);
// did we underflow?
if (lFreeIndex < 0)
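Editor's note: SpinUntil above now sleeps through GCToOSInterface::Sleep between polls. Only the Sleep call and the period hand-off are visible in the hunk; the sketch below reconstructs a plausible backoff loop around them, where the doubling-and-cap growth policy and the ConditionMet predicate are assumptions for illustration:

    extern bool ConditionMet();            // hypothetical predicate

    void WaitWithBackoff()
    {
        uint32_t dwThisSleepPeriod = 1;    // milliseconds
        uint32_t dwNextSleepPeriod = 2;
        const uint32_t MAX_WAIT = 1000;

        while (!ConditionMet())
        {
            GCToOSInterface::Sleep(dwThisSleepPeriod);    // yield the CPU
            dwThisSleepPeriod = dwNextSleepPeriod;        // advance the period
            dwNextSleepPeriod = (dwNextSleepPeriod < MAX_WAIT / 2)
                                    ? 2 * dwNextSleepPeriod
                                    : MAX_WAIT;           // grow, capped
        }
    }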
diff --git a/src/Native/gc/handletablecore.cpp b/src/Native/gc/handletablecore.cpp
index 8435f9416..d302087ec 100644
--- a/src/Native/gc/handletablecore.cpp
+++ b/src/Native/gc/handletablecore.cpp
@@ -239,7 +239,7 @@ BOOL TableCanFreeSegmentNow(HandleTable *pTable, TableSegment *pSegment)
// fail but by the time a dump was created the lock was unowned so
// there was no way to tell who the previous owner was.
EEThreadId threadId = pTable->Lock.GetHolderThreadId();
- _ASSERTE(threadId.IsSameThread());
+ _ASSERTE(threadId.IsCurrentThread());
#endif // _DEBUG
// determine if any segment is currently being scanned asynchronously
@@ -526,7 +526,7 @@ BOOL SegmentInitialize(TableSegment *pSegment, HandleTable *pTable)
dwCommit &= ~(g_SystemInfo.dwPageSize - 1);
// commit the header
- if (!ClrVirtualAlloc(pSegment, dwCommit, MEM_COMMIT, PAGE_READWRITE))
+ if (!GCToOSInterface::VirtualCommit(pSegment, dwCommit))
{
//_ASSERTE(FALSE);
return FALSE;
@@ -581,7 +581,7 @@ void SegmentFree(TableSegment *pSegment)
*/
// free the segment's memory
- ClrVirtualFree(pSegment, 0, MEM_RELEASE);
+ GCToOSInterface::VirtualRelease(pSegment, HANDLE_SEGMENT_SIZE);
}
@@ -611,7 +611,7 @@ TableSegment *SegmentAlloc(HandleTable *pTable)
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT >= HANDLE_SEGMENT_SIZE);
_ASSERTE(HANDLE_SEGMENT_ALIGNMENT == 0x10000);
- pSegment = (TableSegment *)ClrVirtualAllocAligned(NULL, HANDLE_SEGMENT_SIZE, MEM_RESERVE, PAGE_NOACCESS, HANDLE_SEGMENT_ALIGNMENT);
+ pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(NULL, HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None);
_ASSERTE(((size_t)pSegment % HANDLE_SEGMENT_ALIGNMENT) == 0);
// bail out if we couldn't get any memory
@@ -1440,7 +1440,7 @@ uint32_t SegmentInsertBlockFromFreeListWorker(TableSegment *pSegment, uint32_t u
uint32_t dwCommit = g_SystemInfo.dwPageSize;
// commit the memory
- if (!ClrVirtualAlloc(pvCommit, dwCommit, MEM_COMMIT, PAGE_READWRITE))
+ if (!GCToOSInterface::VirtualCommit(pvCommit, dwCommit))
return BLOCK_INVALID;
// use the previous commit line as the new decommit line
@@ -1844,7 +1844,7 @@ void SegmentTrimExcessPages(TableSegment *pSegment)
if (dwHi > dwLo)
{
// decommit the memory
- ClrVirtualFree((LPVOID)dwLo, dwHi - dwLo, MEM_DECOMMIT);
+ GCToOSInterface::VirtualDecommit((LPVOID)dwLo, dwHi - dwLo);
// update the commit line
pSegment->bCommitLine = (uint8_t)((dwLo - (size_t)pSegment->rgValue) / HANDLE_BYTES_PER_BLOCK);
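Editor's note: taken together, the handletablecore.cpp hunks map the segment's whole memory lifecycle onto the new OS interface: an aligned reservation, a committed header, page-level decommit when trimming, and a final release. A condensed sketch using only names visible in this diff (firstPageBytes is illustrative):

    void* seg = GCToOSInterface::VirtualReserve(NULL, HANDLE_SEGMENT_SIZE,
                                                HANDLE_SEGMENT_ALIGNMENT,
                                                VirtualReserveFlags::None);
    if (seg != NULL)
    {
        size_t firstPageBytes = g_SystemInfo.dwPageSize;           // commit the header
        if (GCToOSInterface::VirtualCommit(seg, firstPageBytes))
        {
            // ... segment in use; excess pages are decommitted as they empty ...
            GCToOSInterface::VirtualDecommit(seg, firstPageBytes);
        }
        GCToOSInterface::VirtualRelease(seg, HANDLE_SEGMENT_SIZE); // drop the reservation
    }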
diff --git a/src/Native/gc/objecthandle.cpp b/src/Native/gc/objecthandle.cpp
index 8b72d0d43..1654cf9b9 100644
--- a/src/Native/gc/objecthandle.cpp
+++ b/src/Native/gc/objecthandle.cpp
@@ -787,7 +787,7 @@ HandleTableBucket *Ref_CreateHandleTableBucket(ADIndex uADIndex)
HndSetHandleTableIndex(result->pTable[uCPUindex], i+offset);
result->HandleTableIndex = i+offset;
- if (FastInterlockCompareExchangePointer(&walk->pBuckets[i], result, NULL) == 0) {
+ if (Interlocked::CompareExchangePointer(&walk->pBuckets[i], result, NULL) == 0) {
// Get a free slot.
bucketHolder.SuppressRelease();
return result;
@@ -812,7 +812,7 @@ HandleTableBucket *Ref_CreateHandleTableBucket(ADIndex uADIndex)
ZeroMemory(newMap->pBuckets,
INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
- if (FastInterlockCompareExchangePointer(&last->pNext, newMap.GetValue(), NULL) != NULL)
+ if (Interlocked::CompareExchangePointer(&last->pNext, newMap.GetValue(), NULL) != NULL)
{
// This thread loses.
delete [] newMap->pBuckets;
@@ -1575,8 +1575,8 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re
if (GCHeap::IsServerHeap())
{
- bDo = (FastInterlockIncrement((LONG*)&uCount) == 1);
- FastInterlockCompareExchange ((LONG*)&uCount, 0, GCHeap::GetGCHeap()->GetNumberOfHeaps());
+ bDo = (Interlocked::Increment(&uCount) == 1);
+ Interlocked::CompareExchange (&uCount, 0, GCHeap::GetGCHeap()->GetNumberOfHeaps());
_ASSERTE (uCount <= GCHeap::GetGCHeap()->GetNumberOfHeaps());
}
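Editor's note: the Ref_UpdatePointers hunk keeps the server-GC idiom where the first per-heap thread to arrive performs the global portion of the work and the last one resets the counter. Sketched below, with nHeaps standing in for GCHeap::GetGCHeap()->GetNumberOfHeaps():

    static int32_t uCount = 0;

    void PerHeapScan(int32_t nHeaps)
    {
        bool bDo = (Interlocked::Increment(&uCount) == 1);  // first arriver only
        Interlocked::CompareExchange(&uCount, 0, nHeaps);   // last arriver resets to 0
        if (bDo)
        {
            // the one-time, non-per-heap portion of the update runs here
        }
    }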
diff --git a/src/Native/gc/sample/CMakeLists.txt b/src/Native/gc/sample/CMakeLists.txt
index 8bed3adee..a46f9aeb8 100644
--- a/src/Native/gc/sample/CMakeLists.txt
+++ b/src/Native/gc/sample/CMakeLists.txt
@@ -1,11 +1,13 @@
project(clrgcsample)
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
include_directories(..)
include_directories(../env)
set(SOURCES
GCSample.cpp
- gcenv.cpp
+ gcenv.ee.cpp
../gccommon.cpp
../gceewks.cpp
../gcscan.cpp
@@ -19,45 +21,12 @@ set(SOURCES
if(WIN32)
list(APPEND SOURCES
- ../env/gcenv.windows.cpp)
+ gcenv.windows.cpp)
else()
list(APPEND SOURCES
- ../env/gcenv.unix.cpp)
-endif()
-
-if(CLR_CMAKE_PLATFORM_UNIX)
- add_compile_options(-Wno-format)
- add_compile_options(-Wno-unused-variable)
- add_compile_options(-Wno-unused-private-field)
- add_compile_options(-Wno-tautological-undefined-compare)
+ gcenv.unix.cpp)
endif()
-if(CLR_CMAKE_PLATFORM_ARCH_AMD64)
- add_definitions(-D_TARGET_AMD64_=1)
- set(IS_64BIT_BUILD 1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_I386)
- add_definitions(-D_TARGET_X86_=1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_ARM)
- add_definitions(-D_TARGET_ARM_=1)
-elseif(CLR_CMAKE_PLATFORM_ARCH_ARM64)
- add_definitions(-D_TARGET_ARM64_=1)
- set(IS_64BIT_BUILD 1)
-else()
- clr_unknown_arch()
-endif()
-
-if(IS_64BIT_BUILD)
- add_definitions(-DBIT64=1)
-endif(IS_64BIT_BUILD)
-
-if(WIN32)
- add_definitions(-DWIN32)
- add_definitions(-D_WIN32=1)
- if(IS_64BIT_BUILD)
- add_definitions(-D_WIN64=1)
- endif()
-endif(WIN32)
-
add_executable(gcsample
${SOURCES}
)
diff --git a/src/Native/gc/sample/GCSample.cpp b/src/Native/gc/sample/GCSample.cpp
index 446956110..eb2c9aa9c 100644
--- a/src/Native/gc/sample/GCSample.cpp
+++ b/src/Native/gc/sample/GCSample.cpp
@@ -111,12 +111,15 @@ void WriteBarrier(Object ** dst, Object * ref)
ErectWriteBarrier(dst, ref);
}
-int main(int argc, char* argv[])
+int __cdecl main(int argc, char* argv[])
{
//
// Initialize system info
//
- InitializeSystemInfo();
+ if (!GCToOSInterface::Initialize())
+ {
+ return -1;
+ }
//
// Initialize free object methodtable. The GC uses a special array-like methodtable as placeholder
@@ -170,7 +173,7 @@ int main(int argc, char* argv[])
My_MethodTable;
// 'My' contains the MethodTable*
- size_t baseSize = sizeof(My);
+ uint32_t baseSize = sizeof(My);
// GC expects the size of ObjHeader (extra void*) to be included in the size.
baseSize = baseSize + sizeof(ObjHeader);
// Add padding as necessary. GC requires the object size to be at least MIN_OBJECT_SIZE.
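Editor's note: with InitializeSystemInfo gone, the sample's startup order now begins with the OS abstraction. A condensed sketch of the resulting main (the elided steps follow as in the file):

    int __cdecl main(int argc, char* argv[])
    {
        if (!GCToOSInterface::Initialize())   // the OS layer must come up first
            return -1;
        // ... set up the free-object MethodTable, initialize the GC heap,
        // allocate and collect as the rest of GCSample.cpp does ...
        return 0;
    }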
diff --git a/src/Native/gc/sample/GCSample.vcxproj b/src/Native/gc/sample/GCSample.vcxproj
index a0a79c59e..b196e1f34 100644
--- a/src/Native/gc/sample/GCSample.vcxproj
+++ b/src/Native/gc/sample/GCSample.vcxproj
@@ -50,7 +50,7 @@
<PrecompiledHeader>Use</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>WIN32;_X86_;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<PrecompiledHeaderFile>common.h</PrecompiledHeaderFile>
<AdditionalIncludeDirectories>.;..;..\env</AdditionalIncludeDirectories>
@@ -67,7 +67,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>WIN32;_X86_;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<SDLCheck>true</SDLCheck>
<AdditionalIncludeDirectories>.;..;..\env</AdditionalIncludeDirectories>
</ClCompile>
@@ -83,7 +83,8 @@
<ClInclude Include="gcenv.h" />
</ItemGroup>
<ItemGroup>
- <ClCompile Include="gcenv.cpp" />
+ <ClCompile Include="gcenv.ee.cpp" />
+ <ClCompile Include="gcenv.windows.cpp" />
<ClCompile Include="GCSample.cpp" />
<ClCompile Include="..\gccommon.cpp" />
<ClCompile Include="..\gceewks.cpp" />
@@ -94,7 +95,6 @@
<ClCompile Include="..\handletablecore.cpp" />
<ClCompile Include="..\handletablescan.cpp" />
<ClCompile Include="..\objecthandle.cpp" />
- <ClCompile Include="..\env\gcenv.windows.cpp" />
<ClCompile Include="..\env\common.cpp">
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
diff --git a/src/Native/gc/sample/GCSample.vcxproj.filters b/src/Native/gc/sample/GCSample.vcxproj.filters
index 1e9facde0..e46c05456 100644
--- a/src/Native/gc/sample/GCSample.vcxproj.filters
+++ b/src/Native/gc/sample/GCSample.vcxproj.filters
@@ -53,13 +53,13 @@
<ClCompile Include="..\gccommon.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\env\gcenv.windows.cpp">
+ <ClCompile Include="..\env\common.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\env\common.cpp">
+ <ClCompile Include="gcenv.ee.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="gcenv.cpp">
+ <ClCompile Include="gcenv.windows.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
diff --git a/src/Native/gc/sample/gcenv.cpp b/src/Native/gc/sample/gcenv.ee.cpp
index d213d6776..e17bb834f 100644
--- a/src/Native/gc/sample/gcenv.cpp
+++ b/src/Native/gc/sample/gcenv.ee.cpp
@@ -207,6 +207,17 @@ void GCToEEInterface::GcEnumAllocContexts (enum_alloc_context_func* fn, void* pa
}
}
+void GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC /*scanProc*/, uintptr_t /*lp1*/, uintptr_t /*lp2*/)
+{
+}
+
+void GCToEEInterface::SyncBlockCacheDemote(int /*max_gen*/)
+{
+}
+
+void GCToEEInterface::SyncBlockCachePromotionsGranted(int /*max_gen*/)
+{
+}
void FinalizerThread::EnableFinalization()
{
@@ -219,7 +230,7 @@ bool FinalizerThread::HaveExtraWorkForFinalizer()
return false;
}
-bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext)
+bool REDHAWK_PALAPI PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext)
{
// TODO: Implement for background GC
return false;
@@ -231,18 +242,6 @@ bool IsGCSpecialThread()
return false;
}
-void GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC /*scanProc*/, uintptr_t /*lp1*/, uintptr_t /*lp2*/)
-{
-}
-
-void GCToEEInterface::SyncBlockCacheDemote(int /*max_gen*/)
-{
-}
-
-void GCToEEInterface::SyncBlockCachePromotionsGranted(int /*max_gen*/)
-{
-}
-
void StompWriteBarrierEphemeral()
{
}
diff --git a/src/Native/gc/sample/gcenv.h b/src/Native/gc/sample/gcenv.h
index b6bcb005a..c09d012ec 100644
--- a/src/Native/gc/sample/gcenv.h
+++ b/src/Native/gc/sample/gcenv.h
@@ -16,12 +16,20 @@
#define _ASSERTE(_expr) ASSERT(_expr)
#endif
-#include "sal.h"
+typedef wchar_t WCHAR;
+#define W(s) L##s
+
#include "gcenv.structs.h"
#include "gcenv.base.h"
+#include "gcenv.ee.h"
+#include "gcenv.os.h"
+#include "gcenv.interlocked.h"
+#include "gcenv.interlocked.inl"
#include "gcenv.object.h"
#include "gcenv.sync.h"
+#define MAX_LONGPATH 1024
+
//
// Thread
//
diff --git a/src/Native/gc/env/gcenv.unix.cpp b/src/Native/gc/sample/gcenv.unix.cpp
index 30ad1d714..f3c502c78 100644
--- a/src/Native/gc/env/gcenv.unix.cpp
+++ b/src/Native/gc/sample/gcenv.unix.cpp
@@ -81,40 +81,6 @@ void UnsafeDeleteCriticalSection(CRITICAL_SECTION *lpCriticalSection)
pthread_mutex_destroy(&lpCriticalSection->mutex);
}
-
-void GetProcessMemoryLoad(GCMemoryStatus* pGCMemStatus)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- pGCMemStatus->dwMemoryLoad = 0;
- pGCMemStatus->ullTotalPageFile = 0;
- pGCMemStatus->ullAvailPageFile = 0;
-
- // There is no API to get the total virtual address space size on
- // Unix, so we use a constant value representing 128TB, which is
- // the approximate size of total user virtual address space on
- // the currently supported Unix systems.
- static const uint64_t _128TB = (1ull << 47);
- pGCMemStatus->ullTotalVirtual = _128TB;
- pGCMemStatus->ullAvailVirtual = _128TB;
-
- // TODO: Implement
- pGCMemStatus->ullTotalPhys = _128TB;
- pGCMemStatus->ullAvailPhys = _128TB;
-
- // If the machine has more RAM than virtual address limit, let us cap it.
- // Our GC can never use more than virtual address limit.
- if (pGCMemStatus->ullAvailPhys > pGCMemStatus->ullTotalVirtual)
- {
- pGCMemStatus->ullAvailPhys = pGCMemStatus->ullAvailVirtual;
- }
-}
-
#if 0
void CLREventStatic::CreateManualEvent(bool bInitialState)
{
@@ -199,18 +165,14 @@ uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable)
}
#endif // 0
-bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
+void DestroyThread(Thread * pThread)
{
- return sched_yield() == 0;
+ // TODO: implement
}
-void * ClrVirtualAlloc(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect)
+bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount)
{
- return ClrVirtualAllocAligned(lpAddress, dwSize, flAllocationType, flProtect, OS_PAGE_SIZE);
+ return sched_yield() == 0;
}
static int W32toUnixAccessControl(uint32_t flProtect)
@@ -232,87 +194,6 @@ static int W32toUnixAccessControl(uint32_t flProtect)
return prot;
}
-void * ClrVirtualAllocAligned(
- void * lpAddress,
- size_t dwSize,
- uint32_t flAllocationType,
- uint32_t flProtect,
- size_t dwAlignment)
-{
- if ((flAllocationType & ~(MEM_RESERVE | MEM_COMMIT)) != 0)
- {
- // TODO: Implement
- return NULL;
- }
-
- _ASSERTE(((size_t)lpAddress & (OS_PAGE_SIZE - 1)) == 0);
-
- // Align size to whole pages
- dwSize = (dwSize + (OS_PAGE_SIZE - 1)) & ~(OS_PAGE_SIZE - 1);
-
- if (flAllocationType & MEM_RESERVE)
- {
- size_t alignedSize = dwSize;
-
- if (dwAlignment > OS_PAGE_SIZE)
- alignedSize += (dwAlignment - OS_PAGE_SIZE);
-
- void * pRetVal = mmap(lpAddress, alignedSize, W32toUnixAccessControl(flProtect),
- MAP_ANON | MAP_PRIVATE, -1, 0);
-
- if (dwAlignment > OS_PAGE_SIZE && pRetVal != NULL)
- {
- void * pAlignedRetVal = (void *)(((size_t)pRetVal + (dwAlignment - 1)) & ~(dwAlignment - 1));
-
- size_t startPadding = (size_t)pAlignedRetVal - (size_t)pRetVal;
- if (startPadding != 0)
- {
- int ret = munmap(pRetVal, startPadding);
- _ASSERTE(ret == 0);
- }
-
- size_t endPadding = alignedSize - (startPadding + dwSize);
- if (endPadding != 0)
- {
- int ret = munmap((void *)((size_t)pAlignedRetVal + dwSize), endPadding);
- _ASSERTE(ret == 0);
- }
-
- pRetVal = pAlignedRetVal;
- }
-
- return pRetVal;
- }
-
- if (flAllocationType & MEM_COMMIT)
- {
- int ret = mprotect(lpAddress, dwSize, W32toUnixAccessControl(flProtect));
- return (ret == 0) ? lpAddress : NULL;
- }
-
- return NULL;
-}
-
-bool ClrVirtualFree(
- void * lpAddress,
- size_t dwSize,
- uint32_t dwFreeType)
-{
- // TODO: Implement
- return false;
-}
-
-bool
-ClrVirtualProtect(
- void * lpAddress,
- size_t dwSize,
- uint32_t flNewProtect,
- uint32_t * lpflOldProtect)
-{
- // TODO: Implement, not currently used
- return false;
-}
-
MethodTable * g_pFreeObjectMethodTable;
GCSystemInfo g_SystemInfo;
@@ -464,27 +345,6 @@ ResetWriteWatch(
return 1;
}
-WINBASEAPI
-BOOL
-WINAPI
-VirtualUnlock(
- LPVOID lpAddress,
- SIZE_T dwSize
- )
-{
- // TODO: Implement
- return false;
-}
-
-
-WINBASEAPI
-VOID
-WINAPI
-FlushProcessWriteBuffers()
-{
- // TODO: Implement
-}
-
const int tccSecondsToMillieSeconds = 1000;
const int tccSecondsToMicroSeconds = 1000000;
const int tccMillieSecondsToMicroSeconds = 1000; // 10^3
@@ -567,19 +427,6 @@ MemoryBarrier()
// File I/O - Used for tracking only
WINBASEAPI
-DWORD
-WINAPI
-SetFilePointer(
- HANDLE hFile,
- int32_t lDistanceToMove,
- int32_t * lpDistanceToMoveHigh,
- DWORD dwMoveMethod)
-{
- // TODO: Reimplement callers using CRT
- return 0;
-}
-
-WINBASEAPI
BOOL
WINAPI
FlushFileBuffers(
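Editor's note: the aligned-reservation logic deleted from ClrVirtualAllocAligned above is not lost; per the diffstat it presumably moves behind GCToOSInterface::VirtualReserve in the Runtime's Unix PAL. The technique itself, over-reserve with mmap and then munmap the leading and trailing padding, is worth recording. A standalone sketch (PROT_NONE for a reservation is an assumption matching PAGE_NOACCESS in the old caller):

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    static void* ReserveAligned(size_t size, size_t alignment, size_t pageSize)
    {
        // Over-reserve so an aligned sub-range is guaranteed to exist.
        size_t padded = size + (alignment > pageSize ? alignment - pageSize : 0);
        void* p = mmap(NULL, padded, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
            return NULL;

        uintptr_t raw     = (uintptr_t)p;
        uintptr_t aligned = (raw + alignment - 1) & ~(alignment - 1);

        if (aligned != raw)
            munmap(p, aligned - raw);                       // trim the front padding
        size_t tail = (raw + padded) - (aligned + size);
        if (tail != 0)
            munmap((void*)(aligned + size), tail);          // trim the tail padding

        return (void*)aligned;
    }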
diff --git a/src/Native/gc/sample/gcenv.windows.cpp b/src/Native/gc/sample/gcenv.windows.cpp
new file mode 100644
index 000000000..b9654a194
--- /dev/null
+++ b/src/Native/gc/sample/gcenv.windows.cpp
@@ -0,0 +1,455 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Implementation of the GC environment
+//
+
+#include "common.h"
+
+#include "windows.h"
+
+#include "gcenv.h"
+#include "gc.h"
+
+static LARGE_INTEGER performanceFrequency;
+
+MethodTable * g_pFreeObjectMethodTable;
+
+int32_t g_TrapReturningThreads;
+
+bool g_fFinalizerRunOnShutDown;
+
+GCSystemInfo g_SystemInfo;
+
+// Initialize the interface implementation
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::Initialize()
+{
+ if (!::QueryPerformanceFrequency(&performanceFrequency))
+ {
+ return false;
+ }
+
+ SYSTEM_INFO systemInfo;
+ GetSystemInfo(&systemInfo);
+
+ g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors;
+ g_SystemInfo.dwPageSize = systemInfo.dwPageSize;
+ g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity;
+
+ return true;
+}
+
+// Shutdown the interface implementation
+void GCToOSInterface::Shutdown()
+{
+}
+
+// Get numeric id of the current thread if possible on the
+// current platform. It is intended for logging purposes only.
+// Return:
+// Numeric id of the current thread, or 0 if the id cannot be obtained
+uint32_t GCToOSInterface::GetCurrentThreadIdForLogging()
+{
+ return ::GetCurrentThreadId();
+}
+
+// Get id of the process
+// Return:
+// Id of the current process
+uint32_t GCToOSInterface::GetCurrentProcessId()
+{
+ return ::GetCurrentProcessId();
+}
+
+// Set ideal affinity for the current thread
+// Parameters:
+// affinity - ideal processor affinity for the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
+{
+ bool success = true;
+
+#if !defined(FEATURE_CORESYSTEM)
+ SetThreadIdealProcessor(GetCurrentThread(), (DWORD)affinity->Processor);
+#else
+ PROCESSOR_NUMBER proc;
+
+ if (affinity->Group != -1)
+ {
+ proc.Group = (WORD)affinity->Group;
+ proc.Number = (BYTE)affinity->Processor;
+ proc.Reserved = 0;
+
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ else
+ {
+ if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc))
+ {
+ proc.Number = affinity->Processor;
+ success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL);
+ }
+ }
+#endif
+
+ return success;
+}
+
+// Get the number of the current processor
+uint32_t GCToOSInterface::GetCurrentProcessorNumber()
+{
+ _ASSERTE(GCToOSInterface::CanGetCurrentProcessorNumber());
+ return ::GetCurrentProcessorNumber();
+}
+
+// Check if the OS supports getting current processor number
+bool GCToOSInterface::CanGetCurrentProcessorNumber()
+{
+ return true;
+}
+
+// Flush write buffers of processors that are executing threads of the current process
+void GCToOSInterface::FlushProcessWriteBuffers()
+{
+ ::FlushProcessWriteBuffers();
+}
+
+// Break into a debugger
+void GCToOSInterface::DebugBreak()
+{
+ ::DebugBreak();
+}
+
+// Get number of logical processors
+uint32_t GCToOSInterface::GetLogicalCpuCount()
+{
+ return g_SystemInfo.dwNumberOfProcessors;
+}
+
+// Causes the calling thread to sleep for the specified number of milliseconds
+// Parameters:
+// sleepMSec - time to sleep before switching to another thread
+void GCToOSInterface::Sleep(uint32_t sleepMSec)
+{
+ ::Sleep(sleepMSec);
+}
+
+// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
+// Parameters:
+// switchCount - number of times the YieldThread was called in a loop
+void GCToOSInterface::YieldThread(uint32_t switchCount)
+{
+ SwitchToThread();
+}
+
+// Reserve virtual memory range.
+// Parameters:
+// address - starting virtual address, it can be NULL to let the function choose the starting address
+// size - size of the virtual memory range
+// alignment - requested memory alignment
+// flags - flags to control special settings like write watching
+// Return:
+// Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+{
+ DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE;
+ return ::VirtualAlloc(0, size, memFlags, PAGE_READWRITE);
+}
+
+// Release virtual memory range previously reserved using VirtualReserve
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualRelease(void* address, size_t size)
+{
+ UNREFERENCED_PARAMETER(size);
+ return !!::VirtualFree(address, 0, MEM_RELEASE);
+}
+
+// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+{
+ return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+}
+
+// Decommit virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
+{
+ return !!::VirtualFree(address, size, MEM_DECOMMIT);
+}
+
+// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
+// longer of interest, but it should not be decommitted.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+// unlock - true if the memory range should also be unlocked
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
+{
+ bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL;
+ if (success && unlock)
+ {
+ // Remove the page range from the working set
+ ::VirtualUnlock(address, size);
+ }
+
+ return success;
+}
+
+// Check if the OS supports write watching
+bool GCToOSInterface::SupportsWriteWatch()
+{
+ return false;
+}
+
+// Reset the write tracking state for the specified virtual memory range.
+// Parameters:
+// address - starting virtual address
+// size - size of the virtual memory range
+void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
+{
+}
+
+// Retrieve addresses of the pages that are written to in a region of virtual memory
+// Parameters:
+// resetState - true indicates to reset the write tracking state
+// address - starting virtual address
+// size - size of the virtual memory range
+// pageAddresses - buffer that receives an array of page addresses in the memory region
+// pageAddressesCount - on input, size of the pageAddresses array, in array elements
+// on output, the number of page addresses that are returned in the array.
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
+{
+ return false;
+}
+
+// Get size of the largest cache on the processor die
+// Parameters:
+// trueSize - true to return true cache size, false to return scaled up size based on
+// the processor architecture
+// Return:
+// Size of the cache
+size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize)
+{
+ // TODO: implement
+ return 0;
+}
+
+// Get affinity mask of the current process
+// Parameters:
+// processMask - affinity mask for the specified process
+// systemMask - affinity mask for the system
+// Return:
+// true if it has succeeded, false if it has failed
+// Remarks:
+// A process affinity mask is a bit vector in which each bit represents the processors that
+// a process is allowed to run on. A system affinity mask is a bit vector in which each bit
+// represents the processors that are configured into a system.
+// A process affinity mask is a subset of the system affinity mask. A process is only allowed
+// to run on the processors configured into a system. Therefore, the process affinity mask cannot
+// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
+bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask)
+{
+ return false;
+}
+
+// Get number of processors assigned to the current process
+// Return:
+// The number of processors
+uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
+{
+ return g_SystemInfo.dwNumberOfProcessors;
+}
+
+// Get global memory status
+// Parameters:
+// ms - pointer to the structure that will be filled in with the memory status
+void GCToOSInterface::GetMemoryStatus(GCMemoryStatus* ms)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ MEMORYSTATUSEX memStatus;
+
+ memStatus.dwLength = sizeof(MEMORYSTATUSEX);
+ BOOL fRet = GlobalMemoryStatusEx(&memStatus);
+ _ASSERTE (fRet);
+
+ // If the machine has more RAM than virtual address limit, let us cap it.
+ // Our GC can never use more than virtual address limit.
+ if (memStatus.ullAvailPhys > memStatus.ullTotalVirtual)
+ {
+ memStatus.ullAvailPhys = memStatus.ullAvailVirtual;
+ }
+
+ // Convert Windows struct to abstract struct
+    ms->dwMemoryLoad = memStatus.dwMemoryLoad;
+    ms->ullTotalPhys = memStatus.ullTotalPhys;
+    ms->ullAvailPhys = memStatus.ullAvailPhys;
+    ms->ullTotalPageFile = memStatus.ullTotalPageFile;
+    ms->ullAvailPageFile = memStatus.ullAvailPageFile;
+    ms->ullTotalVirtual = memStatus.ullTotalVirtual;
+    ms->ullAvailVirtual = memStatus.ullAvailVirtual;
+}
+
+// Get a high precision performance counter
+// Return:
+// The counter value
+int64_t GCToOSInterface::QueryPerformanceCounter()
+{
+ LARGE_INTEGER ts;
+ if (!::QueryPerformanceCounter(&ts))
+ {
+ _ASSERTE(!"Fatal Error - cannot query performance counter.");
+ abort();
+ }
+
+ return ts.QuadPart;
+}
+
+// Get a frequency of the high precision performance counter
+// Return:
+// The counter frequency
+int64_t GCToOSInterface::QueryPerformanceFrequency()
+{
+ LARGE_INTEGER frequency;
+ if (!::QueryPerformanceFrequency(&frequency))
+ {
+ _ASSERTE(!"Fatal Error - cannot query performance counter.");
+ abort();
+ }
+
+ return frequency.QuadPart;
+}
+
+// Get a time stamp with a low precision
+// Return:
+// Time stamp in milliseconds
+uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
+{
+ return ::GetTickCount();
+}
+
+// Parameters of the GC thread stub
+struct GCThreadStubParam
+{
+ GCThreadFunction GCThreadFunction;
+ void* GCThreadParam;
+};
+
+// GC thread stub to convert GC thread function to an OS specific thread function
+static DWORD __stdcall GCThreadStub(void* param)
+{
+ GCThreadStubParam *stubParam = (GCThreadStubParam*)param;
+ GCThreadFunction function = stubParam->GCThreadFunction;
+ void* threadParam = stubParam->GCThreadParam;
+
+ delete stubParam;
+
+ function(threadParam);
+
+ return 0;
+}
+
+// Create a new thread
+// Parameters:
+// function - the function to be executed by the thread
+// param - parameters of the thread
+// affinity - processor affinity of the thread
+// Return:
+// true if it has succeeded, false if it has failed
+bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity)
+{
+ DWORD thread_id;
+
+ GCThreadStubParam* stubParam = new (nothrow) GCThreadStubParam();
+ if (stubParam == NULL)
+ {
+ return false;
+ }
+
+ stubParam->GCThreadFunction = function;
+ stubParam->GCThreadParam = param;
+
+ HANDLE gc_thread = ::CreateThread(NULL, 0, GCThreadStub, stubParam, CREATE_SUSPENDED, &thread_id);
+
+ if (!gc_thread)
+ {
+ delete stubParam;
+ return false;
+ }
+
+ SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST );
+
+ ResumeThread(gc_thread);
+
+ CloseHandle(gc_thread);
+
+ return true;
+}
+
+// Open a file
+// Parameters:
+// filename - name of the file to open
+// mode - mode to open the file in (like in the CRT fopen)
+// Return:
+// FILE* of the opened file
+FILE* GCToOSInterface::OpenFile(const WCHAR* filename, const WCHAR* mode)
+{
+ return _wfopen(filename, mode);
+}
+
+// Initialize the critical section
+void CLRCriticalSection::Initialize()
+{
+ ::InitializeCriticalSection(&m_cs);
+}
+
+// Destroy the critical section
+void CLRCriticalSection::Destroy()
+{
+ ::DeleteCriticalSection(&m_cs);
+}
+
+// Enter the critical section. Blocks until the section can be entered.
+void CLRCriticalSection::Enter()
+{
+ ::EnterCriticalSection(&m_cs);
+}
+
+// Leave the critical section
+void CLRCriticalSection::Leave()
+{
+ ::LeaveCriticalSection(&m_cs);
+}
+
+void DestroyThread(Thread * pThread)
+{
+ // TODO: implement
+}