github.com/dotnet/runtime.git
author    Jan Vorlicek <jan.vorlicek@volny.cz>    2021-07-11 17:46:53 +0300
committer GitHub <noreply@github.com>             2021-07-11 17:46:53 +0300
commit    6a47ecf4a8ee670356f5e554f08afb0b32cdac9a (patch)
tree      eee15291b1ce2ad0c90c2e13b646d6e83fdf472b /src/coreclr/minipal
parent    83a4d3cc02fb04fce17b24fc09b3cdf77a12ba51 (diff)
W^X support (#54954)
* W^X support

  This change is the last part of enabling W^X support. It adds the actual
  executable allocator that handles all double-mapped memory allocations and
  creates the writeable mappings. The platform-specific functionality is
  placed in a new minipal that is going to be the basis for future removal
  of Windows API usage from the native runtime.

  The final state of the change was tested on all platforms we support,
  using coreclr pri 1 tests with W^X both enabled and disabled via the
  COMPlus_EnableWriteXorExecute variable. The debugger changes were tested
  using the managed debugger test suite on Windows x64, x86, and on Apple
  Silicon so far; further testing on other platforms is in progress.

* Replace LeafLock in UMEntryThunkFreeList with a new lock

* Also allocate LoaderHeapFreeBlock from the regular heap

* Set the W^X default to disabled
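The core mechanism is easier to see in isolation. The standalone sketch below is an illustration, not code from this commit; it assumes Linux with glibc 2.27+ (for memfd_create) and an x86-64 host (for the embedded machine code). The same anonymous shared-memory file is mapped twice, so code written through the RW view can be executed through the RX view while no page is ever writeable and executable at once:

// demo_doublemap.cpp - hypothetical illustration of the double-mapping idea.
// Build: g++ demo_doublemap.cpp && ./a.out
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    size_t size = (size_t)sysconf(_SC_PAGE_SIZE);

    // Anonymous shared-memory file backing both views - the same primitive
    // the Unix minipal below uses.
    int fd = memfd_create("doublemap-demo", MFD_CLOEXEC);
    if (fd == -1 || ftruncate(fd, (off_t)size) == -1)
        return 1;

    // RW view: generated code is written here.
    void* rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    // RX view: the CPU executes from here; it is never writeable.
    void* rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (rw == MAP_FAILED || rx == MAP_FAILED)
        return 1;

    // x86-64 machine code for "mov eax, 42; ret".
    const uint8_t code[] = { 0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3 };
    memcpy(rw, code, sizeof(code));

    printf("%d\n", ((int (*)())rx)()); // prints 42, run through the RX alias

    munmap(rw, size);
    munmap(rx, size);
    close(fd);
    return 0;
}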
Diffstat (limited to 'src/coreclr/minipal')
-rw-r--r--  src/coreclr/minipal/CMakeLists.txt            |   7
-rw-r--r--  src/coreclr/minipal/Unix/CMakeLists.txt       |   4
-rw-r--r--  src/coreclr/minipal/Unix/doublemapping.cpp    | 211
-rw-r--r--  src/coreclr/minipal/Windows/CMakeLists.txt    |   4
-rw-r--r--  src/coreclr/minipal/Windows/doublemapping.cpp | 205
-rw-r--r--  src/coreclr/minipal/minipal.h                 |  78
6 files changed, 509 insertions(+), 0 deletions(-)
diff --git a/src/coreclr/minipal/CMakeLists.txt b/src/coreclr/minipal/CMakeLists.txt
new file mode 100644
index 00000000000..3096237d2a2
--- /dev/null
+++ b/src/coreclr/minipal/CMakeLists.txt
@@ -0,0 +1,7 @@
+include_directories(.)
+if (CLR_CMAKE_HOST_UNIX)
+ add_subdirectory(Unix)
+else (CLR_CMAKE_HOST_UNIX)
+ add_subdirectory(Windows)
+endif (CLR_CMAKE_HOST_UNIX)
+
diff --git a/src/coreclr/minipal/Unix/CMakeLists.txt b/src/coreclr/minipal/Unix/CMakeLists.txt
new file mode 100644
index 00000000000..b56b5017d37
--- /dev/null
+++ b/src/coreclr/minipal/Unix/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_library(coreclrminipal
+ STATIC
+ doublemapping.cpp
+)
diff --git a/src/coreclr/minipal/Unix/doublemapping.cpp b/src/coreclr/minipal/Unix/doublemapping.cpp
new file mode 100644
index 00000000000..a50b326861a
--- /dev/null
+++ b/src/coreclr/minipal/Unix/doublemapping.cpp
@@ -0,0 +1,211 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <limits.h>
+#include <errno.h>
+#ifdef TARGET_LINUX
+#include <linux/memfd.h>
+#include <sys/syscall.h> // __NR_memfd_create
+#endif // TARGET_LINUX
+#include "minipal.h"
+
+#if defined(TARGET_OSX) && defined(TARGET_AMD64)
+#include <mach/mach.h>
+#endif // TARGET_OSX && TARGET_AMD64
+
+#ifndef TARGET_OSX
+
+#ifdef TARGET_64BIT
+static const off_t MaxDoubleMappedSize = 2048ULL*1024*1024*1024;
+#else
+static const off_t MaxDoubleMappedSize = UINT_MAX;
+#endif
+
+#ifdef TARGET_LINUX
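+// Older glibc versions do not provide a memfd_create wrapper, so call the syscall directly.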
+#define memfd_create(...) syscall(__NR_memfd_create, __VA_ARGS__)
+#endif // TARGET_LINUX
+
+#endif // TARGET_OSX
+
+bool VMToOSInterface::CreateDoubleMemoryMapper(void** pHandle, size_t *pMaxExecutableCodeSize)
+{
+#ifndef TARGET_OSX
+
+#ifdef TARGET_FREEBSD
+ int fd = shm_open(SHM_ANON, O_RDWR | O_CREAT, S_IRWXU);
+#else // TARGET_FREEBSD
+ int fd = memfd_create("doublemapper", MFD_CLOEXEC);
+#endif // TARGET_FREEBSD
+
+ if (fd == -1)
+ {
+ return false;
+ }
+
+ if (ftruncate(fd, MaxDoubleMappedSize) == -1)
+ {
+ close(fd);
+ return false;
+ }
+
+ *pMaxExecutableCodeSize = MaxDoubleMappedSize;
+ *pHandle = (void*)(size_t)fd;
+#else // !TARGET_OSX
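+ // macOS does not use a file-backed mapper; executable memory is reserved
+ // with MAP_JIT and RW aliases are created via vm_remap (see GetRWMapping).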
+ *pMaxExecutableCodeSize = SIZE_MAX;
+ *pHandle = NULL;
+#endif // !TARGET_OSX
+
+ return true;
+}
+
+void VMToOSInterface::DestroyDoubleMemoryMapper(void *mapperHandle)
+{
+#ifndef TARGET_OSX
+ close((int)(size_t)mapperHandle);
+#endif
+}
+
+extern "C" void* PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(const void* lpBeginAddress, const void* lpEndAddress, size_t dwSize);
+
+#ifdef TARGET_OSX
+bool IsMapJitFlagNeeded()
+{
+ static volatile int isMapJitFlagNeeded = -1;
+
+ if (isMapJitFlagNeeded == -1)
+ {
+ int mapJitFlagCheckResult = 0;
+ int pageSize = sysconf(_SC_PAGE_SIZE);
+ // Try to map a page with read-write-execute protection. It should fail on Mojave hardened runtime and higher.
+ void* testPage = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (testPage == MAP_FAILED && (errno == EACCES))
+ {
+ // The mapping has failed with EACCES, check if making the same mapping with MAP_JIT flag works
+ testPage = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_JIT, -1, 0);
+ if (testPage != MAP_FAILED)
+ {
+ mapJitFlagCheckResult = 1;
+ }
+ }
+
+ if (testPage != MAP_FAILED)
+ {
+ munmap(testPage, pageSize);
+ }
+
+ isMapJitFlagNeeded = mapJitFlagCheckResult;
+ }
+
+ return (bool)isMapJitFlagNeeded;
+}
+#endif // TARGET_OSX
+
+void* VMToOSInterface::ReserveDoubleMappedMemory(void *mapperHandle, size_t offset, size_t size, const void *rangeStart, const void* rangeEnd)
+{
+ int fd = (int)(size_t)mapperHandle;
+
+ if (rangeStart != NULL || rangeEnd != NULL)
+ {
+ void* result = PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(rangeStart, rangeEnd, size);
+#ifndef TARGET_OSX
+ if (result != NULL)
+ {
+ // Map the shared memory over the range reserved from the executable memory allocator.
+ result = mmap(result, size, PROT_NONE, MAP_SHARED | MAP_FIXED, fd, offset);
+ if (result == MAP_FAILED)
+ {
+ assert(false);
+ result = NULL;
+ }
+ }
+#endif // !TARGET_OSX
+
+ return result;
+ }
+
+#ifndef TARGET_OSX
+ void* result = mmap(NULL, size, PROT_NONE, MAP_SHARED, fd, offset);
+#else
+ int mmapFlags = MAP_ANON | MAP_PRIVATE;
+ if (IsMapJitFlagNeeded())
+ {
+ mmapFlags |= MAP_JIT;
+ }
+ void* result = mmap(NULL, size, PROT_NONE, mmapFlags, -1, 0);
+#endif
+ if (result == MAP_FAILED)
+ {
+ assert(false);
+ result = NULL;
+ }
+ return result;
+}
+
+void *VMToOSInterface::CommitDoubleMappedMemory(void* pStart, size_t size, bool isExecutable)
+{
+ if (mprotect(pStart, size, isExecutable ? (PROT_READ | PROT_EXEC) : (PROT_READ | PROT_WRITE)) == -1)
+ {
+ return NULL;
+ }
+
+ return pStart;
+}
+
+bool VMToOSInterface::ReleaseDoubleMappedMemory(void *mapperHandle, void* pStart, size_t offset, size_t size)
+{
+#ifndef TARGET_OSX
+ int fd = (int)(size_t)mapperHandle;
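+ // Map the range back as RW and zero it so stale code does not leak when
+ // the underlying shared memory is reused.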
+ mmap(pStart, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, offset);
+ memset(pStart, 0, size);
+#endif // !TARGET_OSX
+ return munmap(pStart, size) != -1;
+}
+
+void* VMToOSInterface::GetRWMapping(void *mapperHandle, void* pStart, size_t offset, size_t size)
+{
+#ifndef TARGET_OSX
+ int fd = (int)(size_t)mapperHandle;
+ void* result = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset);
+ return (result != MAP_FAILED) ? result : NULL;
+#else // TARGET_OSX
+#ifdef TARGET_AMD64
+ vm_address_t startRW;
+ vm_prot_t curProtection, maxProtection;
+ kern_return_t kr = vm_remap(mach_task_self(), &startRW, size, 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR,
+ mach_task_self(), (vm_address_t)pStart, FALSE, &curProtection, &maxProtection, VM_INHERIT_NONE);
+
+ if (kr != KERN_SUCCESS)
+ {
+ return NULL;
+ }
+
+ int st = mprotect((void*)startRW, size, PROT_READ | PROT_WRITE);
+ if (st == -1)
+ {
+ munmap((void*)startRW, size);
+ return NULL;
+ }
+
+ return (void*)startRW;
+#else // TARGET_AMD64
+ // This method should not be called on OSX ARM64
+ assert(false);
+ return NULL;
+#endif // TARGET_AMD64
+#endif // TARGET_OSX
+}
+
+bool VMToOSInterface::ReleaseRWMapping(void* pStart, size_t size)
+{
+ return munmap(pStart, size) != -1;
+}
diff --git a/src/coreclr/minipal/Windows/CMakeLists.txt b/src/coreclr/minipal/Windows/CMakeLists.txt
new file mode 100644
index 00000000000..b56b5017d37
--- /dev/null
+++ b/src/coreclr/minipal/Windows/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_library(coreclrminipal
+ STATIC
+ doublemapping.cpp
+)
diff --git a/src/coreclr/minipal/Windows/doublemapping.cpp b/src/coreclr/minipal/Windows/doublemapping.cpp
new file mode 100644
index 00000000000..e265f1d139a
--- /dev/null
+++ b/src/coreclr/minipal/Windows/doublemapping.cpp
@@ -0,0 +1,205 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+
+#include <windows.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "minipal.h"
+
+#define HIDWORD(_qw) ((ULONG)((_qw) >> 32))
+#define LODWORD(_qw) ((ULONG)(_qw))
+
+#ifdef TARGET_64BIT
+static const uint64_t MaxDoubleMappedSize = 2048ULL*1024*1024*1024;
+#else
+static const uint64_t MaxDoubleMappedSize = UINT_MAX;
+#endif
+
+#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB)
+inline size_t ALIGN_UP( size_t val, size_t alignment )
+{
+ // alignment must be a power of 2 for this implementation to work (need modulo otherwise)
+ assert( 0 == (alignment & (alignment - 1)) );
+ size_t result = (val + (alignment - 1)) & ~(alignment - 1);
+ assert( result >= val ); // check for overflow
+ return result;
+}
+
+template <typename T> inline T ALIGN_UP(T val, size_t alignment)
+{
+ return (T)ALIGN_UP((size_t)val, alignment);
+}
+
+inline void *GetTopMemoryAddress(void)
+{
+ static void *result; // = NULL;
+ if( NULL == result )
+ {
+ SYSTEM_INFO sysInfo;
+ GetSystemInfo( &sysInfo );
+ result = sysInfo.lpMaximumApplicationAddress;
+ }
+ return result;
+}
+
+inline void *GetBotMemoryAddress(void)
+{
+ static void *result; // = NULL;
+ if( NULL == result )
+ {
+ SYSTEM_INFO sysInfo;
+ GetSystemInfo( &sysInfo );
+ result = sysInfo.lpMinimumApplicationAddress;
+ }
+ return result;
+}
+
+#define TOP_MEMORY (GetTopMemoryAddress())
+#define BOT_MEMORY (GetBotMemoryAddress())
+
+bool VMToOSInterface::CreateDoubleMemoryMapper(void **pHandle, size_t *pMaxExecutableCodeSize)
+{
+ *pMaxExecutableCodeSize = (size_t)MaxDoubleMappedSize;
+ *pHandle = CreateFileMapping(
+ INVALID_HANDLE_VALUE, // use paging file
+ NULL, // default security
+ PAGE_EXECUTE_READWRITE | SEC_RESERVE, // read/write/execute access
+ HIDWORD(MaxDoubleMappedSize), // maximum object size (high-order DWORD)
+ LODWORD(MaxDoubleMappedSize), // maximum object size (low-order DWORD)
+ NULL);
+
+ return *pHandle != NULL;
+}
+
+void VMToOSInterface::DestroyDoubleMemoryMapper(void *mapperHandle)
+{
+ CloseHandle((HANDLE)mapperHandle);
+}
+
+void* VMToOSInterface::ReserveDoubleMappedMemory(void *mapperHandle, size_t offset, size_t size, const void *pMinAddr, const void* pMaxAddr)
+{
+ BYTE *pResult = nullptr; // our return value
+
+ if (size == 0)
+ {
+ return nullptr;
+ }
+
+ //
+ // First lets normalize the pMinAddr and pMaxAddr values
+ //
+ // If pMinAddr is NULL then set it to BOT_MEMORY
+ if ((pMinAddr == 0) || (pMinAddr < (BYTE *) BOT_MEMORY))
+ {
+ pMinAddr = (BYTE *) BOT_MEMORY;
+ }
+
+ // If pMaxAddr is NULL then set it to TOP_MEMORY
+ if ((pMaxAddr == 0) || (pMaxAddr > (BYTE *) TOP_MEMORY))
+ {
+ pMaxAddr = (BYTE *) TOP_MEMORY;
+ }
+
+ // If pMaxAddr is not greater than pMinAddr we can not make an allocation
+ if (pMaxAddr <= pMinAddr)
+ {
+ return nullptr;
+ }
+
+ // If pMinAddr is BOT_MEMORY and pMaxAddr is TOP_MEMORY,
+ // we can let MapViewOfFile pick the address
+ if ((pMinAddr == (BYTE *) BOT_MEMORY) && (pMaxAddr == (BYTE *) TOP_MEMORY))
+ {
+ return (BYTE*)MapViewOfFile((HANDLE)mapperHandle,
+ FILE_MAP_EXECUTE | FILE_MAP_READ | FILE_MAP_WRITE,
+ HIDWORD((int64_t)offset),
+ LODWORD((int64_t)offset),
+ size);
+ }
+
+ // We will do one scan over [pMinAddr .. pMaxAddr].
+ // First align tryAddr up to the next 64 KB boundary; see the VirtualAllocEx
+ // documentation of lpAddress for why reservations are 64 KB granular.
+ //
+ BYTE * tryAddr = (BYTE *)ALIGN_UP((BYTE *)pMinAddr, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
+ bool virtualQueryFailed = false;
+ bool faultInjected = false;
+ unsigned virtualQueryCount = 0;
+
+ // Now scan memory and try to find a free block of the size requested.
+ while ((tryAddr + size) <= (BYTE *) pMaxAddr)
+ {
+ MEMORY_BASIC_INFORMATION mbInfo;
+
+ // Use VirtualQuery to find out if this address is MEM_FREE
+ //
+ virtualQueryCount++;
+ if (!VirtualQuery((LPCVOID)tryAddr, &mbInfo, sizeof(mbInfo)))
+ {
+ // Exit and return nullptr if the VirtualQuery call fails.
+ virtualQueryFailed = true;
+ break;
+ }
+
+ // Is there enough memory free from this start location?
+ // (for a MEM_FREE region, mbInfo.RegionSize is the size of the free region starting at mbInfo.BaseAddress)
+ if ((mbInfo.State == MEM_FREE) &&
+ (mbInfo.RegionSize >= (SIZE_T) size || mbInfo.RegionSize == 0))
+ {
+ // Try mapping the file view at this address now
+ pResult = (BYTE*)MapViewOfFileEx((HANDLE)mapperHandle,
+ FILE_MAP_EXECUTE | FILE_MAP_READ | FILE_MAP_WRITE,
+ HIDWORD((int64_t)offset),
+ LODWORD((int64_t)offset),
+ size,
+ tryAddr);
+
+ // Normally this will be successful
+ //
+ if (pResult != nullptr)
+ {
+ // return pResult
+ break;
+ }
+
+ // We might fail in a race. So just move on to next region and continue trying
+ tryAddr = tryAddr + VIRTUAL_ALLOC_RESERVE_GRANULARITY;
+ }
+ else
+ {
+ // Try another section of memory
+ tryAddr = max(tryAddr + VIRTUAL_ALLOC_RESERVE_GRANULARITY,
+ (BYTE*) mbInfo.BaseAddress + mbInfo.RegionSize);
+ }
+ }
+
+ return pResult;
+}
+
+void *VMToOSInterface::CommitDoubleMappedMemory(void* pStart, size_t size, bool isExecutable)
+{
+ return VirtualAlloc(pStart, size, MEM_COMMIT, isExecutable ? PAGE_EXECUTE_READ : PAGE_READWRITE);
+}
+
+bool VMToOSInterface::ReleaseDoubleMappedMemory(void *mapperHandle, void* pStart, size_t offset, size_t size)
+{
+ // Zero the memory before the unmapping
+ VirtualAlloc(pStart, size, MEM_COMMIT, PAGE_READWRITE);
+ memset(pStart, 0, size);
+ return UnmapViewOfFile(pStart);
+}
+
+void* VMToOSInterface::GetRWMapping(void *mapperHandle, void* pStart, size_t offset, size_t size)
+{
+ return (BYTE*)MapViewOfFile((HANDLE)mapperHandle,
+ FILE_MAP_READ | FILE_MAP_WRITE,
+ HIDWORD((int64_t)offset),
+ LODWORD((int64_t)offset),
+ size);
+}
+
+bool VMToOSInterface::ReleaseRWMapping(void* pStart, size_t size)
+{
+ return UnmapViewOfFile(pStart);
+}
diff --git a/src/coreclr/minipal/minipal.h b/src/coreclr/minipal/minipal.h
new file mode 100644
index 00000000000..39098f9bc12
--- /dev/null
+++ b/src/coreclr/minipal/minipal.h
@@ -0,0 +1,78 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+#pragma once
+#include <stddef.h>
+
+// Interface between the runtime and platform specific functionality
+class VMToOSInterface
+{
+private:
+ ~VMToOSInterface() {}
+public:
+ // Create double mapped memory mapper
+ // Parameters:
+ // pHandle - receives handle of the double mapped memory mapper
+ // pMaxExecutableCodeSize - receives the maximum executable memory size it can map
+ // Return:
+ // true if it succeeded, false if it failed
+ static bool CreateDoubleMemoryMapper(void **pHandle, size_t *pMaxExecutableCodeSize);
+
+ // Destroy the double mapped memory mapper represented by the passed in handle
+ // Parameters:
+ // mapperHandle - handle of the double mapped memory mapper to destroy
+ static void DestroyDoubleMemoryMapper(void *mapperHandle);
+
+ // Reserve a block of memory that can be double mapped.
+ // Parameters:
+ // mapperHandle - handle of the double mapped memory mapper to use
+ // offset - offset in the underlying shared memory
+ // size - size of the block to reserve
+ // rangeStart
+ // rangeEnd - requested virtual address range for the reservation.
+ // Setting both rangeStart and rangeEnd to 0 means the
+ // range is not constrained. When a specific range is
+ // requested, the reservation is guaranteed to fall within it.
+ // Return:
+ // starting virtual address of the reserved memory or NULL if it failed
+ static void* ReserveDoubleMappedMemory(void *mapperHandle, size_t offset, size_t size, const void *rangeStart, const void* rangeEnd);
+
+ // Commit a block of memory in the range previously reserved by ReserveDoubleMappedMemory
+ // Parameters:
+ // pStart - start address of the virtual address range to commit
+ // size - size of the memory block to commit
+ // isExecutable - true means that the mapping should be RX, false means RW
+ // Return:
+ // starting address of the committed range or NULL if it failed
+ static void* CommitDoubleMappedMemory(void* pStart, size_t size, bool isExecutable);
+
+ // Release a block of virtual memory previously committed by CommitDoubleMappedMemory
+ // Parameters:
+ // mapperHandle - handle of the double mapped memory mapper to use
+ // pStart - start address of the virtual address range to release. It must be
+ // an address previously returned by CommitDoubleMappedMemory
+ // offset - offset in the underlying shared memory
+ // size - size of the memory block to release
+ // Return:
+ // true if it succeeded, false if it failed
+ static bool ReleaseDoubleMappedMemory(void *mapperHandle, void* pStart, size_t offset, size_t size);
+
+ // Get a RW mapping for the RX block specified by the arguments
+ // Parameters:
+ // mapperHandle - handle of the double mapped memory mapper to use
+ // pStart - start address of the RX virtual address range.
+ // offset - offset in the underlying shared memory
+ // size - size of the memory block to map as RW
+ // Return:
+ // starting virtual address of the RW mapping or NULL if it failed
+ static void* GetRWMapping(void *mapperHandle, void* pStart, size_t offset, size_t size);
+
+ // Release RW mapping of the block specified by the arguments
+ // Parameters:
+ // pStart - start address of the RW virtual address range. It must be an address
+ // previously returned by GetRWMapping.
+ // size - size of the memory block to release. It must be the size previously
+ // passed to the GetRWMapping call that returned pStart.
+ // Return:
+ // true if it succeeded, false if it failed
+ static bool ReleaseRWMapping(void* pStart, size_t size);
+};
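A hypothetical end-to-end sketch of how a caller might drive this interface; the EmitCode wrapper and its names are illustrative, not runtime code, and offset/size are assumed to satisfy the platform's page and allocation-granularity requirements:

// Hypothetical caller of VMToOSInterface - illustrative only.
#include <cstring>
#include "minipal.h"

void* EmitCode(const void* code, size_t size) // size assumed suitably aligned
{
    void* mapper;
    size_t maxSize;
    if (!VMToOSInterface::CreateDoubleMemoryMapper(&mapper, &maxSize) || size > maxSize)
        return NULL;

    // Reserve an RX-capable block at offset 0 with no address-range constraint,
    // then commit it as RX; it is never writeable through this address.
    void* rx = VMToOSInterface::ReserveDoubleMappedMemory(mapper, 0, size, NULL, NULL);
    if (rx == NULL || VMToOSInterface::CommitDoubleMappedMemory(rx, size, true) == NULL)
        return NULL;

    // Write the code through a short-lived RW alias of the same pages.
    void* rw = VMToOSInterface::GetRWMapping(mapper, rx, 0, size);
    if (rw == NULL)
        return NULL;
    memcpy(rw, code, size);
    VMToOSInterface::ReleaseRWMapping(rw, size);

    // rx is now executable. A real caller would keep mapper around and
    // eventually call ReleaseDoubleMappedMemory and DestroyDoubleMemoryMapper.
    return rx;
}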