Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/windirstat/llfio.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--cmake/tests.cmake1
-rw-r--r--include/llfio/revision.hpp6
-rw-r--r--include/llfio/v2.0/algorithm/trivial_vector.hpp4
-rw-r--r--include/llfio/v2.0/detail/impl/posix/map_handle.ipp12
-rw-r--r--include/llfio/v2.0/detail/impl/posix/mapped_file_handle.ipp9
-rw-r--r--include/llfio/v2.0/detail/impl/posix/utils.ipp4
-rw-r--r--include/llfio/v2.0/detail/impl/storage_profile.ipp2
-rw-r--r--include/llfio/v2.0/detail/impl/windows/map_handle.ipp105
-rw-r--r--include/llfio/v2.0/detail/impl/windows/mapped_file_handle.ipp5
-rw-r--r--include/llfio/v2.0/detail/impl/windows/utils.ipp4
-rw-r--r--include/llfio/v2.0/map_handle.hpp48
-rw-r--r--include/llfio/v2.0/mapped.hpp4
-rw-r--r--include/llfio/v2.0/mapped_file_handle.hpp8
-rw-r--r--include/llfio/v2.0/utils.hpp22
-rw-r--r--release_notes.md3
-rw-r--r--test/tests/trivial_vector.cpp27
16 files changed, 187 insertions, 77 deletions
diff --git a/cmake/tests.cmake b/cmake/tests.cmake
index 77a678c4..8975c1eb 100644
--- a/cmake/tests.cmake
+++ b/cmake/tests.cmake
@@ -15,6 +15,7 @@ set(llfio_TESTS
"test/tests/directory_handle_enumerate/runner.cpp"
"test/tests/file_handle_create_close/runner.cpp"
"test/tests/file_handle_lock_unlock.cpp"
+ "test/tests/large_pages.cpp"
"test/tests/map_handle_create_close/runner.cpp"
"test/tests/mapped.cpp"
"test/tests/path_discovery.cpp"
diff --git a/include/llfio/revision.hpp b/include/llfio/revision.hpp
index c2135c38..eef43be5 100644
--- a/include/llfio/revision.hpp
+++ b/include/llfio/revision.hpp
@@ -1,4 +1,4 @@
// Note the second line of this file must ALWAYS be the git SHA, third line ALWAYS the git SHA update time
-#define LLFIO_PREVIOUS_COMMIT_REF 5f82c897584065164c593ea0ca3a602ac651198c
-#define LLFIO_PREVIOUS_COMMIT_DATE "2018-08-24 08:59:33 +00:00"
-#define LLFIO_PREVIOUS_COMMIT_UNIQUE 5f82c897
+#define LLFIO_PREVIOUS_COMMIT_REF 011874565dbbd95680ffbc7db8b250593760376b
+#define LLFIO_PREVIOUS_COMMIT_DATE "2018-08-28 18:21:40 +00:00"
+#define LLFIO_PREVIOUS_COMMIT_UNIQUE 01187456
diff --git a/include/llfio/v2.0/algorithm/trivial_vector.hpp b/include/llfio/v2.0/algorithm/trivial_vector.hpp
index f94de9ac..2faff3f8 100644
--- a/include/llfio/v2.0/algorithm/trivial_vector.hpp
+++ b/include/llfio/v2.0/algorithm/trivial_vector.hpp
@@ -314,7 +314,7 @@ namespace algorithm
}
size_type current_size = size();
size_type bytes = n * sizeof(value_type);
- bytes = utils::round_up_to_page_size(bytes);
+ bytes = utils::round_up_to_page_size(bytes, _mh.page_size());
if(!_sh.is_valid())
{
_sh = section_handle::section(bytes).value();
@@ -348,7 +348,7 @@ namespace algorithm
{
size_type current_size = size();
size_type bytes = current_size * sizeof(value_type);
- bytes = utils::round_up_to_page_size(bytes);
+ bytes = utils::round_up_to_page_size(bytes, _mh.page_size());
if(bytes / sizeof(value_type) == capacity())
{
return;
diff --git a/include/llfio/v2.0/detail/impl/posix/map_handle.ipp b/include/llfio/v2.0/detail/impl/posix/map_handle.ipp
index 1c174f03..efb393c4 100644
--- a/include/llfio/v2.0/detail/impl/posix/map_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/posix/map_handle.ipp
@@ -312,7 +312,7 @@ result<map_handle> map_handle::map(size_type bytes, bool /*unused*/, section_han
{
return errc::argument_out_of_domain;
}
- bytes = utils::round_up_to_page_size(bytes);
+ bytes = utils::round_up_to_page_size(bytes, /*FIXME*/ utils::page_size());
result<map_handle> ret(map_handle(nullptr));
native_handle_type &nativeh = ret.value()._v;
OUTCOME_TRY(addr, do_mmap(nativeh, nullptr, 0, nullptr, bytes, 0, _flag));
@@ -352,7 +352,7 @@ result<map_handle::size_type> map_handle::truncate(size_type newsize, bool permi
OUTCOME_TRY(length_, _section->length()); // length of the backing file
length = length_;
}
- newsize = utils::round_up_to_page_size(newsize);
+ newsize = utils::round_up_to_page_size(newsize, _pagesize);
if(newsize == _reservation)
{
return success();
@@ -432,7 +432,7 @@ result<map_handle::buffer_type> map_handle::commit(buffer_type region, section_h
return errc::invalid_argument;
}
// Set permissions on the pages
- region = utils::round_to_page_size(region);
+ region = utils::round_to_page_size(region, _pagesize);
extent_type offset = _offset + (region.data() - _addr);
size_type bytes = region.size();
OUTCOME_TRYV(do_mmap(_v, region.data(), MAP_FIXED, _section, bytes, offset, flag));
@@ -451,7 +451,7 @@ result<map_handle::buffer_type> map_handle::decommit(buffer_type region) noexcep
{
return errc::invalid_argument;
}
- region = utils::round_to_page_size(region);
+ region = utils::round_to_page_size(region, _pagesize);
// Tell the kernel to kick these pages into storage
if(-1 == ::madvise(region.data(), region.size(), MADV_DONTNEED))
{
@@ -472,7 +472,7 @@ result<void> map_handle::zero_memory(buffer_type region) noexcept
return errc::invalid_argument;
}
#ifdef MADV_REMOVE
- buffer_type page_region{utils::round_up_to_page_size(region.data()), utils::round_down_to_page_size(region.size())};
+ buffer_type page_region{utils::round_up_to_page_size(region.data(), _pagesize), utils::round_down_to_page_size(region.size(), _pagesize)};
// Zero contents and punch a hole in any backing storage
if((page_region.size() != 0u) && -1 != ::madvise(page_region.data(), page_region.size(), MADV_REMOVE))
{
@@ -502,7 +502,7 @@ result<span<map_handle::buffer_type>> map_handle::prefetch(span<buffer_type> reg
result<map_handle::buffer_type> map_handle::do_not_store(buffer_type region) noexcept
{
LLFIO_LOG_FUNCTION_CALL(0);
- region = utils::round_to_page_size(region);
+ region = utils::round_to_page_size(region, _pagesize);
if(region.data() == nullptr)
{
return errc::invalid_argument;
diff --git a/include/llfio/v2.0/detail/impl/posix/mapped_file_handle.ipp b/include/llfio/v2.0/detail/impl/posix/mapped_file_handle.ipp
index 10ff177d..0ed3daae 100644
--- a/include/llfio/v2.0/detail/impl/posix/mapped_file_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/posix/mapped_file_handle.ipp
@@ -40,7 +40,6 @@ result<mapped_file_handle::size_type> mapped_file_handle::reserve(size_type rese
{
reservation = length;
}
- reservation = utils::round_up_to_page_size(reservation);
if(!_sh.is_valid())
{
section_handle::flag sectionflags = section_handle::flag::readwrite;
@@ -60,8 +59,8 @@ result<mapped_file_handle::size_type> mapped_file_handle::reserve(size_type rese
OUTCOME_TRYV(_mh.close());
OUTCOME_TRY(mh, map_handle::map(_sh, reservation, 0, mapflags));
_mh = std::move(mh);
- _reservation = reservation;
- return reservation;
+ _reservation = utils::round_up_to_page_size(reservation, page_size());
+ return _reservation;
}
result<void> mapped_file_handle::close() noexcept
@@ -116,8 +115,8 @@ result<mapped_file_handle::extent_type> mapped_file_handle::truncate(extent_type
// otherwise some kernels keep them around until last fd close, effectively leaking them
if(newsize < size)
{
- byte *start = utils::round_up_to_page_size(_mh.address() + newsize);
- byte *end = utils::round_up_to_page_size(_mh.address() + size);
+ byte *start = utils::round_up_to_page_size(_mh.address() + newsize, page_size());
+ byte *end = utils::round_up_to_page_size(_mh.address() + size, page_size());
(void) _mh.do_not_store({start, static_cast<size_t>(end - start)});
}
// Resize the file, on unified page cache kernels it'll map any new pages into the reserved map
diff --git a/include/llfio/v2.0/detail/impl/posix/utils.ipp b/include/llfio/v2.0/detail/impl/posix/utils.ipp
index 61209b74..b30e0956 100644
--- a/include/llfio/v2.0/detail/impl/posix/utils.ipp
+++ b/include/llfio/v2.0/detail/impl/posix/utils.ipp
@@ -53,11 +53,11 @@ namespace utils
}
return ret;
}
- std::vector<size_t> page_sizes(bool only_actually_available)
+ const std::vector<size_t> &page_sizes(bool only_actually_available)
{
static QUICKCPPLIB_NAMESPACE::configurable_spinlock::spinlock<bool> lock;
- static std::vector<size_t> pagesizes, pagesizes_available;
std::lock_guard<decltype(lock)> g(lock);
+ static std::vector<size_t> pagesizes, pagesizes_available;
if(pagesizes.empty())
{
#if defined(__FreeBSD__)
diff --git a/include/llfio/v2.0/detail/impl/storage_profile.ipp b/include/llfio/v2.0/detail/impl/storage_profile.ipp
index 338dfcbc..fb2b46ed 100644
--- a/include/llfio/v2.0/detail/impl/storage_profile.ipp
+++ b/include/llfio/v2.0/detail/impl/storage_profile.ipp
@@ -1209,7 +1209,7 @@ namespace storage_profile
}
if(srch.requires_aligned_io())
{
- bytes = utils::round_down_to_page_size(bytes);
+ bytes = utils::round_down_to_page_size(bytes, utils::page_size());
}
if(cold_cache)
diff --git a/include/llfio/v2.0/detail/impl/windows/map_handle.ipp b/include/llfio/v2.0/detail/impl/windows/map_handle.ipp
index 308a10c8..17a22860 100644
--- a/include/llfio/v2.0/detail/impl/windows/map_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/windows/map_handle.ipp
@@ -117,6 +117,19 @@ result<section_handle> section_handle::section(file_handle &backing, extent_type
{
// Handled during view mapping below
}
+ // Windows supports large pages, and no larger
+ if((_flag & section_handle::flag::page_sizes_3) == section_handle::flag::page_sizes_3)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_2) == section_handle::flag::page_sizes_2)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_1) == section_handle::flag::page_sizes_1)
+ {
+ attribs |= SEC_LARGE_PAGES;
+ }
nativeh.behaviour |= native_handle_type::disposition::section;
OBJECT_ATTRIBUTES oa{}, *poa = nullptr;
UNICODE_STRING _path{};
@@ -227,6 +240,19 @@ result<section_handle> section_handle::section(extent_type bytes, const path_han
{
// Handled during view mapping below
}
+ // Windows supports large pages, and no larger
+ if((_flag & section_handle::flag::page_sizes_3) == section_handle::flag::page_sizes_3)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_2) == section_handle::flag::page_sizes_2)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_1) == section_handle::flag::page_sizes_1)
+ {
+ attribs |= SEC_LARGE_PAGES;
+ }
nativeh.behaviour |= native_handle_type::disposition::section;
LARGE_INTEGER _maximum_size{}, *pmaximum_size = &_maximum_size;
_maximum_size.QuadPart = bytes;
@@ -291,7 +317,36 @@ template <class T> static inline T win32_round_up_to_allocation_size(T i) noexce
i = (T)((LLFIO_V2_NAMESPACE::detail::unsigned_integer_cast<uintptr_t>(i) + 65535) & ~(65535)); // NOLINT
return i;
}
-static inline void win32_map_flags(native_handle_type &nativeh, DWORD &allocation, DWORD &prot, size_t &commitsize, bool enable_reservation, section_handle::flag _flag)
+static inline result<size_t> win32_pagesize_from_flags(section_handle::flag _flag) noexcept
+{
+ try
+ {
+ const auto &pagesizes = utils::page_sizes();
+ // Windows supports large pages, and no larger
+ if((_flag & section_handle::flag::page_sizes_3) == section_handle::flag::page_sizes_3)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_2) == section_handle::flag::page_sizes_2)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_1) == section_handle::flag::page_sizes_1)
+ {
+ if(pagesizes.size() < 2)
+ {
+ return errc::invalid_argument;
+ }
+ return pagesizes[1];
+ }
+ return pagesizes[0];
+ }
+ catch(...)
+ {
+ return error_from_exception();
+ }
+}
+static inline result<void> win32_map_flags(native_handle_type &nativeh, DWORD &allocation, DWORD &prot, size_t &commitsize, bool enable_reservation, section_handle::flag _flag)
{
prot = PAGE_NOACCESS;
if(enable_reservation && ((_flag & section_handle::flag::nocommit) || (_flag == section_handle::flag::none)))
@@ -323,6 +378,26 @@ static inline void win32_map_flags(native_handle_type &nativeh, DWORD &allocatio
{
prot = PAGE_EXECUTE;
}
+ // Windows supports large pages, and no larger
+ if((_flag & section_handle::flag::page_sizes_3) == section_handle::flag::page_sizes_3)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_2) == section_handle::flag::page_sizes_2)
+ {
+ return errc::invalid_argument;
+ }
+ else if((_flag & section_handle::flag::page_sizes_1) == section_handle::flag::page_sizes_1)
+ {
+ // Windows does not permit address reservation with large pages
+ if(_flag & section_handle::flag::nocommit)
+ {
+ return errc::invalid_argument;
+ }
+ // Windows seems to require MEM_RESERVE with large pages
+ allocation |= MEM_RESERVE | MEM_LARGE_PAGES;
+ }
+ return success();
}
QUICKCPPLIB_BITFIELD_BEGIN(win32_map_sought)
@@ -505,14 +580,15 @@ map_handle::io_result<map_handle::const_buffers_type> map_handle::barrier(map_ha
result<map_handle> map_handle::map(size_type bytes, bool /*unused*/, section_handle::flag _flag) noexcept
{
// TODO: Keep a cache of DiscardVirtualMemory()/MEM_RESET pages deallocated
- bytes = win32_round_up_to_allocation_size(bytes);
+ OUTCOME_TRY(pagesize, win32_pagesize_from_flags(_flag));
+ bytes = utils::round_up_to_page_size(bytes, pagesize);
result<map_handle> ret(map_handle(nullptr));
native_handle_type &nativeh = ret.value()._v;
DWORD allocation = MEM_RESERVE | MEM_COMMIT, prot;
PVOID addr = nullptr;
{
size_t commitsize;
- win32_map_flags(nativeh, allocation, prot, commitsize, true, _flag);
+ OUTCOME_TRY(win32_map_flags(nativeh, allocation, prot, commitsize, true, _flag));
}
LLFIO_LOG_FUNCTION_CALL(&ret);
addr = VirtualAlloc(nullptr, bytes, allocation, prot);
@@ -523,6 +599,7 @@ result<map_handle> map_handle::map(size_type bytes, bool /*unused*/, section_han
ret.value()._addr = static_cast<byte *>(addr);
ret.value()._reservation = bytes;
ret.value()._length = bytes;
+ ret.value()._pagesize = pagesize;
// Windows has no way of getting the kernel to prefault maps on creation, so ...
if(_flag & section_handle::flag::prefault)
@@ -534,7 +611,6 @@ result<map_handle> map_handle::map(size_type bytes, bool /*unused*/, section_han
// If this kernel doesn't support that API, manually poke every page in the new map
if(PrefetchVirtualMemory_ == nullptr)
{
- size_t pagesize = utils::page_size();
volatile auto *a = static_cast<volatile char *>(addr);
for(size_t n = 0; n < bytes; n += pagesize)
{
@@ -556,8 +632,9 @@ result<map_handle> map_handle::map(section_handle &section, size_type bytes, ext
size_t commitsize = bytes;
LARGE_INTEGER _offset{};
_offset.QuadPart = offset;
- SIZE_T _bytes = utils::round_up_to_page_size(bytes);
- win32_map_flags(nativeh, allocation, prot, commitsize, section.backing() != nullptr, _flag);
+ OUTCOME_TRY(pagesize, win32_pagesize_from_flags(_flag));
+ SIZE_T _bytes = utils::round_up_to_page_size(bytes, pagesize);
+ OUTCOME_TRY(win32_map_flags(nativeh, allocation, prot, commitsize, section.backing() != nullptr, _flag));
LLFIO_LOG_FUNCTION_CALL(&ret);
NTSTATUS ntstat = NtMapViewOfSection(section.native_handle().h, GetCurrentProcess(), &addr, 0, commitsize, &_offset, &_bytes, ViewUnmap, allocation, prot);
if(ntstat < 0)
@@ -568,6 +645,7 @@ result<map_handle> map_handle::map(section_handle &section, size_type bytes, ext
ret.value()._offset = offset;
ret.value()._reservation = _bytes;
ret.value()._length = section.length().value() - offset;
+ ret.value()._pagesize = pagesize;
// Make my handle borrow the native handle of my backing storage
ret.value()._v.h = section.backing_native_handle().h;
@@ -580,7 +658,6 @@ result<map_handle> map_handle::map(section_handle &section, size_type bytes, ext
// If this kernel doesn't support that API, manually poke every page in the new map
if(PrefetchVirtualMemory_ == nullptr)
{
- size_t pagesize = utils::page_size();
volatile auto *a = static_cast<volatile char *>(addr);
for(size_t n = 0; n < _bytes; n += pagesize)
{
@@ -596,7 +673,7 @@ result<map_handle::size_type> map_handle::truncate(size_type newsize, bool /* un
windows_nt_kernel::init();
using namespace windows_nt_kernel;
LLFIO_LOG_FUNCTION_CALL(this);
- newsize = win32_round_up_to_allocation_size(newsize);
+ newsize = utils::round_up_to_page_size(newsize, _pagesize);
if(newsize == _reservation)
{
return success();
@@ -624,7 +701,7 @@ result<map_handle::size_type> map_handle::truncate(size_type newsize, bool /* un
native_handle_type nativeh;
DWORD allocation = MEM_RESERVE | MEM_COMMIT, prot;
size_t commitsize;
- win32_map_flags(nativeh, allocation, prot, commitsize, true, _flag);
+ OUTCOME_TRY(win32_map_flags(nativeh, allocation, prot, commitsize, true, _flag));
if(!VirtualAlloc(_addr + _reservation, newsize - _reservation, allocation, prot))
{
return win32_error();
@@ -658,7 +735,7 @@ result<map_handle::size_type> map_handle::truncate(size_type newsize, bool /* un
offset.QuadPart = _offset + _reservation;
SIZE_T _bytes = newsize - _reservation;
native_handle_type nativeh;
- win32_map_flags(nativeh, allocation, prot, commitsize, _section->backing() != nullptr, _flag);
+ OUTCOME_TRY(win32_map_flags(nativeh, allocation, prot, commitsize, _section->backing() != nullptr, _flag));
NTSTATUS ntstat = NtMapViewOfSection(_section->native_handle().h, GetCurrentProcess(), &addr, 0, commitsize, &offset, &_bytes, ViewUnmap, allocation, prot);
if(ntstat < 0)
{
@@ -705,7 +782,7 @@ result<map_handle::buffer_type> map_handle::commit(buffer_type region, section_h
{
prot = PAGE_EXECUTE;
}
- region = utils::round_to_page_size(region);
+ region = utils::round_to_page_size(region, _pagesize);
OUTCOME_TRYV(win32_maps_apply(region.data(), region.size(), win32_map_sought::committed | win32_map_sought::freed | win32_map_sought::reserved, [prot](byte *addr, size_t bytes) -> result<void> {
if(VirtualAlloc(addr, bytes, MEM_COMMIT, prot) == nullptr)
{
@@ -723,7 +800,7 @@ result<map_handle::buffer_type> map_handle::decommit(buffer_type region) noexcep
{
return errc::invalid_argument;
}
- region = utils::round_to_page_size(region);
+ region = utils::round_to_page_size(region, _pagesize);
OUTCOME_TRYV(win32_maps_apply(region.data(), region.size(), win32_map_sought::committed, [](byte *addr, size_t bytes) -> result<void> {
if(VirtualFree(addr, bytes, MEM_DECOMMIT) == 0)
{
@@ -747,7 +824,7 @@ result<void> map_handle::zero_memory(buffer_type region) noexcept
memset(region.data(), 0, region.size());
if((DiscardVirtualMemory_ != nullptr) && region.size() >= utils::page_size())
{
- region = utils::round_to_page_size(region);
+ region = utils::round_to_page_size(region, _pagesize);
if(region.size() > 0)
{
OUTCOME_TRYV(win32_maps_apply(region.data(), region.size(), win32_map_sought::committed, [](byte *addr, size_t bytes) -> result<void> {
@@ -784,7 +861,7 @@ result<map_handle::buffer_type> map_handle::do_not_store(buffer_type region) noe
windows_nt_kernel::init();
using namespace windows_nt_kernel;
LLFIO_LOG_FUNCTION_CALL(0);
- region = utils::round_to_page_size(region);
+ region = utils::round_to_page_size(region, _pagesize);
if(region.data() == nullptr)
{
return errc::invalid_argument;
diff --git a/include/llfio/v2.0/detail/impl/windows/mapped_file_handle.ipp b/include/llfio/v2.0/detail/impl/windows/mapped_file_handle.ipp
index 20493a45..93457a98 100644
--- a/include/llfio/v2.0/detail/impl/windows/mapped_file_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/windows/mapped_file_handle.ipp
@@ -35,7 +35,6 @@ result<mapped_file_handle::size_type> mapped_file_handle::reserve(size_type rese
OUTCOME_TRY(length, underlying_file_maximum_extent());
reservation = length;
}
- reservation = utils::round_up_to_page_size(reservation);
if(!_sh.is_valid())
{
// Section must match read/write of file, as otherwise map reservation doesn't work on Windows
@@ -60,8 +59,8 @@ result<mapped_file_handle::size_type> mapped_file_handle::reserve(size_type rese
OUTCOME_TRYV(_mh.close());
OUTCOME_TRY(mh, map_handle::map(_sh, reservation, 0, mapflags));
_mh = std::move(mh);
- _reservation = reservation;
- return reservation;
+ _reservation = utils::round_up_to_page_size(reservation, page_size());
+ return _reservation;
}
result<void> mapped_file_handle::close() noexcept
diff --git a/include/llfio/v2.0/detail/impl/windows/utils.ipp b/include/llfio/v2.0/detail/impl/windows/utils.ipp
index f483b5ea..2b1e37f8 100644
--- a/include/llfio/v2.0/detail/impl/windows/utils.ipp
+++ b/include/llfio/v2.0/detail/impl/windows/utils.ipp
@@ -52,11 +52,11 @@ namespace utils
#pragma warning(push)
#pragma warning(disable : 6387) // MSVC sanitiser warns that GetModuleHandleA() might fail (hah!)
#endif
- std::vector<size_t> page_sizes(bool only_actually_available)
+ const std::vector<size_t> &page_sizes(bool only_actually_available)
{
static QUICKCPPLIB_NAMESPACE::configurable_spinlock::spinlock<bool> lock;
- static std::vector<size_t> pagesizes, pagesizes_available;
std::lock_guard<decltype(lock)> g(lock);
+ static std::vector<size_t> pagesizes, pagesizes_available;
if(pagesizes.empty())
{
using GetLargePageMinimum_t = size_t(WINAPI *)(void);
diff --git a/include/llfio/v2.0/map_handle.hpp b/include/llfio/v2.0/map_handle.hpp
index d84df116..af26d94d 100644
--- a/include/llfio/v2.0/map_handle.hpp
+++ b/include/llfio/v2.0/map_handle.hpp
@@ -69,6 +69,10 @@ public:
barrier_on_close = 1U << 16U, //!< Maps of this section, if writable, issue a `barrier()` when destructed blocking until data (not metadata) reaches physical storage.
nvram = 1U << 17U, //!< This section is of non-volatile RAM
+ page_sizes_1 = 1U << 24U, //!< Use `utils::page_sizes()[1]` sized pages, or fail.
+ page_sizes_2 = 2U << 24U, //!< Use `utils::page_sizes()[2]` sized pages, or fail.
+ page_sizes_3 = 3U << 24U, //!< Use `utils::page_sizes()[3]` sized pages, or fail.
+
// NOTE: IF UPDATING THIS UPDATE THE std::ostream PRINTER BELOW!!!
readwrite = (read | write)};
@@ -213,6 +217,18 @@ inline std::ostream &operator<<(std::ostream &s, const section_handle::flag &v)
{
temp.append("nvram|");
}
+ if((v & section_handle::flag::page_sizes_3) == section_handle::flag::page_sizes_3)
+ {
+ temp.append("page_sizes_3|");
+ }
+ else if((v & section_handle::flag::page_sizes_2) == section_handle::flag::page_sizes_2)
+ {
+ temp.append("page_sizes_2|");
+ }
+ else if((v & section_handle::flag::page_sizes_1) == section_handle::flag::page_sizes_1)
+ {
+ temp.append("page_sizes_1|");
+ }
if(!temp.empty())
{
temp.resize(temp.size() - 1);
@@ -256,6 +272,12 @@ up to the reservation limit.
You can attempt to modify the address space reservation after creation using `truncate()`. If successful,
this will be more efficient than tearing down the map and creating a new larger map.
+\note On Microsoft Windows, it is not permitted to reserve address space using large pages. Any attempt to do
+so will fail. Note also that on that kernel, you must have granted the ability to lock memory to the user
+or users running the process for large page support to be made available to that process and user. Finally,
+as of Windows 10 1803, using large pages in file backed memory maps is not supported. If a future kernel
+changes this, the existing code should "just work".
+
The native handle returned by this map handle is always that of the backing storage, but closing this handle
does not close that of the backing storage, nor does releasing this handle release that of the backing storage.
Locking byte ranges of this handle is therefore equal to locking byte ranges in the original backing storage,
@@ -299,7 +321,7 @@ protected:
section_handle *_section{nullptr};
byte *_addr{nullptr};
extent_type _offset{0};
- size_type _reservation{0}, _length{0};
+ size_type _reservation{0}, _length{0}, _pagesize{0};
section_handle::flag _flag{section_handle::flag::none};
explicit map_handle(section_handle *section)
@@ -313,13 +335,14 @@ public:
constexpr map_handle() {} // NOLINT
LLFIO_HEADERS_ONLY_VIRTUAL_SPEC ~map_handle() override;
//! Implicit move construction of map_handle permitted
- constexpr map_handle(map_handle &&o) noexcept : io_handle(std::move(o)), _section(o._section), _addr(o._addr), _offset(o._offset), _reservation(o._reservation), _length(o._length), _flag(o._flag)
+ constexpr map_handle(map_handle &&o) noexcept : io_handle(std::move(o)), _section(o._section), _addr(o._addr), _offset(o._offset), _reservation(o._reservation), _length(o._length), _pagesize(o._pagesize), _flag(o._flag)
{
o._section = nullptr;
o._addr = nullptr;
o._offset = 0;
o._reservation = 0;
o._length = 0;
+ o._pagesize = 0;
o._flag = section_handle::flag::none;
}
//! No copy construction (use `clone()`)
@@ -378,7 +401,7 @@ public:
}
/*! Map unused memory into view, creating new memory if insufficient unused memory is available. Note that the memory mapped by this call may contain non-zero bits (recycled memory) unless `zeroed` is true.
- \param bytes How many bytes to map. Typically will be rounded up to a multiple of the page size (see `utils::page_sizes()`) on POSIX, 64Kb on Windows.
+ \param bytes How many bytes to map. Typically will be rounded up to a multiple of the page size (see `page_size()`).
\param zeroed Set to true if only all bits zeroed memory is wanted.
\param _flag The permissions with which to map the view. `flag::none` can be useful for reserving virtual address space without committing system resources, use commit() to later change availability of memory.
@@ -395,7 +418,7 @@ public:
/*! Create a memory mapped view of a backing storage, optionally reserving additional address space for later growth.
\param section A memory section handle specifying the backing storage to use.
\param bytes How many bytes to reserve (0 = the size of the section). Rounded up to nearest 64Kb on Windows.
- \param offset The offset into the backing storage to map from. Typically needs to be at least a multiple of the page size (see utils::page_sizes()), on Windows it needs to be a multiple of the kernel memory allocation granularity (typically 64Kb).
+ \param offset The offset into the backing storage to map from. Typically needs to be at least a multiple of the page size (see `page_size()`), on Windows it needs to be a multiple of the kernel memory allocation granularity (typically 64Kb).
\param _flag The permissions with which to map the view which are constrained by the permissions of the memory section. `flag::none` can be useful for reserving virtual address space without committing system resources, use commit() to later change availability of memory.
\errors Any of the values POSIX mmap() or NtMapViewOfSection() can return.
@@ -421,6 +444,9 @@ public:
LLFIO_MAKE_FREE_FUNCTION
size_type length() const noexcept { return _length; }
+ //! The page size used by the map, in bytes.
+ size_type page_size() const noexcept { return _pagesize; }
+
//! True if the map is of non-volatile RAM
bool is_nvram() const noexcept { return !!(_flag & section_handle::flag::nvram); }
@@ -469,10 +495,10 @@ public:
LLFIO_MAKE_FREE_FUNCTION
LLFIO_HEADERS_ONLY_MEMFUNC_SPEC result<size_type> truncate(size_type newsize, bool permit_relocation = false) noexcept;
- //! Ask the system to commit the system resources to make the memory represented by the buffer available with the given permissions. addr and length should be page aligned (see utils::page_sizes()), if not the returned buffer is the region actually committed.
+ //! Ask the system to commit the system resources to make the memory represented by the buffer available with the given permissions. addr and length should be page aligned (see `page_size()`), if not the returned buffer is the region actually committed.
LLFIO_HEADERS_ONLY_MEMFUNC_SPEC result<buffer_type> commit(buffer_type region, section_handle::flag flag = section_handle::flag::readwrite) noexcept;
- //! Ask the system to make the memory represented by the buffer unavailable and to decommit the system resources representing them. addr and length should be page aligned (see utils::page_sizes()), if not the returned buffer is the region actually decommitted.
+ //! Ask the system to make the memory represented by the buffer unavailable and to decommit the system resources representing them. addr and length should be page aligned (see `page_size()`), if not the returned buffer is the region actually decommitted.
LLFIO_HEADERS_ONLY_MEMFUNC_SPEC result<buffer_type> decommit(buffer_type region) noexcept;
/*! Zero the memory represented by the buffer. Differs from zero() because it acts on mapped memory, but may call zero() internally.
@@ -484,8 +510,10 @@ public:
*/
LLFIO_HEADERS_ONLY_MEMFUNC_SPEC result<void> zero_memory(buffer_type region) noexcept;
- /*! Ask the system to unset the dirty flag for the memory represented by the buffer. This will prevent any changes not yet sent to the backing storage from being sent in the future, also if the system kicks out this page and reloads it you may see some edition of the underlying storage instead of what was here. addr
- and length should be page aligned (see utils::page_sizes()), if not the returned buffer is the region actually undirtied.
+ /*! Ask the system to unset the dirty flag for the memory represented by the buffer. This will prevent any changes not yet sent to
+ the backing storage from being sent in the future, also if the system kicks out this page and reloads it you may see some edition of
+ the underlying storage instead of what was here. `addr` and `length` should be page aligned (see `page_size()`), if not the returned
+ buffer is the region actually undirtied.
\warning This function destroys the contents of unwritten pages in the region in a totally unpredictable fashion. Only use it if you don't care how much of
the region reaches physical storage or not. Note that the region is not necessarily zeroed, and may be randomly zeroed.
@@ -637,7 +665,7 @@ inline map_handle::const_buffer_type barrier(map_handle &self, map_handle::const
return self.barrier(std::forward<decltype(req)>(req), std::forward<decltype(evict)>(evict));
}
/*! Create new memory and map it into view.
-\param bytes How many bytes to create and map. Typically will be rounded up to a multiple of the page size (see `utils::page_sizes()`) on POSIX, 64Kb on Windows.
+\param bytes How many bytes to create and map. Typically will be rounded up to a multiple of the page size (see `page_size()`) on POSIX, 64Kb on Windows.
\param _flag The permissions with which to map the view. `flag::none` can be useful for reserving virtual address space without committing system resources, use commit() to later change availability of memory.
\note On Microsoft Windows this constructor uses the faster VirtualAlloc() which creates less versatile page backed memory. If you want anonymous memory
@@ -654,7 +682,7 @@ inline result<map_handle> map(map_handle::size_type bytes, bool zeroed = false,
/*! Create a memory mapped view of a backing storage, optionally reserving additional address space for later growth.
\param section A memory section handle specifying the backing storage to use.
\param bytes How many bytes to reserve (0 = the size of the section). Rounded up to nearest 64Kb on Windows.
-\param offset The offset into the backing storage to map from. Typically needs to be at least a multiple of the page size (see utils::page_sizes()), on Windows it needs to be a multiple of the kernel memory allocation granularity (typically 64Kb).
+\param offset The offset into the backing storage to map from. Typically needs to be at least a multiple of the page size (see `page_size()`), on Windows it needs to be a multiple of the kernel memory allocation granularity (typically 64Kb).
\param _flag The permissions with which to map the view which are constrained by the permissions of the memory section. `flag::none` can be useful for reserving virtual address space without committing system resources, use commit() to later change availability of memory.
\errors Any of the values POSIX mmap() or NtMapViewOfSection() can return.
diff --git a/include/llfio/v2.0/mapped.hpp b/include/llfio/v2.0/mapped.hpp
index e723ca75..a992968e 100644
--- a/include/llfio/v2.0/mapped.hpp
+++ b/include/llfio/v2.0/mapped.hpp
@@ -106,7 +106,7 @@ public:
#ifdef _WIN32
byteoffset & ~65535,
#else
- utils::round_down_to_page_size(byteoffset),
+ utils::round_down_to_page_size(byteoffset, utils::page_size()),
#endif
byteoffset, &sh, (length == (size_type) -1) ? 0 : length * sizeof(T), _flag)) // NOLINT
{
@@ -124,7 +124,7 @@ public:
#ifdef _WIN32
byteoffset & ~65535,
#else
- utils::round_down_to_page_size(byteoffset),
+ utils::round_down_to_page_size(byteoffset, utils::page_size()),
#endif
byteoffset, nullptr, (length == (size_type) -1) ? 0 : length * sizeof(T), _flag)) // NOLINT
{
diff --git a/include/llfio/v2.0/mapped_file_handle.hpp b/include/llfio/v2.0/mapped_file_handle.hpp
index 97be4dde..ef8fab67 100644
--- a/include/llfio/v2.0/mapped_file_handle.hpp
+++ b/include/llfio/v2.0/mapped_file_handle.hpp
@@ -149,7 +149,7 @@ public:
auto out = reserve(reservation);
if(!out)
{
- _reservation = utils::round_up_to_page_size(reservation);
+ _reservation = reservation;
// sink the error
}
}
@@ -296,6 +296,12 @@ public:
//! The address in memory where this mapped file resides
byte *address() const noexcept { return _mh.address(); }
+ //! The page size used by the map, in bytes.
+ size_type page_size() const noexcept { return _mh.page_size(); }
+
+ //! True if the map is of non-volatile RAM
+ bool is_nvram() const noexcept { return _mh.is_nvram(); }
+
//! The maximum extent of the underlying file
result<extent_type> underlying_file_maximum_extent() const noexcept { return file_handle::maximum_extent(); }
diff --git a/include/llfio/v2.0/utils.hpp b/include/llfio/v2.0/utils.hpp
index 0de1930e..5523670f 100644
--- a/include/llfio/v2.0/utils.hpp
+++ b/include/llfio/v2.0/utils.hpp
@@ -48,26 +48,23 @@ namespace utils
/*! \brief Round a value to its next lowest page size multiple
*/
- template <class T> inline T round_down_to_page_size(T i) noexcept
+ template <class T> inline T round_down_to_page_size(T i, size_t pagesize) noexcept
{
- const size_t pagesize = page_size();
i = (T)(LLFIO_V2_NAMESPACE::detail::unsigned_integer_cast<uintptr_t>(i) & ~(pagesize - 1)); // NOLINT
return i;
}
/*! \brief Round a value to its next highest page size multiple
*/
- template <class T> inline T round_up_to_page_size(T i) noexcept
+ template <class T> inline T round_up_to_page_size(T i, size_t pagesize) noexcept
{
- const size_t pagesize = page_size();
i = (T)((LLFIO_V2_NAMESPACE::detail::unsigned_integer_cast<uintptr_t>(i) + pagesize - 1) & ~(pagesize - 1)); // NOLINT
return i;
}
/*! \brief Round a pair of a pointer and a size_t to their nearest page size multiples. The pointer will be rounded
down, the size_t upwards.
*/
- template <class T> inline T round_to_page_size(T i) noexcept
+ template <class T> inline T round_to_page_size(T i, size_t pagesize) noexcept
{
- const size_t pagesize = page_size();
i = {reinterpret_cast<byte *>((LLFIO_V2_NAMESPACE::detail::unsigned_integer_cast<uintptr_t>(i.data())) & ~(pagesize - 1)), (i.size() + pagesize - 1) & ~(pagesize - 1)};
return i;
}
@@ -77,10 +74,11 @@ namespace utils
\param only_actually_available Only return page sizes actually available to the user running this process
\return The page sizes of this architecture.
\ingroup utils
- \complexity{Whatever the system API takes (one would hope constant time).}
- \exceptionmodel{Any error from the operating system or std::bad_alloc.}
+ \complexity{First call performs multiple memory allocations, mutex locks and system calls. Subsequent calls
+ lock mutexes.}
+ \exceptionmodel{Throws any error from the operating system or std::bad_alloc.}
*/
- LLFIO_HEADERS_ONLY_FUNC_SPEC std::vector<size_t> page_sizes(bool only_actually_available = true);
+ LLFIO_HEADERS_ONLY_FUNC_SPEC const std::vector<size_t> &page_sizes(bool only_actually_available = true);
/*! \brief Returns a reasonable default size for page_allocator, typically the closest page size from
page_sizes() to 1Mb.
@@ -88,14 +86,14 @@ namespace utils
\return A value of a TLB large page size close to 1Mb.
\ingroup utils
\complexity{Whatever the system API takes (one would hope constant time).}
- \exceptionmodel{Any error from the operating system or std::bad_alloc.}
+ \exceptionmodel{Throws any error from the operating system or std::bad_alloc.}
*/
inline size_t file_buffer_default_size()
{
static size_t size;
if(size == 0u)
{
- std::vector<size_t> sizes(page_sizes(true));
+ const std::vector<size_t> &sizes = page_sizes(true);
for(auto &i : sizes)
{
if(i >= 1024 * 1024)
@@ -196,7 +194,7 @@ namespace utils
unmodified.
A particularly useful combination with this allocator is with the
- page_sizes() member function of __llfio_dispatcher__. This will return which
+ `page_sizes()` member function of __llfio_dispatcher__. This will return which
pages sizes are possible, and which page sizes are enabled for this user. If
writing a file copy routine for example, using this allocator with the
largest page size as the copy chunk makes a great deal of sense.
diff --git a/release_notes.md b/release_notes.md
index dd1b5145..6b960be9 100644
--- a/release_notes.md
+++ b/release_notes.md
@@ -150,8 +150,9 @@ Todo:
| ✔ | ✔ | ✔ | `shared_fs_mutex` shared/exclusive entities locking based on safe byte ranges
| | ✔ | ✔ | Set random or sequential i/o (prefetch).
| ✔ | ✔ | ✔ | i/o on `async_file_handle` is coroutines awaitable.
-| ✔ | ✔ | | `llfio::algorithm::trivial_vector<T>` with constant time reallocation if `T` is trivially copyable.
+| ✔ | ✔ | ✔ | `llfio::algorithm::trivial_vector<T>` with constant time reallocation if `T` is trivially copyable.
| | ✔ | ✔ | `symlink_handle`.
+| ✔ | ✔ | ✔ | Large, huge and massive page size support for memory allocation and (POSIX only) file maps.
Todo thereafter in order of priority:
diff --git a/test/tests/trivial_vector.cpp b/test/tests/trivial_vector.cpp
index 6cc5c60f..2ca92e6e 100644
--- a/test/tests/trivial_vector.cpp
+++ b/test/tests/trivial_vector.cpp
@@ -32,8 +32,9 @@ static inline void TestTrivialVector()
{
uint64_t v, _space[7]; // 64 bytes total
udt() = delete;
- explicit udt(int /*unused*/) : v(trivial_vector_udts_constructed++)
- , _space{ 1, 2, 3, 4, 5, 6, 7 }
+ explicit udt(int /*unused*/)
+ : v(trivial_vector_udts_constructed++)
+ , _space{1, 2, 3, 4, 5, 6, 7}
{
}
};
@@ -44,27 +45,27 @@ static inline void TestTrivialVector()
BOOST_CHECK(v.empty());
BOOST_CHECK(v.size() == 0); // NOLINT
std::cout << "Resizing to 4Kb ..." << std::endl;
- v.push_back(udt(5)); // first allocation of 4Kb
+ v.push_back(udt(5)); // first allocation of 4Kb
BOOST_CHECK(v.size() == 1);
BOOST_CHECK(v.capacity() == LLFIO_V2_NAMESPACE::utils::page_size() / sizeof(udt));
BOOST_REQUIRE(v[0].v == 78);
std::cout << "Resizing to capacity ..." << std::endl;
- v.resize(_4kb, udt(6)); // ought to be precisely 4Kb
+ v.resize(_4kb, udt(6)); // ought to be precisely 4Kb
BOOST_CHECK(v.size() == _4kb);
- BOOST_CHECK(v.capacity() == LLFIO_V2_NAMESPACE::utils::round_up_to_page_size(4096) / sizeof(udt));
+ BOOST_CHECK(v.capacity() == LLFIO_V2_NAMESPACE::utils::round_up_to_page_size(4096, LLFIO_V2_NAMESPACE::utils::page_size()) / sizeof(udt));
BOOST_REQUIRE(v[0].v == 78);
BOOST_REQUIRE(v[1].v == 79);
std::cout << "Resizing to 16Kb ..." << std::endl;
- v.resize(_16kb, udt(7)); // 16Kb
+ v.resize(_16kb, udt(7)); // 16Kb
BOOST_CHECK(v.size() == _16kb);
- BOOST_CHECK(v.capacity() == LLFIO_V2_NAMESPACE::utils::round_up_to_page_size(16384) / sizeof(udt));
+ BOOST_CHECK(v.capacity() == LLFIO_V2_NAMESPACE::utils::round_up_to_page_size(16384, LLFIO_V2_NAMESPACE::utils::page_size()) / sizeof(udt));
BOOST_REQUIRE(v[0].v == 78);
BOOST_REQUIRE(v[1].v == 79);
BOOST_REQUIRE(v[_4kb].v == 80);
std::cout << "Resizing to 64Kb ..." << std::endl;
- v.resize(_64kb, udt(8)); // 64Kb
+ v.resize(_64kb, udt(8)); // 64Kb
BOOST_CHECK(v.size() == _64kb);
- BOOST_CHECK(v.capacity() == LLFIO_V2_NAMESPACE::utils::round_up_to_page_size(65536) / sizeof(udt));
+ BOOST_CHECK(v.capacity() == LLFIO_V2_NAMESPACE::utils::round_up_to_page_size(65536, LLFIO_V2_NAMESPACE::utils::page_size()) / sizeof(udt));
BOOST_REQUIRE(v[0].v == 78);
BOOST_REQUIRE(v[1].v == 79);
BOOST_REQUIRE(v[_4kb].v == 80);
@@ -138,9 +139,9 @@ static inline void BenchmarkTrivialVector1()
{
struct udt
{
- uint64_t v[8]; // 64 bytes total
+ uint64_t v[8]; // 64 bytes total
constexpr udt() // NOLINT
- : v{ 1, 2, 3, 4, 5, 6, 7, 8 }
+ : v{1, 2, 3, 4, 5, 6, 7, 8}
{
}
};
@@ -187,9 +188,9 @@ static inline void BenchmarkTrivialVector2()
{
struct udt
{
- uint64_t v[8]; // 64 bytes total
+ uint64_t v[8]; // 64 bytes total
constexpr udt(int /*unused*/) // NOLINT
- : v{ 1, 2, 3, 4, 5, 6, 7, 8 }
+ : v{1, 2, 3, 4, 5, 6, 7, 8}
{
}
};