github.com/windirstat/llfio.git

Diffstat (limited to 'include/afio/v2.0/algorithm')
-rw-r--r--  include/afio/v2.0/algorithm/mapped_span.hpp                   | 14 +++++++-------
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp | 18 +++++++++---------
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp    |  4 ++--
3 files changed, 18 insertions(+), 18 deletions(-)
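
The patch below retypes every scatter-gather I/O buffer in these algorithm
headers from char * to byte *, matching the buffer_type/const_buffer_type
definitions elsewhere in AFIO v2. A minimal sketch of the pattern being
applied, assuming byte is AFIO's alias for C++17 std::byte (the alias
actually comes from quickcpplib, so treat that spelling as an assumption):

    // Any trivially copyable struct handed to handle::read()/write() is now
    // viewed through reinterpret_cast<byte *> rather than char *.
    struct header { uint64_t hash; uint64_t time; };
    header h{};
    // before: _h.read(0, {{reinterpret_cast<char *>(&h), sizeof(h)}})
    // after:
    OUTCOME_TRY(filled, _h.read(0, {{reinterpret_cast<byte *>(&h), sizeof(h)}}));
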
diff --git a/include/afio/v2.0/algorithm/mapped_span.hpp b/include/afio/v2.0/algorithm/mapped_span.hpp
index 29081720..52ada81e 100644
--- a/include/afio/v2.0/algorithm/mapped_span.hpp
+++ b/include/afio/v2.0/algorithm/mapped_span.hpp
@@ -55,7 +55,7 @@ namespace algorithm
: _mapping(map_handle::map(sh, (bytes == 0) ? 0 : bytes + (offset - page_offset), page_offset, _flag).value())
{
offset -= page_offset;
- char *addr = _mapping.address() + offset;
+ byte *addr = _mapping.address() + offset;
size_t len = sh.length().value() - offset; // use section length, not mapped length as mapped length is rounded up to page size
if(bytes != 0 && bytes < len)
{
@@ -67,15 +67,15 @@ namespace algorithm
public:
//! Default constructor
constexpr mapped_span() {} // NOLINT
- /*! Create a view of new memory.
-
- \param length The number of items to map.
- \param _flag The flags to pass to `map_handle::map()`.
- */
+ /*! Create a view of new memory.
+
+ \param length The number of items to map.
+ \param _flag The flags to pass to `map_handle::map()`.
+ */
explicit mapped_span(size_type length, section_handle::flag _flag = section_handle::flag::readwrite)
: _mapping(map_handle::map(length * sizeof(T), _flag).value())
{
- char *addr = _mapping.address();
+ byte *addr = _mapping.address();
static_cast<span<T> &>(*this) = span<T>(reinterpret_cast<T *>(addr), length); // NOLINT
}
/*! Construct a mapped view of the given section handle.
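
For context, mapped_span<T> wraps a map_handle and derives from span<T> (see
the static_cast in the constructor above), so it hands back directly
indexable memory. A usage sketch, assuming this era's AFIO_V2_NAMESPACE
macro (an assumption; later releases rename both the library and the class):

    #include "afio/v2.0/algorithm/mapped_span.hpp"
    namespace afio = AFIO_V2_NAMESPACE;

    // Map 1024 * sizeof(int) bytes of new memory, rounded up to a page.
    afio::algorithm::mapped_span<int> ints(1024);
    ints[0] = 42;            // usable immediately as a span<int>
    size_t n = ints.size();  // 1024 items
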
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
index deed9320..8fdfdfe4 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
@@ -138,8 +138,8 @@ namespace algorithm
bool first = true;
do
{
- OUTCOME_TRY(_, _h.read(0, {{reinterpret_cast<char *>(&_header), 48}}));
- if(_[0].data != reinterpret_cast<char *>(&_header))
+ OUTCOME_TRY(_, _h.read(0, {{reinterpret_cast<byte *>(&_header), 48}}));
+ if(_[0].data != reinterpret_cast<byte *>(&_header))
{
memcpy(&_header, _[0].data, _[0].len);
}
@@ -220,7 +220,7 @@ namespace algorithm
{
header.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash((reinterpret_cast<char *>(&header)) + 16, sizeof(header) - 16);
}
- OUTCOME_TRYV(ret.write(0, {{reinterpret_cast<char *>(&header), sizeof(header)}}));
+ OUTCOME_TRYV(ret.write(0, {{reinterpret_cast<byte *>(&header), sizeof(header)}}));
}
// Open a shared lock on last byte in header to prevent other users zomping the file
OUTCOME_TRY(guard, ret.lock(sizeof(header) - 1, 1, false));
@@ -287,11 +287,11 @@ namespace algorithm
OUTCOME_TRY(append_guard_, _h.lock(my_lock_request_offset, lastbyte, true));
append_guard = std::move(append_guard_);
}
- OUTCOME_TRYV(_h.write(0, {{reinterpret_cast<char *>(&lock_request), sizeof(lock_request)}}));
+ OUTCOME_TRYV(_h.write(0, {{reinterpret_cast<byte *>(&lock_request), sizeof(lock_request)}}));
}
// Find the record I just wrote
- alignas(64) char _buffer[4096 + 2048]; // 6Kb cache line aligned buffer
+ alignas(64) byte _buffer[4096 + 2048]; // 6Kb cache line aligned buffer
// Read onwards from length as reported before I wrote my lock request
// until I find my lock request. This loop should never actually iterate
// except under extreme load conditions.
@@ -473,7 +473,7 @@ namespace algorithm
{
atomic_append_detail::lock_request record;
#ifdef _DEBUG
- (void) _h.read(my_lock_request_offset, {{(char *) &record, sizeof(record)}});
+ (void) _h.read(my_lock_request_offset, {{(byte *) &record, sizeof(record)}});
if(!record.unique_id)
{
AFIO_LOG_FATAL(this, "atomic_append::unlock() I have been previously unlocked!");
@@ -487,7 +487,7 @@ namespace algorithm
}
#endif
memset(&record, 0, sizeof(record));
- (void) _h.write(my_lock_request_offset, {{reinterpret_cast<char *>(&record), sizeof(record)}});
+ (void) _h.write(my_lock_request_offset, {{reinterpret_cast<byte *>(&record), sizeof(record)}});
}
// Every 32 records or so, bump _header.first_known_good
@@ -497,7 +497,7 @@ namespace algorithm
// Forward scan records until first non-zero record is found
// and update header with new info
- alignas(64) char _buffer[4096 + 2048];
+ alignas(64) byte _buffer[4096 + 2048];
bool done = false;
while(!done)
{
@@ -543,7 +543,7 @@ namespace algorithm
_header.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash((reinterpret_cast<char *>(&_header)) + 16, sizeof(_header) - 16);
}
// Rewrite the first part of the header only
- (void) _h.write(0, {{reinterpret_cast<char *>(&_header), 48}});
+ (void) _h.write(0, {{reinterpret_cast<byte *>(&_header), 48}});
}
}
};
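
The lock_request records and scan buffers retyped above form the on-disk
protocol of the atomic_append lock file. A usage sketch, assuming the
fs_mutex_append() factory of this era's API and a two-argument entity_type
constructor (both assumptions; check the header for exact signatures), with
a hypothetical lock file path:

    #include "afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp"
    namespace afio = AFIO_V2_NAMESPACE;
    using afio::algorithm::shared_fs_mutex::atomic_append;

    // Construct the mutex over a shared lock file, then take an exclusive
    // lock on entity 0; the lock releases when the guard leaves scope.
    atomic_append mtx = atomic_append::fs_mutex_append("mylock").value();
    auto guard = mtx.lock(atomic_append::entity_type(0, true)).value();
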
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
index 8f9b0287..18f57834 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
@@ -208,7 +208,7 @@ namespace algorithm
{
return lockinuse.error();
}
- char buffer[65536];
+ byte buffer[65536];
memset(buffer, 0, sizeof(buffer));
OUTCOME_TRYV(ret.read(0, {{buffer, 65535}}));
path_view temphpath(reinterpret_cast<filesystem::path::value_type *>(buffer));
@@ -244,7 +244,7 @@ namespace algorithm
char buffer[4096];
memset(buffer, 0, sizeof(buffer));
size_t bytes = temppath.native().size() * sizeof(*temppath.c_str());
- file_handle::const_buffer_type buffers[] = {{reinterpret_cast<const char *>(temppath.c_str()), bytes}, {static_cast<const char *>(buffer), 4096 - (bytes % 4096)}};
+ file_handle::const_buffer_type buffers[] = {{reinterpret_cast<const byte *>(temppath.c_str()), bytes}, {reinterpret_cast<const byte *>(buffer), 4096 - (bytes % 4096)}};
OUTCOME_TRYV(ret.truncate(65536));
OUTCOME_TRYV(ret.write({buffers, 0}));
// Map for read the maximum possible path file size, again to avoid race problems
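
The memory_map change applies the same rule to gather writes: const buffers
now point at const byte. A sketch of the retyped write call, mirroring the
ret.write({buffers, 0}) shape above (fh, the afio namespace alias, and the
payload are illustrative assumptions):

    const char payload[] = "hello";
    afio::file_handle::const_buffer_type bufs[] = {
        {reinterpret_cast<const afio::byte *>(payload), sizeof(payload)}};
    // io_request pairs the gather list with the file offset to write at
    OUTCOME_TRYV(fh.write({bufs, 0}));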