github.com/windirstat/llfio.git

-rw-r--r--  .clang-tidy                                                       |   2
-rw-r--r--  include/afio/revision.hpp                                         |   6
-rw-r--r--  include/afio/v2.0/algorithm/cached_parent_handle_adapter.hpp      |   4
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp     | 121
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/base.hpp              |  44
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp       |  32
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/lock_files.hpp        |  24
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp        |  92
-rw-r--r--  include/afio/v2.0/algorithm/shared_fs_mutex/safe_byte_ranges.hpp  |   6
-rw-r--r--  include/afio/v2.0/algorithm/trivial_vector.hpp                    |  14
10 files changed, 235 insertions, 110 deletions
diff --git a/.clang-tidy b/.clang-tidy
index edccbd51..d406e68d 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,5 +1,5 @@
---
-Checks: '*,-llvm-header-guard,-google-build-using-namespace,-clang-analyzer-alpha.clone.CloneChecker,-google-runtime-int,-cppcoreguidelines-pro-bounds-array-to-pointer-decay,-clang-analyzer-alpha.deadcode.UnreachableCode,-misc-use-after-move,-cppcoreguidelines-pro-type-vararg,-modernize-use-emplace,-cert-err60-cpp,-cppcoreguidelines-pro-type-union-access,-fuchsia-*'
+Checks: '*,-llvm-header-guard,-google-build-using-namespace,-google-runtime-int,-cppcoreguidelines-pro-bounds-array-to-pointer-decay,-modernize-unary-static-assert,-fuchsia-*,-llvm-namespace-comment'
WarningsAsErrors: ''
HeaderFilterRegex: '.*'
AnalyzeTemporaryDtors: true
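
Note: the commit trades several blanket check exclusions (e.g. -cppcoreguidelines-pro-type-union-access) for targeted // NOLINT markers in the headers below. A minimal sketch of how such a marker works (hypothetical function, not from this commit): it silences clang-tidy on that one line while leaving the check active everywhere else.

    #include <cstdint>

    // Reading an object's bytes requires reinterpret_cast, which the
    // cppcoreguidelines-pro-type-reinterpret-cast check would flag;
    // the NOLINT marker suppresses the diagnostic here only.
    char *as_bytes(std::uint64_t &v)
    {
      return reinterpret_cast<char *>(&v);  // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast)
    }
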
diff --git a/include/afio/revision.hpp b/include/afio/revision.hpp
index 590a62af..ed6a38b8 100644
--- a/include/afio/revision.hpp
+++ b/include/afio/revision.hpp
@@ -1,4 +1,4 @@
// Note the second line of this file must ALWAYS be the git SHA, third line ALWAYS the git SHA update time
-#define AFIO_PREVIOUS_COMMIT_REF 1be35da30b868cd6d31af217c116330d95d45e6d
-#define AFIO_PREVIOUS_COMMIT_DATE "2017-12-06 20:55:32 +00:00"
-#define AFIO_PREVIOUS_COMMIT_UNIQUE 1be35da3
+#define AFIO_PREVIOUS_COMMIT_REF a8f3238a7c75c17eae460189a7e3f72dd12c5627
+#define AFIO_PREVIOUS_COMMIT_DATE "2017-12-11 09:57:11 +00:00"
+#define AFIO_PREVIOUS_COMMIT_UNIQUE a8f3238a
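
Note: the SHA macros above are bare tokens, so a consumer wanting a printable string has to stringize them. A hedged sketch using standard two-level preprocessor stringizing (the STRINGIZE helpers are hypothetical, not part of this header):

    // Two-level expansion so the macro's value, not its name, is stringized.
    #define AFIO_STRINGIZE2(x) #x
    #define AFIO_STRINGIZE(x) AFIO_STRINGIZE2(x)
    // Yields "a8f3238a7c75c17eae460189a7e3f72dd12c5627".
    static const char *commit_ref = AFIO_STRINGIZE(AFIO_PREVIOUS_COMMIT_REF);
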
diff --git a/include/afio/v2.0/algorithm/cached_parent_handle_adapter.hpp b/include/afio/v2.0/algorithm/cached_parent_handle_adapter.hpp
index fc84a780..9293615e 100644
--- a/include/afio/v2.0/algorithm/cached_parent_handle_adapter.hpp
+++ b/include/afio/v2.0/algorithm/cached_parent_handle_adapter.hpp
@@ -43,7 +43,7 @@ namespace algorithm
{
directory_handle h;
filesystem::path _lastpath;
- cached_path_handle(directory_handle &&_h)
+ explicit cached_path_handle(directory_handle &&_h)
: h(std::move(_h))
{
}
@@ -99,7 +99,7 @@ namespace algorithm
_sph = std::move(r.first);
_leafname = std::move(r.second);
}
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC ~cached_parent_handle_adapter()
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC ~cached_parent_handle_adapter() override
{
if(this->_v)
{
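
Note: marking the one-argument constructors explicit (here and in the headers below) stops a directory_handle from silently converting into the adapter type, and the added override on the destructor lets the compiler verify the virtual dispatch. A minimal sketch of the explicit effect, with hypothetical names:

    struct holder
    {
      explicit holder(int fd) : _fd(fd) {}
      int _fd;
    };
    void consume(holder h) { (void) h._fd; }
    int main()
    {
      // consume(42);         // ill-formed now: no implicit int -> holder conversion
      consume(holder(42));    // fine: the conversion is spelled out
    }
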
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
index 540d79cd..23e75a25 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
@@ -46,13 +46,16 @@ namespace algorithm
#pragma pack(1)
struct alignas(16) header
{
- uint128 hash; // Hash of remaining 112 bytes
- uint64 generation; // Iterated per write
- uint64 time_offset; // time_t in seconds at time of creation. Used to offset us_count below.
- uint64 first_known_good; // offset to first known good lock_request
- uint64 first_after_hole_punch; // offset to first byte after last hole punch
- // First 48 bytes are the header, remainder is zeros for future expansion
- uint64 _padding[10];
+ uint128 hash; // Hash of remaining 112 bytes
+ uint64 generation{}; // Iterated per write
+ uint64 time_offset{}; // time_t in seconds at time of creation. Used to
+ // offset us_count below.
+ uint64 first_known_good{}; // offset to first known good lock_request
+ uint64 first_after_hole_punch{}; // offset to first byte after last hole
+ // punch
+ // First 48 bytes are the header, remainder is zeros for future
+ // expansion
+ uint64 _padding[10]{};
// Last byte is used to detect first user of the file
};
static_assert(sizeof(header) == 128, "header structure is not 128 bytes long!");
@@ -60,7 +63,7 @@ namespace algorithm
struct alignas(16) lock_request
{
uint128 hash; // Hash of remaining 112 bytes
- uint64 unique_id; // A unique id identifying this locking instance
+ uint64 unique_id{}; // A unique id identifying this locking instance
uint64 us_count : 56; // Microseconds since the lock file created
uint64 items : 8; // The number of entities below which are valid
shared_fs_mutex::entity_type entities[12]; // Entities to exclusive or share lock
@@ -120,7 +123,7 @@ namespace algorithm
{
// guard now points at a non-existing handle
_guard.set_handle(&_h);
- utils::random_fill((char *) &_unique_id, sizeof(_unique_id)); // crypto strong random
+ utils::random_fill(reinterpret_cast<char *>(&_unique_id), sizeof(_unique_id)); // crypto strong random
memset(&_header, 0, sizeof(_header));
(void) _read_header();
}
@@ -133,16 +136,24 @@ namespace algorithm
do
{
OUTCOME_TRY(_, _h.read(0, (char *) &_header, 48));
- if(_.data != (char *) &_header)
+ if(_.data != reinterpret_cast<char *>(&_header))
+ {
memcpy(&_header, _.data, _.len);
+ }
if(_skip_hashing)
+ {
return success();
+ }
if(first)
+ {
first = false;
+ }
else
+ {
std::this_thread::yield();
+ }
// No timeout as this should very rarely block for any significant length of time
- } while(_header.hash != QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash(((char *) &_header) + 16, sizeof(_header) - 16));
+ } while(_header.hash != QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash((reinterpret_cast<char *>(&_header)) + 16, sizeof(_header) - 16));
return success();
}
@@ -184,7 +195,9 @@ namespace algorithm
if(lockresult.has_error())
{
if(lockresult.error() != std::errc::timed_out)
+ {
return lockresult.error();
+ }
// Somebody else is also using this file
}
else
@@ -196,14 +209,18 @@ namespace algorithm
header.first_known_good = sizeof(header);
header.first_after_hole_punch = sizeof(header);
if(!skip_hashing)
- header.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash(((char *) &header) + 16, sizeof(header) - 16);
+ {
+ header.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash((reinterpret_cast<char *>(&header)) + 16, sizeof(header) - 16);
+ }
OUTCOME_TRYV(ret.write(0, (char *) &header, sizeof(header)));
}
// Open a shared lock on last byte in header to prevent other users zomping the file
OUTCOME_TRY(guard, ret.lock(sizeof(header) - 1, 1, false));
// Unlock any exclusive lock I gained earlier now
if(lockresult)
+ {
lockresult.value().unlock();
+ }
// The constructor will read and cache the header
return atomic_append(std::move(ret), std::move(guard), nfs_compatibility, skip_hashing);
}
@@ -212,21 +229,27 @@ namespace algorithm
const file_handle &handle() const noexcept { return _h; }
protected:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept final
{
AFIO_LOG_FUNCTION_CALL(this);
atomic_append_detail::lock_request lock_request;
if(out.entities.size() > sizeof(lock_request.entities) / sizeof(lock_request.entities[0]))
+ {
return std::errc::argument_list_too_long;
+ }
std::chrono::steady_clock::time_point began_steady;
std::chrono::system_clock::time_point end_utc;
if(d)
{
if((d).steady)
+ {
began_steady = std::chrono::steady_clock::now();
+ }
else
+ {
end_utc = (d).to_time_point();
+ }
}
// Fire this if an error occurs
auto disableunlock = undoer([&] { out.release(); });
@@ -239,7 +262,9 @@ namespace algorithm
lock_request.items = out.entities.size();
memcpy(lock_request.entities, out.entities.data(), sizeof(lock_request.entities[0]) * out.entities.size());
if(!_skip_hashing)
- lock_request.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash(((char *) &lock_request) + 16, sizeof(lock_request) - 16);
+ {
+ lock_request.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash((reinterpret_cast<char *>(&lock_request)) + 16, sizeof(lock_request) - 16);
+ }
// My lock request will be the file's current length or higher
OUTCOME_TRY(my_lock_request_offset, _h.length());
{
@@ -248,7 +273,7 @@ namespace algorithm
file_handle::extent_guard append_guard;
if(_nfs_compatibility)
{
- file_handle::extent_type lastbyte = (file_handle::extent_type) -1;
+ auto lastbyte = static_cast<file_handle::extent_type>(-1);
// Lock up to the beginning of the shadow lock space
lastbyte &= ~(1ULL << 63);
OUTCOME_TRY(append_guard_, _h.lock(my_lock_request_offset, lastbyte, true));
@@ -274,10 +299,14 @@ namespace algorithm
std::terminate();
}
const atomic_append_detail::lock_request *record, *lastrecord;
- for(record = (const atomic_append_detail::lock_request *) readoutcome.value().data, lastrecord = (const atomic_append_detail::lock_request *) (readoutcome.value().data + readoutcome.value().len); record < lastrecord && record->hash != lock_request.hash; ++record)
+ for(record = reinterpret_cast<const atomic_append_detail::lock_request *>(readoutcome.value().data), lastrecord = reinterpret_cast<const atomic_append_detail::lock_request *>(readoutcome.value().data + readoutcome.value().len); record < lastrecord && record->hash != lock_request.hash; ++record)
+ {
my_lock_request_offset += sizeof(atomic_append_detail::lock_request);
+ }
if(record->hash == lock_request.hash)
+ {
break;
+ }
}
// extent_guard is now valid and will be unlocked on error
@@ -305,32 +334,44 @@ namespace algorithm
OUTCOME_TRYV(_read_header());
// If there are no preceding records, we're done
if(record_offset < _header.first_known_good)
+ {
break;
+ }
auto start_offset = record_offset;
if(start_offset > sizeof(_buffer) - sizeof(atomic_append_detail::lock_request))
+ {
start_offset -= sizeof(_buffer) - sizeof(atomic_append_detail::lock_request);
+ }
else
+ {
start_offset = sizeof(atomic_append_detail::lock_request);
+ }
if(start_offset < _header.first_known_good)
+ {
start_offset = _header.first_known_good;
+ }
assert(record_offset >= start_offset);
assert(record_offset - start_offset <= sizeof(_buffer));
OUTCOME_TRY(batchread, _h.read(start_offset, _buffer, (size_t)(record_offset - start_offset) + sizeof(atomic_append_detail::lock_request)));
assert(batchread.len == record_offset - start_offset + sizeof(atomic_append_detail::lock_request));
- const atomic_append_detail::lock_request *record = (atomic_append_detail::lock_request *) (batchread.data + batchread.len - sizeof(atomic_append_detail::lock_request));
- const atomic_append_detail::lock_request *firstrecord = (atomic_append_detail::lock_request *) batchread.data;
+ const atomic_append_detail::lock_request *record = reinterpret_cast<atomic_append_detail::lock_request *>(batchread.data + batchread.len - sizeof(atomic_append_detail::lock_request));
+ const atomic_append_detail::lock_request *firstrecord = reinterpret_cast<atomic_append_detail::lock_request *>(batchread.data);
// Skip all completed lock requests or not mentioning any of my entities
for(; record >= firstrecord; record_offset -= sizeof(atomic_append_detail::lock_request), --record)
{
// If a completed lock request, skip
- if(!record->hash && !record->unique_id)
+ if(!record->hash && (record->unique_id == 0u))
+ {
continue;
+ }
// If record hash doesn't match contents it's a torn read, reload
if(!_skip_hashing)
{
if(record->hash != QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash(((char *) record) + 16, sizeof(atomic_append_detail::lock_request) - 16))
+ {
goto reload;
+ }
}
// Does this record lock anything I am locking?
@@ -342,8 +383,10 @@ namespace algorithm
{
// Is the lock I want exclusive or the lock he wants exclusive?
// If so, need to block
- if(record->entities[n].exclusive || entity.exclusive)
+ if((record->entities[n].exclusive != 0u) || (entity.exclusive != 0u))
+ {
goto beginwait;
+ }
}
}
}
@@ -368,12 +411,18 @@ namespace algorithm
{
std::chrono::nanoseconds ns = std::chrono::duration_cast<std::chrono::nanoseconds>((began_steady + std::chrono::nanoseconds((d).nsecs)) - std::chrono::steady_clock::now());
if(ns.count() < 0)
+ {
(nd).nsecs = 0;
+ }
else
+ {
(nd).nsecs = ns.count();
+ }
}
else
+ {
(nd) = (d);
+ }
}
auto lock_offset = record_offset;
// Set the top bit to use the shadow lock space on Windows
@@ -386,12 +435,16 @@ namespace algorithm
if((d).steady)
{
if(std::chrono::steady_clock::now() >= (began_steady + std::chrono::nanoseconds((d).nsecs)))
+ {
return std::errc::timed_out;
+ }
}
else
{
if(std::chrono::system_clock::now() >= end_utc)
+ {
return std::errc::timed_out;
+ }
}
}
} while(record_offset >= _header.first_known_good);
@@ -399,16 +452,16 @@ namespace algorithm
}
public:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long hint) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long hint) noexcept final
{
(void) entities;
AFIO_LOG_FUNCTION_CALL(this);
- if(!hint)
+ if(hint == 0u)
{
AFIO_LOG_WARN(this, "atomic_append::unlock() currently requires a hint to work, assuming this is a failed lock.");
return;
}
- file_handle::extent_type my_lock_request_offset = (file_handle::extent_type) hint;
+ auto my_lock_request_offset = static_cast<file_handle::extent_type>(hint);
{
atomic_append_detail::lock_request record;
#ifdef _DEBUG
@@ -426,11 +479,11 @@ namespace algorithm
}
#endif
memset(&record, 0, sizeof(record));
- (void) _h.write(my_lock_request_offset, (char *) &record, sizeof(record));
+ (void) _h.write(my_lock_request_offset, reinterpret_cast<char *>(&record), sizeof(record));
}
// Every 32 records or so, bump _header.first_known_good
- if(!(my_lock_request_offset & 4095))
+ if((my_lock_request_offset & 4095) == 0u)
{
//_read_header();
@@ -450,15 +503,21 @@ namespace algorithm
const auto &bytesread = bytesread_.value();
// If read was partial, we are done after this round
if(bytesread.len < sizeof(_buffer))
+ {
done = true;
- const atomic_append_detail::lock_request *record = (const atomic_append_detail::lock_request *) bytesread.data;
- const atomic_append_detail::lock_request *lastrecord = (const atomic_append_detail::lock_request *) (bytesread.data + bytesread.len);
+ }
+ const auto *record = reinterpret_cast<const atomic_append_detail::lock_request *>(bytesread.data);
+ const auto *lastrecord = reinterpret_cast<const atomic_append_detail::lock_request *>(bytesread.data + bytesread.len);
for(; record < lastrecord; ++record)
{
- if(!record->hash && !record->unique_id)
+ if(!record->hash && (record->unique_id == 0u))
+ {
_header.first_known_good += sizeof(atomic_append_detail::lock_request);
+ }
else
+ {
break;
+ }
}
}
// Hole punch if >= 1Mb of zeros exists
@@ -472,9 +531,11 @@ namespace algorithm
}
++_header.generation;
if(!_skip_hashing)
- _header.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash(((char *) &_header) + 16, sizeof(_header) - 16);
+ {
+ _header.hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash((reinterpret_cast<char *>(&_header)) + 16, sizeof(_header) - 16);
+ }
// Rewrite the first part of the header only
- (void) _h.write(0, (char *) &_header, 48);
+ (void) _h.write(0, reinterpret_cast<char *>(&_header), 48);
}
}
};
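
Note: throughout atomic_append, a record's leading 16-byte hash field covers every byte after it; readers recompute and compare on load to detect half-written (torn) records, retrying on mismatch. A self-contained sketch of the same pattern, with FNV-1a standing in for QuickCppLib's fast_hash and a 64-bit hash field instead of 128:

    #include <cstddef>
    #include <cstdint>

    static std::uint64_t fnv1a(const char *p, std::size_t n)
    {
      std::uint64_t h = 14695981039346656037ULL;
      for(std::size_t i = 0; i < n; i++)
      {
        h = (h ^ static_cast<unsigned char>(p[i])) * 1099511628211ULL;
      }
      return h;
    }
    struct record
    {
      std::uint64_t hash;        // covers everything below it
      std::uint64_t payload[7];
    };
    // Writer seals the record; a reader seeing intact() == false got a torn read.
    void seal(record &r) { r.hash = fnv1a(reinterpret_cast<char *>(&r) + 8, sizeof(r) - 8); }
    bool intact(const record &r) { return r.hash == fnv1a(reinterpret_cast<const char *>(&r) + 8, sizeof(r) - 8); }
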
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/base.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/base.hpp
index 59c9f8d2..fb31832e 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/base.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/base.hpp
@@ -86,7 +86,7 @@ namespace algorithm
entity_type(value_type _value, bool _exclusive) noexcept : _init(0)
{
value = _value;
- exclusive = _exclusive;
+ exclusive = _exclusive; // NOLINT
}
};
static_assert(std::is_literal_type<entity_type>::value, "entity_type is not a literal type");
@@ -95,36 +95,38 @@ namespace algorithm
using entities_type = span<entity_type>;
protected:
- constexpr shared_fs_mutex() {}
+ constexpr shared_fs_mutex() = default;
public:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC ~shared_fs_mutex() {}
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC ~shared_fs_mutex() = default;
//! Generates an entity id from a sequence of bytes
entity_type entity_from_buffer(const char *buffer, size_t bytes, bool exclusive = true) noexcept
{
uint128 hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash(buffer, bytes);
- return entity_type(hash.as_longlongs[0] ^ hash.as_longlongs[1], exclusive);
+ return {hash.as_longlongs[0] ^ hash.as_longlongs[1], exclusive};
}
//! Generates an entity id from a string
template <typename T> entity_type entity_from_string(const std::basic_string<T> &str, bool exclusive = true) noexcept
{
uint128 hash = QUICKCPPLIB_NAMESPACE::algorithm::hash::fast_hash::hash(str);
- return entity_type(hash.as_longlongs[0] ^ hash.as_longlongs[1], exclusive);
+ return {hash.as_longlongs[0] ^ hash.as_longlongs[1], exclusive};
}
//! Generates a cryptographically random entity id.
entity_type random_entity(bool exclusive = true) noexcept
{
entity_type::value_type v;
- utils::random_fill((char *) &v, sizeof(v));
- return entity_type(v, exclusive);
+ utils::random_fill(reinterpret_cast<char *>(&v), sizeof(v));
+ return {v, exclusive};
}
//! Fills a sequence of entity ids with cryptographic randomness. Much faster than calling random_entity() individually.
void fill_random_entities(span<entity_type> seq, bool exclusive = true) noexcept
{
- utils::random_fill((char *) seq.data(), seq.size() * sizeof(entity_type));
+ utils::random_fill(reinterpret_cast<char *>(seq.data()), seq.size() * sizeof(entity_type));
for(auto &i : seq)
- i.exclusive = exclusive;
+ {
+ i.exclusive = exclusive; // NOLINT
+ }
}
//! RAII holder for a lock on a sequence of entities
@@ -133,9 +135,9 @@ namespace algorithm
entity_type _entity;
public:
- shared_fs_mutex *parent;
+ shared_fs_mutex *parent{nullptr};
entities_type entities;
- unsigned long long hint;
+ unsigned long long hint{0};
entities_guard() = default;
entities_guard(shared_fs_mutex *_parent, entities_type _entities)
: parent(_parent)
@@ -152,10 +154,12 @@ namespace algorithm
}
entities_guard(const entities_guard &) = delete;
entities_guard &operator=(const entities_guard &) = delete;
- entities_guard(entities_guard &&o) noexcept : _entity(std::move(o._entity)), parent(o.parent), entities(std::move(o.entities)), hint(o.hint)
+ entities_guard(entities_guard &&o) noexcept : _entity(o._entity), parent(o.parent), entities(o.entities), hint(o.hint)
{
if(entities.data() == &o._entity)
+ {
entities = entities_type(&_entity, 1);
+ }
o.release();
}
entities_guard &operator=(entities_guard &&o) noexcept
@@ -166,8 +170,10 @@ namespace algorithm
}
~entities_guard()
{
- if(parent)
+ if(parent != nullptr)
+ {
unlock();
+ }
}
//! True if extent guard is valid
explicit operator bool() const noexcept { return parent != nullptr; }
@@ -176,7 +182,7 @@ namespace algorithm
//! Unlocks the locked entities immediately
void unlock() noexcept
{
- if(parent)
+ if(parent != nullptr)
{
parent->unlock(entities, hint);
release();
@@ -195,21 +201,21 @@ namespace algorithm
//! Lock all of a sequence of entities for exclusive or shared access
result<entities_guard> lock(entities_type entities, deadline d = deadline(), bool spin_not_sleep = false) noexcept
{
- entities_guard ret(this, std::move(entities));
- OUTCOME_TRYV(_lock(ret, std::move(d), spin_not_sleep));
+ entities_guard ret(this, entities);
+ OUTCOME_TRYV(_lock(ret, d, spin_not_sleep));
return std::move(ret);
}
//! Lock a single entity for exclusive or shared access
result<entities_guard> lock(entity_type entity, deadline d = deadline(), bool spin_not_sleep = false) noexcept
{
entities_guard ret(this, entity);
- OUTCOME_TRYV(_lock(ret, std::move(d), spin_not_sleep));
+ OUTCOME_TRYV(_lock(ret, d, spin_not_sleep));
return std::move(ret);
}
//! Try to lock all of a sequence of entities for exclusive or shared access
- result<entities_guard> try_lock(entities_type entities) noexcept { return lock(std::move(entities), deadline(std::chrono::seconds(0))); }
+ result<entities_guard> try_lock(entities_type entities) noexcept { return lock(entities, deadline(std::chrono::seconds(0))); }
//! Try to lock a single entity for exclusive or shared access
- result<entities_guard> try_lock(entity_type entity) noexcept { return lock(std::move(entity), deadline(std::chrono::seconds(0))); }
+ result<entities_guard> try_lock(entity_type entity) noexcept { return lock(entity, deadline(std::chrono::seconds(0))); }
//! Unlock a previously locked sequence of entities
virtual void unlock(entities_type entities, unsigned long long hint = 0) noexcept = 0;
};
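
Note: entity_from_buffer and entity_from_string collapse a 128-bit fast_hash into the 64-bit entity id by XORing its two halves, so every input bit still influences the id. A one-line sketch (uint128_halves is a hypothetical stand-in for QuickCppLib's uint128):

    #include <cstdint>
    struct uint128_halves { std::uint64_t lo, hi; };
    std::uint64_t entity_id(uint128_halves h) { return h.lo ^ h.hi; }
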
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
index 97b9282d..05065b3c 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
@@ -74,7 +74,7 @@ namespace algorithm
{
file_handle _h;
- byte_ranges(file_handle &&h)
+ explicit byte_ranges(file_handle &&h)
: _h(std::move(h))
{
}
@@ -109,7 +109,7 @@ namespace algorithm
const file_handle &handle() const noexcept { return _h; }
protected:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept final
{
AFIO_LOG_FUNCTION_CALL(this);
std::chrono::steady_clock::time_point began_steady;
@@ -117,16 +117,20 @@ namespace algorithm
if(d)
{
if((d).steady)
+ {
began_steady = std::chrono::steady_clock::now();
+ }
else
+ {
end_utc = (d).to_time_point();
+ }
}
// Fire this if an error occurs
auto disableunlock = undoer([&] { out.release(); });
size_t n;
for(;;)
{
- size_t was_contended = (size_t) -1;
+ auto was_contended = static_cast<size_t>(-1);
{
auto undo = undoer([&] {
// 0 to (n-1) need to be closed
@@ -135,7 +139,9 @@ namespace algorithm
--n;
// Now 0 to n needs to be closed
for(; n > 0; n--)
+ {
_h.unlock(out.entities[n].value, 1);
+ }
_h.unlock(out.entities[0].value, 1);
}
});
@@ -143,8 +149,10 @@ namespace algorithm
{
deadline nd;
// Only for very first entity will we sleep until its lock becomes available
- if(n)
+ if(n != 0u)
+ {
nd = deadline(std::chrono::seconds(0));
+ }
else
{
nd = deadline();
@@ -154,15 +162,21 @@ namespace algorithm
{
std::chrono::nanoseconds ns = std::chrono::duration_cast<std::chrono::nanoseconds>((began_steady + std::chrono::nanoseconds((d).nsecs)) - std::chrono::steady_clock::now());
if(ns.count() < 0)
+ {
(nd).nsecs = 0;
+ }
else
+ {
(nd).nsecs = ns.count();
+ }
}
else
+ {
(nd) = (d);
+ }
}
}
- auto outcome = _h.lock(out.entities[n].value, 1, out.entities[n].exclusive, nd);
+ auto outcome = _h.lock(out.entities[n].value, 1, out.entities[n].exclusive != 0u, nd);
if(!outcome)
{
was_contended = n;
@@ -181,12 +195,16 @@ namespace algorithm
if((d).steady)
{
if(std::chrono::steady_clock::now() >= (began_steady + std::chrono::nanoseconds((d).nsecs)))
+ {
return std::errc::timed_out;
+ }
}
else
{
if(std::chrono::system_clock::now() >= end_utc)
+ {
return std::errc::timed_out;
+ }
}
}
// Move was_contended to front and randomise rest of out.entities
@@ -195,13 +213,15 @@ namespace algorithm
++front;
QUICKCPPLIB_NAMESPACE::algorithm::small_prng::random_shuffle(front, out.entities.end());
if(!spin_not_sleep)
+ {
std::this_thread::yield();
+ }
}
// return success();
}
public:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long /*hint*/) noexcept final
{
AFIO_LOG_FUNCTION_CALL(this);
for(const auto &i : entities)
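
Note: each retry loop above re-derives a per-attempt deadline from whatever remains of the caller's overall deadline, clamping at zero so an expired deadline degrades into a single non-blocking try. A hedged sketch of that clamp:

    #include <chrono>
    std::chrono::nanoseconds remaining(std::chrono::steady_clock::time_point began,
                                       std::chrono::nanoseconds budget)
    {
      auto left = std::chrono::duration_cast<std::chrono::nanoseconds>(
          (began + budget) - std::chrono::steady_clock::now());
      // Never return a negative wait: zero means "try once, don't block".
      return left.count() < 0 ? std::chrono::nanoseconds(0) : left;
    }

The shuffle of the remaining entities after contention randomises acquisition order across lockers, avoiding the livelock of everyone retrying in the same sequence.
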
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/lock_files.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/lock_files.hpp
index cbb8b9a4..9fd42cd8 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/lock_files.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/lock_files.hpp
@@ -75,7 +75,7 @@ namespace algorithm
const path_handle &_path;
std::vector<file_handle> _hs;
- lock_files(const path_handle &o)
+ explicit lock_files(const path_handle &o)
: _path(o)
{
}
@@ -110,7 +110,7 @@ namespace algorithm
const path_handle &path() const noexcept { return _path; }
protected:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept final
{
AFIO_LOG_FUNCTION_CALL(this);
std::chrono::steady_clock::time_point began_steady;
@@ -118,9 +118,13 @@ namespace algorithm
if(d)
{
if((d).steady)
+ {
began_steady = std::chrono::steady_clock::now();
+ }
else
+ {
end_utc = (d).to_time_point();
+ }
}
size_t n;
// Create a set of paths to files to exclusively create
@@ -128,12 +132,12 @@ namespace algorithm
for(n = 0; n < out.entities.size(); n++)
{
auto v = out.entities[n].value;
- entity_paths[n] = QUICKCPPLIB_NAMESPACE::algorithm::string::to_hex_string(span<char>((char *) &v, 8));
+ entity_paths[n] = QUICKCPPLIB_NAMESPACE::algorithm::string::to_hex_string(span<char>(reinterpret_cast<char *>(&v), 8));
}
_hs.resize(out.entities.size());
do
{
- size_t was_contended = (size_t) -1;
+ auto was_contended = static_cast<size_t>(-1);
{
auto undo = undoer([&] {
// 0 to (n-1) need to be closed
@@ -155,7 +159,9 @@ namespace algorithm
{
const auto &ec = ret.error();
if(ec != std::errc::resource_unavailable_try_again && ec != std::errc::file_exists)
+ {
return ret.error();
+ }
// Collided with another locker
was_contended = n;
break;
@@ -163,7 +169,9 @@ namespace algorithm
_hs[n] = std::move(ret.value());
}
if(n == out.entities.size())
+ {
undo.dismiss();
+ }
}
if(n != out.entities.size())
{
@@ -172,12 +180,16 @@ namespace algorithm
if((d).steady)
{
if(std::chrono::steady_clock::now() >= (began_steady + std::chrono::nanoseconds((d).nsecs)))
+ {
return std::errc::timed_out;
+ }
}
else
{
if(std::chrono::system_clock::now() >= end_utc)
+ {
return std::errc::timed_out;
+ }
}
}
// Move was_contended to front and randomise rest of out.entities
@@ -187,14 +199,16 @@ namespace algorithm
QUICKCPPLIB_NAMESPACE::algorithm::small_prng::random_shuffle(front, out.entities.end());
// Sleep for a very short time
if(!spin_not_sleep)
+ {
std::this_thread::yield();
+ }
}
} while(n < out.entities.size());
return success();
}
public:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type, unsigned long long) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type /*entities*/, unsigned long long /*hint*/) noexcept final
{
AFIO_LOG_FUNCTION_CALL(this);
for(auto &i : _hs)
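
Note: lock_files models each locked entity as a uniquely named file created exclusively; creation succeeds for exactly one locker, and file_exists means contention. A minimal POSIX-only sketch of the primitive (LLFIO itself goes through its portable file_handle API):

    #include <fcntl.h>
    #include <unistd.h>
    // Returns an fd on success; -1 with errno == EEXIST means another
    // process already holds this lock file.
    int try_lock_file(const char *path)
    {
      return ::open(path, O_CREAT | O_EXCL | O_RDWR, 0600);
    }
    void unlock_file(const char *path, int fd)
    {
      ::close(fd);
      ::unlink(path);  // deleting the file is what releases the lock
    }
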
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
index 9c236b6e..23729493 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
@@ -101,8 +101,8 @@ namespace algorithm
private:
static constexpr size_t _container_entries = HashIndexSize / sizeof(spinlock_type);
using _hash_index_type = std::array<spinlock_type, _container_entries>;
- static constexpr file_handle::extent_type _initialisingoffset = (file_handle::extent_type) 1024 * 1024;
- static constexpr file_handle::extent_type _lockinuseoffset = (file_handle::extent_type) 1024 * 1024 + 1;
+ static constexpr file_handle::extent_type _initialisingoffset = static_cast<file_handle::extent_type>(1024) * 1024;
+ static constexpr file_handle::extent_type _lockinuseoffset = static_cast<file_handle::extent_type>(1024) * 1024 + 1;
file_handle _h, _temph;
file_handle::extent_guard _hlockinuse; // shared lock of last byte of _h marking if lock is in use
@@ -110,7 +110,7 @@ namespace algorithm
_hash_index_type &_index() const
{
- _hash_index_type *ret = (_hash_index_type *) _temphmap.address();
+ auto *ret = (_hash_index_type *) _temphmap.address();
return *ret;
}
@@ -136,7 +136,7 @@ namespace algorithm
new(this) memory_map(std::move(o));
return *this;
}
- ~memory_map()
+ ~memory_map() override
{
if(_h.is_valid())
{
@@ -197,11 +197,15 @@ namespace algorithm
if(lockinuse.has_error())
{
if(lockinuse.error() != std::errc::timed_out)
+ {
return lockinuse.error();
+ }
// Somebody else is also using this file, so try to read the hash index file I ought to use
lockinuse = ret.lock(_lockinuseoffset, 1, false); // inuse shared access, blocking
if(!lockinuse)
+ {
return lockinuse.error();
+ }
char buffer[65536];
memset(buffer, 0, sizeof(buffer));
OUTCOME_TRYV(ret.read(0, buffer, 65535));
@@ -223,36 +227,34 @@ namespace algorithm
OUTCOME_TRY(hmap, map_handle::map(hsection, 0, 0, section_handle::flag::read));
return memory_map(std::move(ret), std::move(temph), std::move(lockinuse.value()), std::move(hmap), std::move(temphmap));
}
- else
- {
- // I am the first person to be using this (stale?) file, so create a new hash index file in /tmp
- auto &tempdirh = path_discovery::memory_backed_temporary_files_directory().is_valid() ? path_discovery::memory_backed_temporary_files_directory() : path_discovery::storage_backed_temporary_files_directory();
- OUTCOME_TRY(_temph, file_handle::random_file(tempdirh));
- temph = std::move(_temph);
- // Truncate it out to the hash index size, and map it into memory for read/write access
- OUTCOME_TRYV(temph.truncate(HashIndexSize));
- OUTCOME_TRY(temphsection, section_handle::section(temph, HashIndexSize));
- OUTCOME_TRY(temphmap, map_handle::map(temphsection, HashIndexSize));
- // Write the path of my new hash index file, padding zeros to the nearest page size
- // multiple to work around a race condition in the Linux kernel
- OUTCOME_TRY(temppath, temph.current_path());
- char buffer[4096];
- memset(buffer, 0, sizeof(buffer));
- size_t bytes = temppath.native().size() * sizeof(*temppath.c_str());
- file_handle::const_buffer_type buffers[] = {{(const char *) temppath.c_str(), bytes}, {(const char *) buffer, 4096 - (bytes % 4096)}};
- OUTCOME_TRYV(ret.truncate(65536));
- OUTCOME_TRYV(ret.write({buffers, 0}));
- // Map for read the maximum possible path file size, again to avoid race problems
- OUTCOME_TRY(hsection, section_handle::section(ret, 65536, section_handle::flag::read));
- OUTCOME_TRY(hmap, map_handle::map(hsection, 0, 0, section_handle::flag::read));
- /* Take shared locks on inuse. Even if this implementation doesn't implement
- atomic downgrade of exclusive range to shared range, we're fully prepared for other users
- now. The _initialisingoffset remains exclusive to prevent double entry into this init routine.
- */
- OUTCOME_TRY(lockinuse2, ret.lock(_lockinuseoffset, 1, false));
- lockinuse = std::move(lockinuse2); // releases exclusive lock on all three offsets
- return memory_map(std::move(ret), std::move(temph), std::move(lockinuse.value()), std::move(hmap), std::move(temphmap));
- }
+
+ // I am the first person to be using this (stale?) file, so create a new hash index file in /tmp
+ auto &tempdirh = path_discovery::memory_backed_temporary_files_directory().is_valid() ? path_discovery::memory_backed_temporary_files_directory() : path_discovery::storage_backed_temporary_files_directory();
+ OUTCOME_TRY(_temph, file_handle::random_file(tempdirh));
+ temph = std::move(_temph);
+ // Truncate it out to the hash index size, and map it into memory for read/write access
+ OUTCOME_TRYV(temph.truncate(HashIndexSize));
+ OUTCOME_TRY(temphsection, section_handle::section(temph, HashIndexSize));
+ OUTCOME_TRY(temphmap, map_handle::map(temphsection, HashIndexSize));
+ // Write the path of my new hash index file, padding zeros to the nearest page size
+ // multiple to work around a race condition in the Linux kernel
+ OUTCOME_TRY(temppath, temph.current_path());
+ char buffer[4096];
+ memset(buffer, 0, sizeof(buffer));
+ size_t bytes = temppath.native().size() * sizeof(*temppath.c_str());
+ file_handle::const_buffer_type buffers[] = {{static_cast<const char *>(temppath.c_str()), bytes}, {(const char *) buffer, 4096 - (bytes % 4096)}};
+ OUTCOME_TRYV(ret.truncate(65536));
+ OUTCOME_TRYV(ret.write({buffers, 0}));
+ // Map for read the maximum possible path file size, again to avoid race problems
+ OUTCOME_TRY(hsection, section_handle::section(ret, 65536, section_handle::flag::read));
+ OUTCOME_TRY(hmap, map_handle::map(hsection, 0, 0, section_handle::flag::read));
+ /* Take shared locks on inuse. Even if this implementation doesn't implement
+ atomic downgrade of exclusive range to shared range, we're fully prepared for other users
+ now. The _initialisingoffset remains exclusive to prevent double entry into this init routine.
+ */
+ OUTCOME_TRY(lockinuse2, ret.lock(_lockinuseoffset, 1, false));
+ lockinuse = std::move(lockinuse2); // releases exclusive lock on all three offsets
+ return memory_map(std::move(ret), std::move(temph), std::move(lockinuse.value()), std::move(hmap), std::move(temphmap));
}
catch(...)
{
@@ -283,16 +285,20 @@ namespace algorithm
if(entity_to_idx[m].value == ep->value)
{
if(ep->exclusive && !entity_to_idx[m].exclusive)
+ {
entity_to_idx[m].exclusive = true;
+ }
skip = true;
}
}
if(!skip)
+ {
++ep;
+ }
}
return span<_entity_idx>(entity_to_idx, ep - entity_to_idx);
}
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept final
{
AFIO_LOG_FUNCTION_CALL(this);
std::chrono::steady_clock::time_point began_steady;
@@ -300,9 +306,13 @@ namespace algorithm
if(d)
{
if((d).steady)
+ {
began_steady = std::chrono::steady_clock::now();
+ }
else
+ {
end_utc = (d).to_time_point();
+ }
}
// alloca() always returns 16 byte aligned addresses
span<_entity_idx> entity_to_idx(_hash_entities((_entity_idx *) alloca(sizeof(_entity_idx) * out.entities.size()), out.entities));
@@ -312,7 +322,7 @@ namespace algorithm
size_t n;
for(;;)
{
- size_t was_contended = (size_t) -1;
+ auto was_contended = static_cast<size_t>(-1);
{
auto undo = undoer([&] {
// 0 to (n-1) need to be closed
@@ -321,7 +331,9 @@ namespace algorithm
--n;
// Now 0 to n needs to be closed
for(; n > 0; n--)
+ {
entity_to_idx[n].exclusive ? index[entity_to_idx[n].value].unlock() : index[entity_to_idx[n].value].unlock_shared();
+ }
entity_to_idx[0].exclusive ? index[entity_to_idx[0].value].unlock() : index[entity_to_idx[0].value].unlock_shared();
}
});
@@ -344,12 +356,16 @@ namespace algorithm
if((d).steady)
{
if(std::chrono::steady_clock::now() >= (began_steady + std::chrono::nanoseconds((d).nsecs)))
+ {
return std::errc::timed_out;
+ }
}
else
{
if(std::chrono::system_clock::now() >= end_utc)
+ {
return std::errc::timed_out;
+ }
}
}
// Move was_contended to front and randomise rest of out.entities
@@ -358,13 +374,15 @@ namespace algorithm
++front;
QUICKCPPLIB_NAMESPACE::algorithm::small_prng::random_shuffle(front, entity_to_idx.end());
if(!spin_not_sleep)
+ {
std::this_thread::yield();
+ }
}
// return success();
}
public:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long /*unused*/) noexcept override final
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long /*unused*/) noexcept final
{
AFIO_LOG_FUNCTION_CALL(this);
span<_entity_idx> entity_to_idx(_hash_entities((_entity_idx *) alloca(sizeof(_entity_idx) * entities.size()), entities));
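
Note: memory_map hashes each entity into a fixed-size array of spinlocks kept in a shared memory mapping; entities colliding on a slot simply share a lock, which over-locks but never under-locks. A hedged sketch of slot addressing plus a byte-sized spinlock (sizes and names hypothetical):

    #include <array>
    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t slots = 4096;  // stand-in for HashIndexSize / sizeof(spinlock_type)
    std::array<std::atomic<unsigned char>, slots> index;
    std::size_t slot_of(std::uint64_t entity_id) { return entity_id % slots; }
    void lock_slot(std::size_t s)
    {
      unsigned char expected = 0;
      while(!index[s].compare_exchange_weak(expected, 1))
      {
        expected = 0;  // spin until we flip the slot 0 -> 1
      }
    }
    void unlock_slot(std::size_t s) { index[s].store(0); }
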
diff --git a/include/afio/v2.0/algorithm/shared_fs_mutex/safe_byte_ranges.hpp b/include/afio/v2.0/algorithm/shared_fs_mutex/safe_byte_ranges.hpp
index aeb4716d..7c679c3b 100644
--- a/include/afio/v2.0/algorithm/shared_fs_mutex/safe_byte_ranges.hpp
+++ b/include/afio/v2.0/algorithm/shared_fs_mutex/safe_byte_ranges.hpp
@@ -123,7 +123,7 @@ namespace algorithm
{
std::shared_ptr<shared_fs_mutex> _p;
- safe_byte_ranges(std::shared_ptr<shared_fs_mutex> p)
+ explicit safe_byte_ranges(std::shared_ptr<shared_fs_mutex> p)
: _p(std::move(p))
{
}
@@ -154,10 +154,10 @@ namespace algorithm
}
protected:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept override final { return _p->_lock(out, d, spin_not_sleep); }
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC result<void> _lock(entities_guard &out, deadline d, bool spin_not_sleep) noexcept final { return _p->_lock(out, d, spin_not_sleep); }
public:
- AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long hint) noexcept override final { return _p->unlock(entities, hint); }
+ AFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(entities_type entities, unsigned long long hint) noexcept final { return _p->unlock(entities, hint); }
};
} // namespace shared_fs_mutex
diff --git a/include/afio/v2.0/algorithm/trivial_vector.hpp b/include/afio/v2.0/algorithm/trivial_vector.hpp
index 01f57aab..a96c274f 100644
--- a/include/afio/v2.0/algorithm/trivial_vector.hpp
+++ b/include/afio/v2.0/algorithm/trivial_vector.hpp
@@ -102,7 +102,7 @@ namespace algorithm
return *this;
}
//! Decrement
- trivial_vector_iterator operator--(int /*unused*/)
+ const trivial_vector_iterator operator--(int /*unused*/)
{
trivial_vector_iterator ret(*this);
_v--;
@@ -116,7 +116,7 @@ namespace algorithm
return *this;
}
//! Increment
- trivial_vector_iterator operator++(int /*unused*/)
+ const trivial_vector_iterator operator++(int /*unused*/)
{
trivial_vector_iterator ret(*this);
_v++;
@@ -243,14 +243,18 @@ namespace algorithm
reference at(size_type i)
{
if(i >= size())
+ {
throw std::out_of_range("bounds exceeded");
+ }
return _begin[i];
}
//! Item index, bounds checked
const_reference at(size_type i) const
{
if(i >= size())
+ {
throw std::out_of_range("bounds exceeded");
+ }
return _begin[i];
}
//! Item index, unchecked
@@ -300,12 +304,14 @@ namespace algorithm
//! Items in container
size_type size() const noexcept { return _end - _begin; }
//! Maximum items in container
- size_type max_size() const noexcept { return (map_handle::size_type) -1 / sizeof(T); }
+ size_type max_size() const noexcept { return static_cast<map_handle::size_type>(-1) / sizeof(T); }
//! Increase capacity
void reserve(size_type n)
{
if(n > max_size())
+ {
throw std::length_error("Max size exceeded");
+ }
size_type current_size = size();
size_type bytes = n * sizeof(value_type);
bytes = utils::round_up_to_page_size(bytes);
@@ -485,7 +491,7 @@ namespace algorithm
{
reserve(count);
}
- // TODO: Kinda assuming the compiler will do the right thing below, should really check that
+ // TODO(ned): Kinda assuming the compiler will do the right thing below, should really check that
while(count > size())
{
emplace_back(v);
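
Note: reserve rounds the requested byte count up to a whole page before growing the backing mapping, because memory maps grow in page units. A sketch of the rounding (the real utils::round_up_to_page_size may differ in signature):

    #include <cstddef>
    // Round n up to the next multiple of page_size, assumed a power of two.
    std::size_t round_up_to_page_size(std::size_t n, std::size_t page_size)
    {
      return (n + page_size - 1) & ~(page_size - 1);
    }
    // With 4096-byte pages, reserving 10000 bytes maps 12288 bytes (3 pages).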