
github.com/windirstat/llfio.git
Diffstat (limited to 'include/llfio/v2.0/algorithm')
-rw-r--r--  include/llfio/v2.0/algorithm/handle_adapter/combining.hpp       | 73
-rw-r--r--  include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp  | 10
-rw-r--r--  include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp    |  8
-rw-r--r--  include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp     |  8
4 files changed, 50 insertions(+), 49 deletions(-)
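
The diffs below rename file_handle's byte-range locking API: lock()/try_lock()/unlock() become lock_range()/try_lock_range()/unlock_range(), and the bool exclusive parameter is replaced by an explicit lock_kind enumeration (shared or exclusive). A minimal sketch of the new spelling, assuming an already-open llfio::file_handle named fh (a hypothetical name) and the two-argument OUTCOME_TRY form used throughout this diff:

namespace llfio = LLFIO_V2_NAMESPACE;

llfio::result<void> exclusive_then_release(llfio::file_handle &fh) noexcept
{
  using file_handle = llfio::file_handle;
  // Old spelling: fh.lock(0, 4096, true). New: an explicit lock_kind.
  OUTCOME_TRY(guard, fh.lock_range(0, 4096, file_handle::lock_kind::exclusive));
  // ... i/o on the locked extent here ...
  guard.unlock();  // the extent_guard would also unlock on destruction
  return llfio::success();
}
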
diff --git a/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp b/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
index 5c794503..bf2661eb 100644
--- a/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
+++ b/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
@@ -305,37 +305,63 @@ namespace algorithm
}
return std::move(reqs.buffers);
}
+ };
+ template <template <class, class> class Op, class Target, class Source> class combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, true> : public combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>
+ {
+ using _base = combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>;
+
+ protected:
+ static constexpr bool _have_source = _base::_have_source;
+
+ public:
+ using path_type = io_handle::path_type;
+ using extent_type = io_handle::extent_type;
+ using size_type = io_handle::size_type;
+ using mode = io_handle::mode;
+ using creation = io_handle::creation;
+ using caching = io_handle::caching;
+ using flag = io_handle::flag;
+ using buffer_type = io_handle::buffer_type;
+ using const_buffer_type = io_handle::const_buffer_type;
+ using buffers_type = io_handle::buffers_type;
+ using const_buffers_type = io_handle::const_buffers_type;
+ template <class T> using io_request = io_handle::io_request<T>;
+ template <class T> using io_result = io_handle::io_result<T>;
+
+ combining_handle_adapter_base() = default;
+ using _base::_base;
- using extent_guard = typename Base::extent_guard;
+ using lock_kind = typename _base::lock_kind;
+ using extent_guard = typename _base::extent_guard;
private:
struct _extent_guard : public extent_guard
{
friend class combining_handle_adapter;
_extent_guard() = default;
- constexpr _extent_guard(io_handle *h, extent_type offset, extent_type length, bool exclusive)
- : extent_guard(h, offset, length, exclusive)
+ constexpr _extent_guard(file_handle *h, extent_type offset, extent_type length, lock_kind kind)
+ : extent_guard(h, offset, length, kind)
{
}
};
public:
//! \brief Lock the given extent in one or both of the attached handles. Any second handle is always locked for shared.
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock(extent_type offset, extent_type bytes, bool exclusive = true, deadline d = deadline()) noexcept override
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock_range(extent_type offset, extent_type bytes, lock_kind kind, deadline d = deadline()) noexcept override
{
optional<result<extent_guard>> _locks[2];
#if !defined(LLFIO_DISABLE_OPENMP) && defined(_OPENMP)
-#pragma omp parallel for if(_have_source && (_flags &flag::disable_parallelism) == 0)
+#pragma omp parallel for if(_have_source && (_flags & flag::disable_parallelism) == 0)
#endif
for(size_t n = 0; n < 2; n++)
{
if(n == 0)
{
- _locks[n] = _target->lock(offset, bytes, exclusive, d);
+ _locks[n] = this->_target->lock_range(offset, bytes, kind, d);
}
else if(_have_source)
{
- _locks[n] = _source->lock(offset, bytes, false, d);
+ _locks[n] = this->_source->lock_range(offset, bytes, lock_kind::shared, d);
}
}
// Handle any errors
@@ -348,42 +374,17 @@ namespace algorithm
OUTCOME_TRY(_, std::move(*_locks[1]));
_.release();
}
- return _extent_guard(this, offset, bytes, exclusive);
+ return _extent_guard(this, offset, bytes, kind);
}
//! \brief Unlock the given extent in one or both of the attached handles.
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(extent_type offset, extent_type bytes) noexcept override
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock_range(extent_type offset, extent_type bytes) noexcept override
{
- _target->unlock(offset, bytes);
+ this->_target->unlock_range(offset, bytes);
if(_have_source)
{
- _source->unlock(offset, bytes);
+ this->_source->unlock_range(offset, bytes);
}
}
- };
- template <template <class, class> class Op, class Target, class Source> class combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, true> : public combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>
- {
- using _base = combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>;
-
- protected:
- static constexpr bool _have_source = _base::_have_source;
-
- public:
- using path_type = io_handle::path_type;
- using extent_type = io_handle::extent_type;
- using size_type = io_handle::size_type;
- using mode = io_handle::mode;
- using creation = io_handle::creation;
- using caching = io_handle::caching;
- using flag = io_handle::flag;
- using buffer_type = io_handle::buffer_type;
- using const_buffer_type = io_handle::const_buffer_type;
- using buffers_type = io_handle::buffers_type;
- using const_buffers_type = io_handle::const_buffers_type;
- template <class T> using io_request = io_handle::io_request<T>;
- template <class T> using io_result = io_handle::io_result<T>;
-
- combining_handle_adapter_base() = default;
- using _base::_base;
//! \brief Return the lesser of one or both of the attached handles
LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_type> maximum_extent() const noexcept override
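
In the combining adapter above, lock_range() applies the caller's lock_kind to the target handle while any source handle is only ever locked shared; under OpenMP the two acquisitions may run in parallel. A condensed sketch of that two-handle pattern, using hypothetical standalone handles target and source in place of the adapter's members:

namespace llfio = LLFIO_V2_NAMESPACE;
using file_handle = llfio::file_handle;

llfio::result<void> lock_pair(file_handle &target, file_handle &source,
                              file_handle::extent_type offset,
                              file_handle::extent_type bytes,
                              file_handle::lock_kind kind) noexcept
{
  // The handle being written honours the requested kind ...
  OUTCOME_TRY(tguard, target.lock_range(offset, bytes, kind));
  // ... but a read-only source never needs more than shared access.
  OUTCOME_TRY(sguard, source.lock_range(offset, bytes, file_handle::lock_kind::shared));
  // ... combined i/o under both locks here ...
  return llfio::success();
}  // sguard, then tguard, unlock on scope exit
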
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
index 9a1811b5..d8d0f354 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
@@ -206,7 +206,7 @@ namespace algorithm
OUTCOME_TRY(ret, file_handle::file(base, lockfile, file_handle::mode::write, file_handle::creation::if_needed, file_handle::caching::temporary));
atomic_append_detail::header header;
// Lock the entire header for exclusive access
- auto lockresult = ret.try_lock(0, sizeof(header), true);
+ auto lockresult = ret.try_lock_range(0, sizeof(header), file_handle::lock_kind::exclusive);
//! \todo fs_mutex_append needs to check if file still exists after lock is granted, awaiting path fetching.
if(lockresult.has_error())
{
@@ -231,7 +231,7 @@ namespace algorithm
OUTCOME_TRYV(ret.write(0, {{reinterpret_cast<byte *>(&header), sizeof(header)}}));
}
// Open a shared lock on last byte in header to prevent other users zomping the file
- OUTCOME_TRY(guard, ret.lock(sizeof(header) - 1, 1, false));
+ OUTCOME_TRY(guard, ret.lock_range(sizeof(header) - 1, 1, file_handle::lock_kind::shared));
// Unlock any exclusive lock I gained earlier now
if(lockresult)
{
@@ -292,7 +292,7 @@ namespace algorithm
auto lastbyte = static_cast<file_handle::extent_type>(-1);
// Lock up to the beginning of the shadow lock space
lastbyte &= ~(1ULL << 63U);
- OUTCOME_TRY(append_guard_, _h.lock(my_lock_request_offset, lastbyte, true));
+ OUTCOME_TRY(append_guard_, _h.lock_range(my_lock_request_offset, lastbyte, file_handle::lock_kind::exclusive));
append_guard = std::move(append_guard_);
}
OUTCOME_TRYV(_h.write(0, {{reinterpret_cast<byte *>(&lock_request), sizeof(lock_request)}}));
@@ -338,7 +338,7 @@ namespace algorithm
auto lock_offset = my_lock_request_offset;
// Set the top bit to use the shadow lock space on Windows
lock_offset |= (1ULL << 63U);
- OUTCOME_TRY(my_request_guard_, _h.lock(lock_offset, sizeof(lock_request), true));
+ OUTCOME_TRY(my_request_guard_, _h.lock_range(lock_offset, sizeof(lock_request), file_handle::lock_kind::exclusive));
my_request_guard = std::move(my_request_guard_);
}
@@ -446,7 +446,7 @@ namespace algorithm
auto lock_offset = record_offset;
// Set the top bit to use the shadow lock space on Windows
lock_offset |= (1ULL << 63U);
- OUTCOME_TRYV(_h.lock(lock_offset, sizeof(*record), false, nd));
+ OUTCOME_TRYV(_h.lock_range(lock_offset, sizeof(*record), file_handle::lock_kind::shared, nd));
}
// Make sure we haven't timed out during this wait
if(d)
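
atomic_append above keeps its per-record locks from colliding with real file extents by setting the top bit of the lock offset, moving them into a shadow lock space on Windows; conversely, the exclusive append lock clears that bit (lastbyte &= ~(1ULL << 63U)) so it stops just short of the shadow space. The arithmetic is just:

#include <cstdint>

// Top bit selects the shadow lock space; the remaining 63 bits keep
// the record offset intact.
constexpr std::uint64_t shadow_offset(std::uint64_t record_offset) noexcept
{
  return record_offset | (1ULL << 63U);
}

static_assert(shadow_offset(0x80) == 0x8000000000000080ULL, "top bit set, offset preserved");
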
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
index cb42ce05..7e952bac 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
@@ -143,9 +143,9 @@ namespace algorithm
// Now 0 to n needs to be closed
for(; n > 0; n--)
{
- _h.unlock(out.entities[n].value, 1);
+ _h.unlock_range(out.entities[n].value, 1);
}
- _h.unlock(out.entities[0].value, 1);
+ _h.unlock_range(out.entities[0].value, 1);
}
});
for(n = 0; n < out.entities.size(); n++)
@@ -179,7 +179,7 @@ namespace algorithm
}
}
}
- auto outcome = _h.lock(out.entities[n].value, 1, out.entities[n].exclusive != 0u, nd);
+ auto outcome = _h.lock_range(out.entities[n].value, 1, (out.entities[n].exclusive != 0u) ? file_handle::lock_kind::exclusive : file_handle::lock_kind::shared, nd);
if(!outcome)
{
was_contended = n;
@@ -229,7 +229,7 @@ namespace algorithm
LLFIO_LOG_FUNCTION_CALL(this);
for(const auto &i : entities)
{
- _h.unlock(i.value, 1);
+ _h.unlock_range(i.value, 1);
}
}
};
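
byte_ranges above represents each entity as a one-byte lock at the entity's value, taken shared or exclusive as requested, and releases everything already acquired if a later lock fails. A condensed sketch of that acquire-or-roll-back loop, with the entity list simplified to a vector of offsets and deadline handling reduced to a single pass:

#include <cstddef>
#include <cstdint>
#include <vector>

namespace llfio = LLFIO_V2_NAMESPACE;
using file_handle = llfio::file_handle;

llfio::result<void> lock_all(file_handle &h, const std::vector<std::uint64_t> &values,
                             bool exclusive, llfio::deadline nd) noexcept
{
  for(std::size_t n = 0; n < values.size(); n++)
  {
    auto outcome = h.lock_range(values[n], 1,
                                exclusive ? file_handle::lock_kind::exclusive
                                          : file_handle::lock_kind::shared,
                                nd);
    if(!outcome)
    {
      // Roll back every byte locked so far before reporting the failure
      while(n > 0)
      {
        h.unlock_range(values[--n], 1);
      }
      return std::move(outcome).error();
    }
    outcome.value().release();  // ownership tracked manually; undone via unlock_range()
  }
  return llfio::success();
}
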
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
index 9b8c6941..c16202ee 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
@@ -148,7 +148,7 @@ namespace algorithm
_temphmap = {};
// Release my shared locks and try locking inuse exclusively
_hlockinuse.unlock();
- auto lockresult = _h.try_lock(_initialisingoffset, 2, true);
+ auto lockresult = _h.try_lock_range(_initialisingoffset, 2, file_handle::lock_kind::exclusive);
#ifndef NDEBUG
if(!lockresult && lockresult.error() != errc::timed_out)
{
@@ -202,7 +202,7 @@ namespace algorithm
OUTCOME_TRY(ret, file_handle::file(base, lockfile, file_handle::mode::write, file_handle::creation::if_needed, file_handle::caching::reads));
file_handle temph;
// Am I the first person to this file? Lock everything exclusively
- auto lockinuse = ret.try_lock(_initialisingoffset, 2, true);
+ auto lockinuse = ret.try_lock_range(_initialisingoffset, 2, file_handle::lock_kind::exclusive);
if(lockinuse.has_error())
{
if(lockinuse.error() != errc::timed_out)
@@ -210,7 +210,7 @@ namespace algorithm
return std::move(lockinuse).error();
}
// Somebody else is also using this file, so try to read the hash index file I ought to use
- lockinuse = ret.lock(_lockinuseoffset, 1, false); // inuse shared access, blocking
+ lockinuse = ret.lock_range(_lockinuseoffset, 1, file_handle::lock_kind::shared); // inuse shared access, blocking
if(!lockinuse)
{
return std::move(lockinuse).error();
@@ -261,7 +261,7 @@ namespace algorithm
atomic downgrade of exclusive range to shared range, we're fully prepared for other users
now. The _initialisingoffset remains exclusive to prevent double entry into this init routine.
*/
- OUTCOME_TRY(lockinuse2, ret.lock(_lockinuseoffset, 1, false));
+ OUTCOME_TRY(lockinuse2, ret.lock_range(_lockinuseoffset, 1, file_handle::lock_kind::shared));
lockinuse = std::move(lockinuse2); // releases exclusive lock on all three offsets
return memory_map(std::move(ret), std::move(temph), std::move(lockinuse.value()), std::move(hmap), std::move(temphmap));
}
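
memory_map above uses try_lock_range() on the two initialising bytes to detect whether it is the first user of the lock file: success means initialise the shared state under the exclusive lock, while a timed-out error means another user got there first and a blocking shared lock on the in-use byte suffices. A sketch of that decision, with hypothetical offsets standing in for _initialisingoffset and _lockinuseoffset, and with guard lifetimes simplified:

#include <system_error>

namespace llfio = LLFIO_V2_NAMESPACE;
using file_handle = llfio::file_handle;

// Hypothetical stand-ins for _initialisingoffset and _lockinuseoffset
static constexpr file_handle::extent_type initialising_offset = (1ULL << 62U);
static constexpr file_handle::extent_type inuse_offset = (1ULL << 62U) + 2;

llfio::result<bool> i_am_first_user(file_handle &h) noexcept
{
  auto excl = h.try_lock_range(initialising_offset, 2, file_handle::lock_kind::exclusive);
  if(excl)
  {
    return true;  // first user: the real code initialises state while this guard is held
  }
  if(excl.error() != std::errc::timed_out)
  {
    return std::move(excl).error();  // a real failure, not contention
  }
  // Somebody else initialised (or is initialising): block on shared access
  OUTCOME_TRY(guard, h.lock_range(inuse_offset, 1, file_handle::lock_kind::shared));
  guard.release();  // simplified: a real caller would keep this guard alive
  return false;
}
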