Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/windirstat/llfio.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNiall Douglas (s [underscore] sourceforge {at} nedprod [dot] com) <spamtrap@nedprod.com>2019-11-18 14:14:56 +0300
committerNiall Douglas (s [underscore] sourceforge {at} nedprod [dot] com) <spamtrap@nedprod.com>2019-11-18 14:14:56 +0300
commite66a774d599804a31ebbb701874ca2dde5e36e10 (patch)
tree6630d2fbbf2b9634190deea9c3f9a51e401d4023
parentb6c21fdf2dcf0f7c72e117cfc8396fdbb9d5973c (diff)
As per WG21 guidance, moved byte range locking API out of io_handle, and into an extension to file_handle. This makes space to make a future lockable_io_handle to model SharedMutex, which locks an inode like a std::shared_mutex. Note that the new .lock_range() API no longer does special semantics if you pass in zero length to lock the whole file.develop
-rw-r--r--include/llfio/revision.hpp6
-rw-r--r--include/llfio/v2.0/algorithm/handle_adapter/combining.hpp73
-rw-r--r--include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp10
-rw-r--r--include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp8
-rw-r--r--include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp8
-rw-r--r--include/llfio/v2.0/detail/impl/posix/file_handle.ipp124
-rw-r--r--include/llfio/v2.0/detail/impl/posix/io_handle.ipp121
-rw-r--r--include/llfio/v2.0/detail/impl/safe_byte_ranges.ipp12
-rw-r--r--include/llfio/v2.0/detail/impl/windows/file_handle.ipp84
-rw-r--r--include/llfio/v2.0/detail/impl/windows/io_handle.ipp84
-rw-r--r--include/llfio/v2.0/fast_random_file_handle.hpp6
-rw-r--r--include/llfio/v2.0/file_handle.hpp166
-rw-r--r--include/llfio/v2.0/handle.hpp2
-rw-r--r--include/llfio/v2.0/io_handle.hpp161
-rw-r--r--test/tests/file_handle_lock_unlock.cpp12
15 files changed, 443 insertions, 434 deletions
diff --git a/include/llfio/revision.hpp b/include/llfio/revision.hpp
index e13e53cd..f21c9150 100644
--- a/include/llfio/revision.hpp
+++ b/include/llfio/revision.hpp
@@ -1,4 +1,4 @@
// Note the second line of this file must ALWAYS be the git SHA, third line ALWAYS the git SHA update time
-#define LLFIO_PREVIOUS_COMMIT_REF 1ebceb7e076b3d6d0f51f185f94d65493e26f192
-#define LLFIO_PREVIOUS_COMMIT_DATE "2019-10-19 18:01:38 +00:00"
-#define LLFIO_PREVIOUS_COMMIT_UNIQUE 1ebceb7e
+#define LLFIO_PREVIOUS_COMMIT_REF b6c21fdf2dcf0f7c72e117cfc8396fdbb9d5973c
+#define LLFIO_PREVIOUS_COMMIT_DATE "2019-11-15 17:29:01 +00:00"
+#define LLFIO_PREVIOUS_COMMIT_UNIQUE b6c21fdf
diff --git a/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp b/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
index 5c794503..bf2661eb 100644
--- a/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
+++ b/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
@@ -305,37 +305,63 @@ namespace algorithm
}
return std::move(reqs.buffers);
}
+ };
+ template <template <class, class> class Op, class Target, class Source> class combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, true> : public combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>
+ {
+ using _base = combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>;
+
+ protected:
+ static constexpr bool _have_source = _base::_have_source;
+
+ public:
+ using path_type = io_handle::path_type;
+ using extent_type = io_handle::extent_type;
+ using size_type = io_handle::size_type;
+ using mode = io_handle::mode;
+ using creation = io_handle::creation;
+ using caching = io_handle::caching;
+ using flag = io_handle::flag;
+ using buffer_type = io_handle::buffer_type;
+ using const_buffer_type = io_handle::const_buffer_type;
+ using buffers_type = io_handle::buffers_type;
+ using const_buffers_type = io_handle::const_buffers_type;
+ template <class T> using io_request = io_handle::io_request<T>;
+ template <class T> using io_result = io_handle::io_result<T>;
+
+ combining_handle_adapter_base() = default;
+ using _base::_base;
- using extent_guard = typename Base::extent_guard;
+ using lock_kind = typename _base::lock_kind;
+ using extent_guard = typename _base::extent_guard;
private:
struct _extent_guard : public extent_guard
{
friend class combining_handle_adapter;
_extent_guard() = default;
- constexpr _extent_guard(io_handle *h, extent_type offset, extent_type length, bool exclusive)
- : extent_guard(h, offset, length, exclusive)
+ constexpr _extent_guard(file_handle *h, extent_type offset, extent_type length, lock_kind kind)
+ : extent_guard(h, offset, length, kind)
{
}
};
public:
//! \brief Lock the given extent in one or both of the attached handles. Any second handle is always locked for shared.
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock(extent_type offset, extent_type bytes, bool exclusive = true, deadline d = deadline()) noexcept override
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock_range(extent_type offset, extent_type bytes, lock_kind kind, deadline d = deadline()) noexcept override
{
optional<result<extent_guard>> _locks[2];
#if !defined(LLFIO_DISABLE_OPENMP) && defined(_OPENMP)
-#pragma omp parallel for if(_have_source && (_flags &flag::disable_parallelism) == 0)
+#pragma omp parallel for if(_have_source && (_flags & flag::disable_parallelism) == 0)
#endif
for(size_t n = 0; n < 2; n++)
{
if(n == 0)
{
- _locks[n] = _target->lock(offset, bytes, exclusive, d);
+ _locks[n] = this->_target->lock_range(offset, bytes, kind, d);
}
else if(_have_source)
{
- _locks[n] = _source->lock(offset, bytes, false, d);
+ _locks[n] = this->_source->lock_range(offset, bytes, lock_kind::shared, d);
}
}
// Handle any errors
@@ -348,42 +374,17 @@ namespace algorithm
OUTCOME_TRY(_, std::move(*_locks[1]));
_.release();
}
- return _extent_guard(this, offset, bytes, exclusive);
+ return _extent_guard(this, offset, bytes, kind);
}
//! \brief Unlock the given extent in one or both of the attached handles.
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(extent_type offset, extent_type bytes) noexcept override
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock_range(extent_type offset, extent_type bytes) noexcept override
{
- _target->unlock(offset, bytes);
+ this->_target->unlock_range(offset, bytes);
if(_have_source)
{
- _source->unlock(offset, bytes);
+ this->_source->unlock_range(offset, bytes);
}
}
- };
- template <template <class, class> class Op, class Target, class Source> class combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, true> : public combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>
- {
- using _base = combining_handle_adapter_base<Op, Target, Source, file_handle_wrapper, false>;
-
- protected:
- static constexpr bool _have_source = _base::_have_source;
-
- public:
- using path_type = io_handle::path_type;
- using extent_type = io_handle::extent_type;
- using size_type = io_handle::size_type;
- using mode = io_handle::mode;
- using creation = io_handle::creation;
- using caching = io_handle::caching;
- using flag = io_handle::flag;
- using buffer_type = io_handle::buffer_type;
- using const_buffer_type = io_handle::const_buffer_type;
- using buffers_type = io_handle::buffers_type;
- using const_buffers_type = io_handle::const_buffers_type;
- template <class T> using io_request = io_handle::io_request<T>;
- template <class T> using io_result = io_handle::io_result<T>;
-
- combining_handle_adapter_base() = default;
- using _base::_base;
//! \brief Return the lesser of one or both of the attached handles
LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_type> maximum_extent() const noexcept override
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
index 9a1811b5..d8d0f354 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
@@ -206,7 +206,7 @@ namespace algorithm
OUTCOME_TRY(ret, file_handle::file(base, lockfile, file_handle::mode::write, file_handle::creation::if_needed, file_handle::caching::temporary));
atomic_append_detail::header header;
// Lock the entire header for exclusive access
- auto lockresult = ret.try_lock(0, sizeof(header), true);
+ auto lockresult = ret.try_lock_range(0, sizeof(header), file_handle::lock_kind::exclusive);
//! \todo fs_mutex_append needs to check if file still exists after lock is granted, awaiting path fetching.
if(lockresult.has_error())
{
@@ -231,7 +231,7 @@ namespace algorithm
OUTCOME_TRYV(ret.write(0, {{reinterpret_cast<byte *>(&header), sizeof(header)}}));
}
// Open a shared lock on last byte in header to prevent other users zomping the file
- OUTCOME_TRY(guard, ret.lock(sizeof(header) - 1, 1, false));
+ OUTCOME_TRY(guard, ret.lock_range(sizeof(header) - 1, 1, file_handle::lock_kind::shared));
// Unlock any exclusive lock I gained earlier now
if(lockresult)
{
@@ -292,7 +292,7 @@ namespace algorithm
auto lastbyte = static_cast<file_handle::extent_type>(-1);
// Lock up to the beginning of the shadow lock space
lastbyte &= ~(1ULL << 63U);
- OUTCOME_TRY(append_guard_, _h.lock(my_lock_request_offset, lastbyte, true));
+ OUTCOME_TRY(append_guard_, _h.lock_range(my_lock_request_offset, lastbyte, file_handle::lock_kind::exclusive));
append_guard = std::move(append_guard_);
}
OUTCOME_TRYV(_h.write(0, {{reinterpret_cast<byte *>(&lock_request), sizeof(lock_request)}}));
@@ -338,7 +338,7 @@ namespace algorithm
auto lock_offset = my_lock_request_offset;
// Set the top bit to use the shadow lock space on Windows
lock_offset |= (1ULL << 63U);
- OUTCOME_TRY(my_request_guard_, _h.lock(lock_offset, sizeof(lock_request), true));
+ OUTCOME_TRY(my_request_guard_, _h.lock_range(lock_offset, sizeof(lock_request), file_handle::lock_kind::exclusive));
my_request_guard = std::move(my_request_guard_);
}
@@ -446,7 +446,7 @@ namespace algorithm
auto lock_offset = record_offset;
// Set the top bit to use the shadow lock space on Windows
lock_offset |= (1ULL << 63U);
- OUTCOME_TRYV(_h.lock(lock_offset, sizeof(*record), false, nd));
+ OUTCOME_TRYV(_h.lock_range(lock_offset, sizeof(*record), file_handle::lock_kind::shared, nd));
}
// Make sure we haven't timed out during this wait
if(d)
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
index cb42ce05..7e952bac 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
@@ -143,9 +143,9 @@ namespace algorithm
// Now 0 to n needs to be closed
for(; n > 0; n--)
{
- _h.unlock(out.entities[n].value, 1);
+ _h.unlock_range(out.entities[n].value, 1);
}
- _h.unlock(out.entities[0].value, 1);
+ _h.unlock_range(out.entities[0].value, 1);
}
});
for(n = 0; n < out.entities.size(); n++)
@@ -179,7 +179,7 @@ namespace algorithm
}
}
}
- auto outcome = _h.lock(out.entities[n].value, 1, out.entities[n].exclusive != 0u, nd);
+ auto outcome = _h.lock_range(out.entities[n].value, 1, (out.entities[n].exclusive != 0u) ? file_handle::lock_kind::exclusive : file_handle::lock_kind::shared, nd);
if(!outcome)
{
was_contended = n;
@@ -229,7 +229,7 @@ namespace algorithm
LLFIO_LOG_FUNCTION_CALL(this);
for(const auto &i : entities)
{
- _h.unlock(i.value, 1);
+ _h.unlock_range(i.value, 1);
}
}
};
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
index 9b8c6941..c16202ee 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
@@ -148,7 +148,7 @@ namespace algorithm
_temphmap = {};
// Release my shared locks and try locking inuse exclusively
_hlockinuse.unlock();
- auto lockresult = _h.try_lock(_initialisingoffset, 2, true);
+ auto lockresult = _h.try_lock_range(_initialisingoffset, 2, file_handle::lock_kind::exclusive);
#ifndef NDEBUG
if(!lockresult && lockresult.error() != errc::timed_out)
{
@@ -202,7 +202,7 @@ namespace algorithm
OUTCOME_TRY(ret, file_handle::file(base, lockfile, file_handle::mode::write, file_handle::creation::if_needed, file_handle::caching::reads));
file_handle temph;
// Am I the first person to this file? Lock everything exclusively
- auto lockinuse = ret.try_lock(_initialisingoffset, 2, true);
+ auto lockinuse = ret.try_lock_range(_initialisingoffset, 2, file_handle::lock_kind::exclusive);
if(lockinuse.has_error())
{
if(lockinuse.error() != errc::timed_out)
@@ -210,7 +210,7 @@ namespace algorithm
return std::move(lockinuse).error();
}
// Somebody else is also using this file, so try to read the hash index file I ought to use
- lockinuse = ret.lock(_lockinuseoffset, 1, false); // inuse shared access, blocking
+ lockinuse = ret.lock_range(_lockinuseoffset, 1, file_handle::lock_kind::shared); // inuse shared access, blocking
if(!lockinuse)
{
return std::move(lockinuse).error();
@@ -261,7 +261,7 @@ namespace algorithm
atomic downgrade of exclusive range to shared range, we're fully prepared for other users
now. The _initialisingoffset remains exclusive to prevent double entry into this init routine.
*/
- OUTCOME_TRY(lockinuse2, ret.lock(_lockinuseoffset, 1, false));
+ OUTCOME_TRY(lockinuse2, ret.lock_range(_lockinuseoffset, 1, file_handle::lock_kind::shared));
lockinuse = std::move(lockinuse2); // releases exclusive lock on all three offsets
return memory_map(std::move(ret), std::move(temph), std::move(lockinuse.value()), std::move(hmap), std::move(temphmap));
}
diff --git a/include/llfio/v2.0/detail/impl/posix/file_handle.ipp b/include/llfio/v2.0/detail/impl/posix/file_handle.ipp
index 7a0efa72..a0f0d52d 100644
--- a/include/llfio/v2.0/detail/impl/posix/file_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/posix/file_handle.ipp
@@ -357,6 +357,130 @@ result<file_handle> file_handle::clone(mode mode_, caching caching_, deadline d)
}
}
+#if 0
+#if !defined(__linux__) && !defined(F_OFD_SETLK)
+ if(0 == bytes)
+ {
+ // Non-Linux has a sane locking system in flock() if you are willing to lock the entire file
+ int operation = ((d && !d.nsecs) ? LOCK_NB : 0) | ((kind != file_handle::lock_kind::shared) ? LOCK_EX : LOCK_SH);
+ if(-1 == flock(_v.fd, operation))
+ failed = true;
+ }
+ else
+#endif
+
+#if !defined(__linux__) && !defined(F_OFD_SETLK)
+ if(0 == bytes)
+ {
+ if(-1 == flock(_v.fd, LOCK_UN))
+ failed = true;
+ }
+ else
+#endif
+#endif
+
+result<file_handle::extent_guard> file_handle::lock_range(io_handle::extent_type offset, io_handle::extent_type bytes, file_handle::lock_kind kind, deadline d) noexcept
+{
+ LLFIO_LOG_FUNCTION_CALL(this);
+ if(d && d.nsecs > 0)
+ {
+ return errc::not_supported;
+ }
+ bool failed = false;
+ {
+ struct flock fl
+ {
+ };
+ memset(&fl, 0, sizeof(fl));
+ fl.l_type = (kind != file_handle::lock_kind::shared) ? F_WRLCK : F_RDLCK;
+ constexpr extent_type extent_topbit = static_cast<extent_type>(1) << (8 * sizeof(extent_type) - 1);
+ if((offset & extent_topbit) != 0u)
+ {
+ LLFIO_LOG_WARN(_v.fd, "file_handle::lock() called with offset with top bit set, masking out");
+ }
+ if((bytes & extent_topbit) != 0u)
+ {
+ LLFIO_LOG_WARN(_v.fd, "file_handle::lock() called with bytes with top bit set, masking out");
+ }
+ fl.l_whence = SEEK_SET;
+ fl.l_start = offset & ~extent_topbit;
+ fl.l_len = bytes & ~extent_topbit;
+#ifdef F_OFD_SETLK
+ if(-1 == fcntl(_v.fd, (d && !d.nsecs) ? F_OFD_SETLK : F_OFD_SETLKW, &fl))
+ {
+ if(EINVAL == errno) // OFD locks not supported on this kernel
+ {
+ if(-1 == fcntl(_v.fd, (d && !d.nsecs) ? F_SETLK : F_SETLKW, &fl))
+ failed = true;
+ else
+ _flags |= flag::byte_lock_insanity;
+ }
+ else
+ failed = true;
+ }
+#else
+ if(-1 == fcntl(_v.fd, (d && (d.nsecs == 0u)) ? F_SETLK : F_SETLKW, &fl))
+ {
+ failed = true;
+ }
+ else
+ {
+ _flags |= flag::byte_lock_insanity;
+ }
+#endif
+ }
+ if(failed)
+ {
+ if(d && (d.nsecs == 0u) && (EACCES == errno || EAGAIN == errno || EWOULDBLOCK == errno))
+ {
+ return errc::timed_out;
+ }
+ return posix_error();
+ }
+ return extent_guard(this, offset, bytes, kind);
+}
+
+void file_handle::unlock_range(io_handle::extent_type offset, io_handle::extent_type bytes) noexcept
+{
+ LLFIO_LOG_FUNCTION_CALL(this);
+ bool failed = false;
+ {
+ struct flock fl
+ {
+ };
+ memset(&fl, 0, sizeof(fl));
+ fl.l_type = F_UNLCK;
+ constexpr extent_type extent_topbit = static_cast<extent_type>(1) << (8 * sizeof(extent_type) - 1);
+ fl.l_whence = SEEK_SET;
+ fl.l_start = offset & ~extent_topbit;
+ fl.l_len = bytes & ~extent_topbit;
+#ifdef F_OFD_SETLK
+ if(-1 == fcntl(_v.fd, F_OFD_SETLK, &fl))
+ {
+ if(EINVAL == errno) // OFD locks not supported on this kernel
+ {
+ if(-1 == fcntl(_v.fd, F_SETLK, &fl))
+ failed = true;
+ }
+ else
+ failed = true;
+ }
+#else
+ if(-1 == fcntl(_v.fd, F_SETLK, &fl))
+ {
+ failed = true;
+ }
+#endif
+ }
+ if(failed)
+ {
+ auto ret(posix_error());
+ (void) ret;
+ LLFIO_LOG_FATAL(_v.fd, "io_handle::unlock() failed");
+ std::terminate();
+ }
+}
+
result<file_handle::extent_type> file_handle::maximum_extent() const noexcept
{
LLFIO_LOG_FUNCTION_CALL(this);
diff --git a/include/llfio/v2.0/detail/impl/posix/io_handle.ipp b/include/llfio/v2.0/detail/impl/posix/io_handle.ipp
index 37a082b2..ddd0ba2e 100644
--- a/include/llfio/v2.0/detail/impl/posix/io_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/posix/io_handle.ipp
@@ -192,125 +192,4 @@ io_handle::io_result<io_handle::const_buffers_type> io_handle::write(io_handle::
return {reqs.buffers};
}
-result<io_handle::extent_guard> io_handle::lock(io_handle::extent_type offset, io_handle::extent_type bytes, bool exclusive, deadline d) noexcept
-{
- LLFIO_LOG_FUNCTION_CALL(this);
- if(d && d.nsecs > 0)
- {
- return errc::not_supported;
- }
- bool failed = false;
-#if !defined(__linux__) && !defined(F_OFD_SETLK)
- if(0 == bytes)
- {
- // Non-Linux has a sane locking system in flock() if you are willing to lock the entire file
- int operation = ((d && !d.nsecs) ? LOCK_NB : 0) | (exclusive ? LOCK_EX : LOCK_SH);
- if(-1 == flock(_v.fd, operation))
- failed = true;
- }
- else
-#endif
- {
- struct flock fl
- {
- };
- memset(&fl, 0, sizeof(fl));
- fl.l_type = exclusive ? F_WRLCK : F_RDLCK;
- constexpr extent_type extent_topbit = static_cast<extent_type>(1) << (8 * sizeof(extent_type) - 1);
- if((offset & extent_topbit) != 0u)
- {
- LLFIO_LOG_WARN(_v.fd, "io_handle::lock() called with offset with top bit set, masking out");
- }
- if((bytes & extent_topbit) != 0u)
- {
- LLFIO_LOG_WARN(_v.fd, "io_handle::lock() called with bytes with top bit set, masking out");
- }
- fl.l_whence = SEEK_SET;
- fl.l_start = offset & ~extent_topbit;
- fl.l_len = bytes & ~extent_topbit;
-#ifdef F_OFD_SETLK
- if(-1 == fcntl(_v.fd, (d && !d.nsecs) ? F_OFD_SETLK : F_OFD_SETLKW, &fl))
- {
- if(EINVAL == errno) // OFD locks not supported on this kernel
- {
- if(-1 == fcntl(_v.fd, (d && !d.nsecs) ? F_SETLK : F_SETLKW, &fl))
- failed = true;
- else
- _flags |= flag::byte_lock_insanity;
- }
- else
- failed = true;
- }
-#else
- if(-1 == fcntl(_v.fd, (d && (d.nsecs == 0u)) ? F_SETLK : F_SETLKW, &fl))
- {
- failed = true;
- }
- else
- {
- _flags |= flag::byte_lock_insanity;
- }
-#endif
- }
- if(failed)
- {
- if(d && (d.nsecs == 0u) && (EACCES == errno || EAGAIN == errno || EWOULDBLOCK == errno))
- {
- return errc::timed_out;
- }
- return posix_error();
- }
- return extent_guard(this, offset, bytes, exclusive);
-}
-
-void io_handle::unlock(io_handle::extent_type offset, io_handle::extent_type bytes) noexcept
-{
- LLFIO_LOG_FUNCTION_CALL(this);
- bool failed = false;
-#if !defined(__linux__) && !defined(F_OFD_SETLK)
- if(0 == bytes)
- {
- if(-1 == flock(_v.fd, LOCK_UN))
- failed = true;
- }
- else
-#endif
- {
- struct flock fl
- {
- };
- memset(&fl, 0, sizeof(fl));
- fl.l_type = F_UNLCK;
- constexpr extent_type extent_topbit = static_cast<extent_type>(1) << (8 * sizeof(extent_type) - 1);
- fl.l_whence = SEEK_SET;
- fl.l_start = offset & ~extent_topbit;
- fl.l_len = bytes & ~extent_topbit;
-#ifdef F_OFD_SETLK
- if(-1 == fcntl(_v.fd, F_OFD_SETLK, &fl))
- {
- if(EINVAL == errno) // OFD locks not supported on this kernel
- {
- if(-1 == fcntl(_v.fd, F_SETLK, &fl))
- failed = true;
- }
- else
- failed = true;
- }
-#else
- if(-1 == fcntl(_v.fd, F_SETLK, &fl))
- {
- failed = true;
- }
-#endif
- }
- if(failed)
- {
- auto ret(posix_error());
- (void) ret;
- LLFIO_LOG_FATAL(_v.fd, "io_handle::unlock() failed");
- std::terminate();
- }
-}
-
-
LLFIO_V2_NAMESPACE_END
diff --git a/include/llfio/v2.0/detail/impl/safe_byte_ranges.ipp b/include/llfio/v2.0/detail/impl/safe_byte_ranges.ipp
index 6dd29464..473dc65b 100644
--- a/include/llfio/v2.0/detail/impl/safe_byte_ranges.ipp
+++ b/include/llfio/v2.0/detail/impl/safe_byte_ranges.ipp
@@ -71,8 +71,8 @@ namespace algorithm
{
std::vector<unsigned> reader_tids; // thread ids of all shared lock holders
unsigned writer_tid; // thread id of exclusive lock holder
- io_handle::extent_guard filelock; // exclusive if writer_tid, else shared
- _entity_info(bool exclusive, unsigned tid, io_handle::extent_guard _filelock)
+ file_handle::extent_guard filelock; // exclusive if writer_tid, else shared
+ _entity_info(bool exclusive, unsigned tid, file_handle::extent_guard _filelock)
: writer_tid(exclusive ? tid : 0)
, filelock(std::move(_filelock))
{
@@ -95,7 +95,7 @@ namespace algorithm
if(!it->second.reader_tids.empty())
{
// Downgrade the lock from exclusive to shared
- auto l = _h.lock(entity.value, 1, false).value();
+ auto l = _h.lock_range(entity.value, 1, file_handle::lock_kind::shared).value();
#ifndef _WIN32
// On POSIX byte range locks replace
it->second.filelock.release();
@@ -120,7 +120,7 @@ namespace algorithm
if(it->second.reader_tids.empty())
{
// Release the lock and delete this entity from the map
- _h.unlock(entity.value, 1);
+ _h.unlock_range(entity.value, 1);
_thread_locks.erase(it);
}
}
@@ -207,7 +207,7 @@ namespace algorithm
}
// Allow other threads to use this threaded_byte_ranges
guard.unlock();
- auto outcome = _h.lock(out.entities[n].value, 1, out.entities[n].exclusive != 0u, nd);
+ auto outcome = _h.lock_range(out.entities[n].value, 1, (out.entities[n].exclusive != 0u) ? file_handle::lock_kind::exclusive : file_handle::lock_kind::shared, nd);
guard.lock();
if(!outcome)
{
@@ -294,7 +294,7 @@ namespace algorithm
}
// Allow other threads to use this threaded_byte_ranges
guard.unlock();
- auto outcome = _h.lock(out.entities[n].value, 1, true, nd);
+ auto outcome = _h.lock_range(out.entities[n].value, 1, file_handle::lock_kind::exclusive, nd);
guard.lock();
if(!outcome)
{
diff --git a/include/llfio/v2.0/detail/impl/windows/file_handle.ipp b/include/llfio/v2.0/detail/impl/windows/file_handle.ipp
index b9e5c701..8e402cbd 100644
--- a/include/llfio/v2.0/detail/impl/windows/file_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/windows/file_handle.ipp
@@ -343,6 +343,90 @@ result<file_handle> file_handle::clone(mode mode_, caching caching_, deadline /*
return ret;
}
+result<file_handle::extent_guard> file_handle::lock_range(io_handle::extent_type offset, io_handle::extent_type bytes, lock_kind kind, deadline d) noexcept
+{
+ LLFIO_LOG_FUNCTION_CALL(_v.h);
+ if(d && d.nsecs > 0 && !_v.is_overlapped())
+ {
+ return errc::not_supported;
+ }
+ DWORD flags = (lock_kind::shared != kind) ? LOCKFILE_EXCLUSIVE_LOCK : 0;
+ if(d && (d.nsecs == 0u))
+ {
+ flags |= LOCKFILE_FAIL_IMMEDIATELY;
+ }
+ LLFIO_WIN_DEADLINE_TO_SLEEP_INIT(d);
+ OVERLAPPED ol{};
+ memset(&ol, 0, sizeof(ol));
+ ol.Internal = static_cast<ULONG_PTR>(-1);
+ ol.OffsetHigh = (offset >> 32) & 0xffffffff;
+ ol.Offset = offset & 0xffffffff;
+ DWORD bytes_high = bytes == 0u ? MAXDWORD : static_cast<DWORD>((bytes >> 32) & 0xffffffff);
+ DWORD bytes_low = bytes == 0u ? MAXDWORD : static_cast<DWORD>(bytes & 0xffffffff);
+ if(LockFileEx(_v.h, flags, 0, bytes_low, bytes_high, &ol) == 0)
+ {
+ if(ERROR_LOCK_VIOLATION == GetLastError() && d && (d.nsecs == 0u))
+ {
+ return errc::timed_out;
+ }
+ if(ERROR_IO_PENDING != GetLastError())
+ {
+ return win32_error();
+ }
+ }
+ // If handle is overlapped, wait for completion of each i/o.
+ if(_v.is_overlapped())
+ {
+ if(STATUS_TIMEOUT == ntwait(_v.h, ol, d))
+ {
+ LLFIO_WIN_DEADLINE_TO_TIMEOUT(d);
+ }
+ // It seems the NT kernel is guilty of casting bugs sometimes
+ ol.Internal = ol.Internal & 0xffffffff;
+ if(ol.Internal != 0)
+ {
+ return ntkernel_error(static_cast<NTSTATUS>(ol.Internal));
+ }
+ }
+ return extent_guard(this, offset, bytes, kind);
+}
+
+void file_handle::unlock_range(io_handle::extent_type offset, io_handle::extent_type bytes) noexcept
+{
+ LLFIO_LOG_FUNCTION_CALL(this);
+ OVERLAPPED ol{};
+ memset(&ol, 0, sizeof(ol));
+ ol.Internal = static_cast<ULONG_PTR>(-1);
+ ol.OffsetHigh = (offset >> 32) & 0xffffffff;
+ ol.Offset = offset & 0xffffffff;
+ DWORD bytes_high = bytes == 0u ? MAXDWORD : static_cast<DWORD>((bytes >> 32) & 0xffffffff);
+ DWORD bytes_low = bytes == 0u ? MAXDWORD : static_cast<DWORD>(bytes & 0xffffffff);
+ if(UnlockFileEx(_v.h, 0, bytes_low, bytes_high, &ol) == 0)
+ {
+ if(ERROR_IO_PENDING != GetLastError())
+ {
+ auto ret = win32_error();
+ (void) ret;
+ LLFIO_LOG_FATAL(_v.h, "io_handle::unlock() failed");
+ std::terminate();
+ }
+ }
+ // If handle is overlapped, wait for completion of each i/o.
+ if(_v.is_overlapped())
+ {
+ ntwait(_v.h, ol, deadline());
+ if(ol.Internal != 0)
+ {
+ // It seems the NT kernel is guilty of casting bugs sometimes
+ ol.Internal = ol.Internal & 0xffffffff;
+ auto ret = ntkernel_error(static_cast<NTSTATUS>(ol.Internal));
+ (void) ret;
+ LLFIO_LOG_FATAL(_v.h, "io_handle::unlock() failed");
+ std::terminate();
+ }
+ }
+}
+
result<file_handle::extent_type> file_handle::maximum_extent() const noexcept
{
LLFIO_LOG_FUNCTION_CALL(this);
diff --git a/include/llfio/v2.0/detail/impl/windows/io_handle.ipp b/include/llfio/v2.0/detail/impl/windows/io_handle.ipp
index 359c7573..4ae8a9fe 100644
--- a/include/llfio/v2.0/detail/impl/windows/io_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/windows/io_handle.ipp
@@ -133,88 +133,4 @@ io_handle::io_result<io_handle::const_buffers_type> io_handle::write(io_handle::
return do_read_write(_v, &WriteFile, reqs, d);
}
-result<io_handle::extent_guard> io_handle::lock(io_handle::extent_type offset, io_handle::extent_type bytes, bool exclusive, deadline d) noexcept
-{
- LLFIO_LOG_FUNCTION_CALL(_v.h);
- if(d && d.nsecs > 0 && !_v.is_overlapped())
- {
- return errc::not_supported;
- }
- DWORD flags = exclusive ? LOCKFILE_EXCLUSIVE_LOCK : 0;
- if(d && (d.nsecs == 0u))
- {
- flags |= LOCKFILE_FAIL_IMMEDIATELY;
- }
- LLFIO_WIN_DEADLINE_TO_SLEEP_INIT(d);
- OVERLAPPED ol{};
- memset(&ol, 0, sizeof(ol));
- ol.Internal = static_cast<ULONG_PTR>(-1);
- ol.OffsetHigh = (offset >> 32) & 0xffffffff;
- ol.Offset = offset & 0xffffffff;
- DWORD bytes_high = bytes == 0u ? MAXDWORD : static_cast<DWORD>((bytes >> 32) & 0xffffffff);
- DWORD bytes_low = bytes == 0u ? MAXDWORD : static_cast<DWORD>(bytes & 0xffffffff);
- if(LockFileEx(_v.h, flags, 0, bytes_low, bytes_high, &ol) == 0)
- {
- if(ERROR_LOCK_VIOLATION == GetLastError() && d && (d.nsecs == 0u))
- {
- return errc::timed_out;
- }
- if(ERROR_IO_PENDING != GetLastError())
- {
- return win32_error();
- }
- }
- // If handle is overlapped, wait for completion of each i/o.
- if(_v.is_overlapped())
- {
- if(STATUS_TIMEOUT == ntwait(_v.h, ol, d))
- {
- LLFIO_WIN_DEADLINE_TO_TIMEOUT(d);
- }
- // It seems the NT kernel is guilty of casting bugs sometimes
- ol.Internal = ol.Internal & 0xffffffff;
- if(ol.Internal != 0)
- {
- return ntkernel_error(static_cast<NTSTATUS>(ol.Internal));
- }
- }
- return extent_guard(this, offset, bytes, exclusive);
-}
-
-void io_handle::unlock(io_handle::extent_type offset, io_handle::extent_type bytes) noexcept
-{
- LLFIO_LOG_FUNCTION_CALL(this);
- OVERLAPPED ol{};
- memset(&ol, 0, sizeof(ol));
- ol.Internal = static_cast<ULONG_PTR>(-1);
- ol.OffsetHigh = (offset >> 32) & 0xffffffff;
- ol.Offset = offset & 0xffffffff;
- DWORD bytes_high = bytes == 0u ? MAXDWORD : static_cast<DWORD>((bytes >> 32) & 0xffffffff);
- DWORD bytes_low = bytes == 0u ? MAXDWORD : static_cast<DWORD>(bytes & 0xffffffff);
- if(UnlockFileEx(_v.h, 0, bytes_low, bytes_high, &ol) == 0)
- {
- if(ERROR_IO_PENDING != GetLastError())
- {
- auto ret = win32_error();
- (void) ret;
- LLFIO_LOG_FATAL(_v.h, "io_handle::unlock() failed");
- std::terminate();
- }
- }
- // If handle is overlapped, wait for completion of each i/o.
- if(_v.is_overlapped())
- {
- ntwait(_v.h, ol, deadline());
- if(ol.Internal != 0)
- {
- // It seems the NT kernel is guilty of casting bugs sometimes
- ol.Internal = ol.Internal & 0xffffffff;
- auto ret = ntkernel_error(static_cast<NTSTATUS>(ol.Internal));
- (void) ret;
- LLFIO_LOG_FATAL(_v.h, "io_handle::unlock() failed");
- std::terminate();
- }
- }
-}
-
LLFIO_V2_NAMESPACE_END
diff --git a/include/llfio/v2.0/fast_random_file_handle.hpp b/include/llfio/v2.0/fast_random_file_handle.hpp
index 1af381dd..1a7a842d 100644
--- a/include/llfio/v2.0/fast_random_file_handle.hpp
+++ b/include/llfio/v2.0/fast_random_file_handle.hpp
@@ -290,13 +290,13 @@ private:
};
public:
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock(extent_type offset, extent_type bytes, bool exclusive = true, deadline /* unused */ = deadline()) noexcept override
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock_range(extent_type offset, extent_type bytes, lock_kind kind, deadline /* unused */ = deadline()) noexcept override
{
// Lock nothing
- return _extent_guard(this, offset, bytes, exclusive);
+ return _extent_guard(this, offset, bytes, kind);
}
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(extent_type /*unused*/, extent_type /*unused*/) noexcept override
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock_range(extent_type /*unused*/, extent_type /*unused*/) noexcept override
{
// Unlock nothing
}
diff --git a/include/llfio/v2.0/file_handle.hpp b/include/llfio/v2.0/file_handle.hpp
index b3a0ebc6..c70d192b 100644
--- a/include/llfio/v2.0/file_handle.hpp
+++ b/include/llfio/v2.0/file_handle.hpp
@@ -74,6 +74,14 @@ public:
using ino_t = fs_handle::ino_t;
using path_view_type = fs_handle::path_view_type;
+ //! The kinds of concurrent user exclusion which can be performed.
+ enum class lock_kind
+ {
+ unknown,
+ shared, //!< Exclude only those requesting an exclusive lock on the same inode.
+ exclusive //!< Exclude those requesting any kind of lock on the same inode.
+ };
+
protected:
io_service *_service{nullptr};
@@ -254,6 +262,164 @@ public:
return std::move(ret).error();
}
+ /*! \class extent_guard
+ \brief EXTENSION: RAII holder of a locked extent of bytes in a file.
+ */
+ class extent_guard
+ {
+ friend class file_handle;
+ file_handle *_h{nullptr};
+ extent_type _offset{0}, _length{0};
+ lock_kind _kind{lock_kind::unknown};
+
+ protected:
+ constexpr extent_guard(file_handle *h, extent_type offset, extent_type length, lock_kind kind)
+ : _h(h)
+ , _offset(offset)
+ , _length(length)
+ , _kind(kind)
+ {
+ }
+
+ public:
+ extent_guard(const extent_guard &) = delete;
+ extent_guard &operator=(const extent_guard &) = delete;
+
+ //! Default constructor
+ constexpr extent_guard() {} // NOLINT
+ //! Move constructor
+ extent_guard(extent_guard &&o) noexcept
+ : _h(o._h)
+ , _offset(o._offset)
+ , _length(o._length)
+ , _kind(o._kind)
+ {
+ o.release();
+ }
+ //! Move assign
+ extent_guard &operator=(extent_guard &&o) noexcept
+ {
+ unlock();
+ _h = o._h;
+ _offset = o._offset;
+ _length = o._length;
+ _kind = o._kind;
+ o.release();
+ return *this;
+ }
+ ~extent_guard()
+ {
+ if(_h != nullptr)
+ {
+ unlock();
+ }
+ }
+ //! True if extent guard is valid
+ explicit operator bool() const noexcept { return _h != nullptr; }
+
+ //! The `file_handle` to be unlocked
+ file_handle *handle() const noexcept { return _h; }
+ //! Sets the `file_handle` to be unlocked
+ void set_handle(file_handle *h) noexcept { _h = h; }
+ //! The extent to be unlocked
+ std::tuple<extent_type, extent_type, lock_kind> extent() const noexcept { return std::make_tuple(_offset, _length, _kind); }
+
+ //! Unlocks the locked extent immediately
+ void unlock() noexcept
+ {
+ if(_h != nullptr)
+ {
+ _h->unlock_range(_offset, _length);
+ release();
+ }
+ }
+
+ //! Detach this RAII unlocker from the locked state
+ void release() noexcept
+ {
+ _h = nullptr;
+ _offset = 0;
+ _length = 0;
+ _kind = lock_kind::unknown;
+ }
+ };
+
+ /*! \brief EXTENSION: Tries to lock the range of bytes specified for shared or exclusive access. Be aware
+ this passes through the same semantics as the underlying OS call, including any POSIX insanity
+ present on your platform:
+
+ - Any fd closed on an inode must release all byte range locks on that inode for all
+ other fds. If your OS isn't new enough to support the non-insane lock API,
+ `flag::byte_lock_insanity` will be set in flags() after the first call to this function.
+ - Threads replace each other's locks, indeed locks replace each other's locks.
+
+ You almost certainly should use your choice of an `algorithm::shared_fs_mutex::*` instead of this
+ as those are more portable and performant, or use the `SharedMutex` modelling member functions
+ which lock the whole inode for exclusive or shared access.
+
+ \warning This is a low-level API which you should not use directly in portable code. Another
+ issue is that atomic lock upgrade/downgrade, if your platform implements that (you should assume
+ it does not in portable code), means that on POSIX you need to *release* the old `extent_guard`
+ after creating a new one over the same byte range, otherwise the old `extent_guard`'s destructor
+ will simply unlock the range entirely. On Windows however upgrade/downgrade locks overlay, so on
+ that platform you must *not* release the old `extent_guard`. Look into
+ `algorithm::shared_fs_mutex::safe_byte_ranges` for a portable solution.
+
+ \return An extent guard, the destruction of which will call unlock().
+ \param offset The offset to lock. Note that on POSIX the top bit is always cleared before use
+ as POSIX uses signed transport for offsets. If you want an advisory rather than mandatory lock
+ on Windows, one technique is to force top bit set so the region you lock is not the one you will
+ i/o - obviously this reduces maximum file size to (2^63)-1.
+ \param bytes The number of bytes to lock.
+ \param kind Whether the lock is to be shared or exclusive.
+ \param d An optional deadline by which the lock must complete, else it is cancelled.
+ \errors Any of the values POSIX fcntl() can return, `errc::timed_out`, `errc::not_supported` may be
+ returned if deadline i/o is not possible with this particular handle configuration (e.g.
+ non-overlapped HANDLE on Windows).
+ \mallocs The default synchronous implementation in file_handle performs no memory allocation.
+ The asynchronous implementation in async_file_handle performs one calloc and one free.
+ */
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock_range(extent_type offset, extent_type bytes, lock_kind kind, deadline d = deadline()) noexcept;
+ //! \overload
+ result<extent_guard> try_lock_range(extent_type offset, extent_type bytes, lock_kind kind) noexcept { return lock_range(offset, bytes, kind, deadline(std::chrono::seconds(0))); }
+ //! \overload EXTENSION: Locks for shared access
+ result<extent_guard> lock_range(io_request<buffers_type> reqs, deadline d = deadline()) noexcept
+ {
+ size_t bytes = 0;
+ for(auto &i : reqs.buffers)
+ {
+ if(bytes + i.size() < bytes)
+ {
+ return errc::value_too_large;
+ }
+ bytes += i.size();
+ }
+ return lock_range(reqs.offset, bytes, lock_kind::shared, d);
+ }
+ //! \overload EXTENSION: Locks for exclusive access
+ result<extent_guard> lock_range(io_request<const_buffers_type> reqs, deadline d = deadline()) noexcept
+ {
+ size_t bytes = 0;
+ for(auto &i : reqs.buffers)
+ {
+ if(bytes + i.size() < bytes)
+ {
+ return errc::value_too_large;
+ }
+ bytes += i.size();
+ }
+ return lock_range(reqs.offset, bytes, lock_kind::exclusive, d);
+ }
+
+ /*! \brief EXTENSION: Unlocks a byte range previously locked.
+
+ \param offset The offset to unlock. This should be an offset previously locked.
+ \param bytes The number of bytes to unlock. This should be a byte extent previously locked.
+ \errors Any of the values POSIX fcntl() can return.
+ \mallocs None.
+ */
+ LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock_range(extent_type offset, extent_type bytes) noexcept;
+
/*! Return the current maximum permitted extent of the file.
\errors Any of the values POSIX fstat() or GetFileInformationByHandleEx() can return.
diff --git a/include/llfio/v2.0/handle.hpp b/include/llfio/v2.0/handle.hpp
index 27c7a613..b0b821b5 100644
--- a/include/llfio/v2.0/handle.hpp
+++ b/include/llfio/v2.0/handle.hpp
@@ -290,7 +290,7 @@ public:
bool is_writable() const noexcept { return _v.is_writable(); }
//! True if the handle is append only
bool is_append_only() const noexcept { return _v.is_append_only(); }
- /*! Changes whether this handle is append only or not.
+ /*! \brief EXTENSION: Changes whether this handle is append only or not.
\warning On Windows this is implemented as a bit of a hack to make it fast like on POSIX,
so make sure you open the handle for read/write originally. Note unlike on POSIX the
diff --git a/include/llfio/v2.0/io_handle.hpp b/include/llfio/v2.0/io_handle.hpp
index 7155d8a2..9d05a6b4 100644
--- a/include/llfio/v2.0/io_handle.hpp
+++ b/include/llfio/v2.0/io_handle.hpp
@@ -409,167 +409,6 @@ public:
*/
LLFIO_MAKE_FREE_FUNCTION
virtual io_result<const_buffers_type> barrier(io_request<const_buffers_type> reqs = io_request<const_buffers_type>(), barrier_kind kind = barrier_kind::nowait_data_only, deadline d = deadline()) noexcept = 0;
-
- /*! \class extent_guard
- \brief RAII holder a locked extent of bytes in a file.
- */
- class extent_guard
- {
- friend class io_handle;
- io_handle *_h{nullptr};
- extent_type _offset{0}, _length{0};
- bool _exclusive{false};
-
- protected:
- constexpr extent_guard(io_handle *h, extent_type offset, extent_type length, bool exclusive)
- : _h(h)
- , _offset(offset)
- , _length(length)
- , _exclusive(exclusive)
- {
- }
-
- public:
- extent_guard(const extent_guard &) = delete;
- extent_guard &operator=(const extent_guard &) = delete;
-
- //! Default constructor
- constexpr extent_guard() {} // NOLINT
- //! Move constructor
- extent_guard(extent_guard &&o) noexcept
- : _h(o._h)
- , _offset(o._offset)
- , _length(o._length)
- , _exclusive(o._exclusive)
- {
- o.release();
- }
- //! Move assign
- extent_guard &operator=(extent_guard &&o) noexcept
- {
- unlock();
- _h = o._h;
- _offset = o._offset;
- _length = o._length;
- _exclusive = o._exclusive;
- o.release();
- return *this;
- }
- ~extent_guard()
- {
- if(_h != nullptr)
- {
- unlock();
- }
- }
- //! True if extent guard is valid
- explicit operator bool() const noexcept { return _h != nullptr; }
- //! True if extent guard is invalid
- bool operator!() const noexcept { return _h == nullptr; }
-
- //! The io_handle to be unlocked
- io_handle *handle() const noexcept { return _h; }
- //! Sets the io_handle to be unlocked
- void set_handle(io_handle *h) noexcept { _h = h; }
- //! The extent to be unlocked
- std::tuple<extent_type, extent_type, bool> extent() const noexcept { return std::make_tuple(_offset, _length, _exclusive); }
-
- //! Unlocks the locked extent immediately
- void unlock() noexcept
- {
- if(_h != nullptr)
- {
- _h->unlock(_offset, _length);
- release();
- }
- }
-
- //! Detach this RAII unlocker from the locked state
- void release() noexcept
- {
- _h = nullptr;
- _offset = 0;
- _length = 0;
- _exclusive = false;
- }
- };
-
- /*! \brief Tries to lock the range of bytes specified for shared or exclusive access. Be aware
- this passes through the same semantics as the underlying OS call, including any POSIX insanity
- present on your platform:
-
- - Any fd closed on an inode must release all byte range locks on that inode for all
- other fds. If your OS isn't new enough to support the non-insane lock API,
- `flag::byte_lock_insanity` will be set in flags() after the first call to this function.
- - Threads replace each other's locks, indeed locks replace each other's locks.
-
- You almost cetainly should use your choice of an `algorithm::shared_fs_mutex::*` instead of this
- as those are more portable and performant.
-
- \warning This is a low-level API which you should not use directly in portable code. Another
- issue is that atomic lock upgrade/downgrade, if your platform implements that (you should assume
- it does not in portable code), means that on POSIX you need to *release* the old `extent_guard`
- after creating a new one over the same byte range, otherwise the old `extent_guard`'s destructor
- will simply unlock the range entirely. On Windows however upgrade/downgrade locks overlay, so on
- that platform you must *not* release the old `extent_guard`. Look into
- `algorithm::shared_fs_mutex::safe_byte_ranges` for a portable solution.
-
- \return An extent guard, the destruction of which will call unlock().
- \param offset The offset to lock. Note that on POSIX the top bit is always cleared before use
- as POSIX uses signed transport for offsets. If you want an advisory rather than mandatory lock
- on Windows, one technique is to force top bit set so the region you lock is not the one you will
- i/o - obviously this reduces maximum file size to (2^63)-1.
- \param bytes The number of bytes to lock. Zero means lock the entire file using any more
- efficient alternative algorithm where available on your platform (specifically, on BSD and OS X use
- flock() for non-insane semantics).
- \param exclusive Whether the lock is to be exclusive.
- \param d An optional deadline by which the lock must complete, else it is cancelled.
- \errors Any of the values POSIX fcntl() can return, `errc::timed_out`, `errc::not_supported` may be
- returned if deadline i/o is not possible with this particular handle configuration (e.g.
- non-overlapped HANDLE on Windows).
- \mallocs The default synchronous implementation in file_handle performs no memory allocation.
- The asynchronous implementation in async_file_handle performs one calloc and one free.
- */
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC result<extent_guard> lock(extent_type offset, extent_type bytes, bool exclusive = true, deadline d = deadline()) noexcept;
- //! \overload
- result<extent_guard> try_lock(extent_type offset, extent_type bytes, bool exclusive = true) noexcept { return lock(offset, bytes, exclusive, deadline(std::chrono::seconds(0))); }
- //! \overload Locks for shared access
- result<extent_guard> lock(io_request<buffers_type> reqs, deadline d = deadline()) noexcept
- {
- size_t bytes = 0;
- for(auto &i : reqs.buffers)
- {
- if(bytes + i.size() < bytes)
- {
- return errc::value_too_large;
- }
- bytes += i.size();
- }
- return lock(reqs.offset, bytes, false, d);
- }
- //! \overload Locks for exclusive access
- result<extent_guard> lock(io_request<const_buffers_type> reqs, deadline d = deadline()) noexcept
- {
- size_t bytes = 0;
- for(auto &i : reqs.buffers)
- {
- if(bytes + i.size() < bytes)
- {
- return errc::value_too_large;
- }
- bytes += i.size();
- }
- return lock(reqs.offset, bytes, true, d);
- }
-
- /*! \brief Unlocks a byte range previously locked.
-
- \param offset The offset to unlock. This should be an offset previously locked.
- \param bytes The number of bytes to unlock. This should be a byte extent previously locked.
- \errors Any of the values POSIX fcntl() can return.
- \mallocs None.
- */
- LLFIO_HEADERS_ONLY_VIRTUAL_SPEC void unlock(extent_type offset, extent_type bytes) noexcept;
};
diff --git a/test/tests/file_handle_lock_unlock.cpp b/test/tests/file_handle_lock_unlock.cpp
index 141a5233..b09856b2 100644
--- a/test/tests/file_handle_lock_unlock.cpp
+++ b/test/tests/file_handle_lock_unlock.cpp
@@ -31,29 +31,29 @@ static inline void TestFileHandleLockUnlock()
llfio::file_handle h2 = llfio::file_handle::file({}, "temp", llfio::file_handle::mode::write, llfio::file_handle::creation::if_needed, llfio::file_handle::caching::temporary, llfio::file_handle::flag::unlink_on_first_close).value();
// Two exclusive locks not possible
{
- auto _1 = h1.lock(0, 0, true, std::chrono::seconds(0));
+ auto _1 = h1.lock_range(0, 0, llfio::file_handle::lock_kind::exclusive, std::chrono::seconds(0));
BOOST_REQUIRE(!_1.has_error());
if(h1.flags() & llfio::file_handle::flag::byte_lock_insanity)
{
std::cout << "This platform has byte_lock_insanity so this test won't be useful, bailing out" << std::endl;
return;
}
- auto _2 = h2.lock(0, 0, true, std::chrono::seconds(0));
+ auto _2 = h2.lock_range(0, 0, llfio::file_handle::lock_kind::exclusive, std::chrono::seconds(0));
BOOST_REQUIRE(_2.has_error());
BOOST_CHECK(_2.error() == llfio::errc::timed_out);
}
// Two non-exclusive locks okay
{
- auto _1 = h1.lock(0, 0, false, std::chrono::seconds(0));
+ auto _1 = h1.lock_range(0, 0, llfio::file_handle::lock_kind::shared, std::chrono::seconds(0));
BOOST_REQUIRE(!_1.has_error());
- auto _2 = h2.lock(0, 0, false, std::chrono::seconds(0));
+ auto _2 = h2.lock_range(0, 0, llfio::file_handle::lock_kind::shared, std::chrono::seconds(0));
BOOST_REQUIRE(!_2.has_error());
}
// Non-exclusive excludes exclusive
{
- auto _1 = h1.lock(0, 0, false, std::chrono::seconds(0));
+ auto _1 = h1.lock_range(0, 0, llfio::file_handle::lock_kind::shared, std::chrono::seconds(0));
BOOST_REQUIRE(!_1.has_error());
- auto _2 = h2.lock(0, 0, true, std::chrono::seconds(0));
+ auto _2 = h2.lock_range(0, 0, llfio::file_handle::lock_kind::exclusive, std::chrono::seconds(0));
BOOST_REQUIRE(_2.has_error());
BOOST_CHECK(_2.error() == llfio::errc::timed_out);
}