github.com/windirstat/llfio.git

Diffstat (limited to 'include/llfio/v2.0/algorithm/shared_fs_mutex')
-rw-r--r--  include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp  10
-rw-r--r--  include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp     8
-rw-r--r--  include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp      8
3 files changed, 13 insertions(+), 13 deletions(-)
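
This patch renames file_handle's byte-range locking API: lock() becomes lock_range(), try_lock() becomes try_lock_range(), and unlock() becomes unlock_range(), with the old bool exclusive parameter replaced by the explicit file_handle::lock_kind enum. A minimal sketch of the mapping, assuming an open llfio::file_handle named fh (the handle name and range are illustrative, not taken from this patch):

  // Before: a bool chose exclusive (true) or shared (false) locking.
  //   auto guard = fh.lock(0, 1, true);
  // After: the intent is spelled out via file_handle::lock_kind.
  OUTCOME_TRY(guard, fh.lock_range(0, 1, file_handle::lock_kind::exclusive));
  // ... modify the locked byte range ...
  guard.unlock();  // the returned extent guard also releases on destruction
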
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
index 9a1811b5..d8d0f354 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
@@ -206,7 +206,7 @@ namespace algorithm
OUTCOME_TRY(ret, file_handle::file(base, lockfile, file_handle::mode::write, file_handle::creation::if_needed, file_handle::caching::temporary));
atomic_append_detail::header header;
// Lock the entire header for exclusive access
- auto lockresult = ret.try_lock(0, sizeof(header), true);
+ auto lockresult = ret.try_lock_range(0, sizeof(header), file_handle::lock_kind::exclusive);
//! \todo fs_mutex_append needs to check if file still exists after lock is granted, awaiting path fetching.
if(lockresult.has_error())
{
@@ -231,7 +231,7 @@ namespace algorithm
OUTCOME_TRYV(ret.write(0, {{reinterpret_cast<byte *>(&header), sizeof(header)}}));
}
// Open a shared lock on last byte in header to prevent other users stomping the file
- OUTCOME_TRY(guard, ret.lock(sizeof(header) - 1, 1, false));
+ OUTCOME_TRY(guard, ret.lock_range(sizeof(header) - 1, 1, file_handle::lock_kind::shared));
// Unlock any exclusive lock I gained earlier now
if(lockresult)
{
@@ -292,7 +292,7 @@ namespace algorithm
auto lastbyte = static_cast<file_handle::extent_type>(-1);
// Lock up to the beginning of the shadow lock space
lastbyte &= ~(1ULL << 63U);
- OUTCOME_TRY(append_guard_, _h.lock(my_lock_request_offset, lastbyte, true));
+ OUTCOME_TRY(append_guard_, _h.lock_range(my_lock_request_offset, lastbyte, file_handle::lock_kind::exclusive));
append_guard = std::move(append_guard_);
}
OUTCOME_TRYV(_h.write(0, {{reinterpret_cast<byte *>(&lock_request), sizeof(lock_request)}}));
@@ -338,7 +338,7 @@ namespace algorithm
auto lock_offset = my_lock_request_offset;
// Set the top bit to use the shadow lock space on Windows
lock_offset |= (1ULL << 63U);
- OUTCOME_TRY(my_request_guard_, _h.lock(lock_offset, sizeof(lock_request), true));
+ OUTCOME_TRY(my_request_guard_, _h.lock_range(lock_offset, sizeof(lock_request), file_handle::lock_kind::exclusive));
my_request_guard = std::move(my_request_guard_);
}
@@ -446,7 +446,7 @@ namespace algorithm
auto lock_offset = record_offset;
// Set the top bit to use the shadow lock space on Windows
lock_offset |= (1ULL << 63U);
- OUTCOME_TRYV(_h.lock(lock_offset, sizeof(*record), false, nd));
+ OUTCOME_TRYV(_h.lock_range(lock_offset, sizeof(*record), file_handle::lock_kind::shared, nd));
}
// Make sure we haven't timed out during this wait
if(d)
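
The atomic_append changes are purely mechanical: each call gains the _range suffix and an explicit lock_kind, while the surrounding logic is untouched. Note the shadow lock space convention the hunks above rely on: setting the top bit of a 64-bit offset addresses a separate lock namespace on Windows, and the whole-file append lock masks that bit off so the two ranges never collide. A sketch under those assumptions, reusing the member names from the hunks above:

  // Exclusive lock over everything below the shadow space (top bit clear).
  auto lastbyte = static_cast<file_handle::extent_type>(-1) & ~(1ULL << 63U);
  OUTCOME_TRY(append_guard, _h.lock_range(my_lock_request_offset, lastbyte, file_handle::lock_kind::exclusive));
  // Exclusive lock on this record's shadow entry (top bit set).
  auto lock_offset = my_lock_request_offset | (1ULL << 63U);
  OUTCOME_TRY(request_guard, _h.lock_range(lock_offset, sizeof(lock_request), file_handle::lock_kind::exclusive));
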
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
index cb42ce05..7e952bac 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/byte_ranges.hpp
@@ -143,9 +143,9 @@ namespace algorithm
// Now 0 to n needs to be closed
for(; n > 0; n--)
{
- _h.unlock(out.entities[n].value, 1);
+ _h.unlock_range(out.entities[n].value, 1);
}
- _h.unlock(out.entities[0].value, 1);
+ _h.unlock_range(out.entities[0].value, 1);
}
});
for(n = 0; n < out.entities.size(); n++)
@@ -179,7 +179,7 @@ namespace algorithm
}
}
}
- auto outcome = _h.lock(out.entities[n].value, 1, out.entities[n].exclusive != 0u, nd);
+ auto outcome = _h.lock_range(out.entities[n].value, 1, (out.entities[n].exclusive != 0u) ? file_handle::lock_kind::exclusive : file_handle::lock_kind::shared, nd);
if(!outcome)
{
was_contended = n;
@@ -229,7 +229,7 @@ namespace algorithm
LLFIO_LOG_FUNCTION_CALL(this);
for(const auto &i : entities)
{
- _h.unlock(i.value, 1);
+ _h.unlock_range(i.value, 1);
}
}
};
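
byte_ranges maps the legacy bool straight onto the new enum at the single call site above; every entity is represented by a one-byte lock at its value, and unlocks mirror that byte-for-byte. A sketch of the per-entity pattern, assuming an entity with the value and exclusive fields used above and a deadline nd:

  // Translate the stored bool into the new lock_kind enum.
  auto kind = (entity.exclusive != 0u) ? file_handle::lock_kind::exclusive
                                       : file_handle::lock_kind::shared;
  auto outcome = _h.lock_range(entity.value, 1, kind, nd);
  if(!outcome)
  {
    // Contended: the caller records which entity failed, unwinds the
    // locks already taken, and retries, as in the hunks above.
  }
  _h.unlock_range(entity.value, 1);  // a one-byte unlock mirrors the lock
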
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
index 9b8c6941..c16202ee 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
@@ -148,7 +148,7 @@ namespace algorithm
_temphmap = {};
// Release my shared locks and try locking inuse exclusively
_hlockinuse.unlock();
- auto lockresult = _h.try_lock(_initialisingoffset, 2, true);
+ auto lockresult = _h.try_lock_range(_initialisingoffset, 2, file_handle::lock_kind::exclusive);
#ifndef NDEBUG
if(!lockresult && lockresult.error() != errc::timed_out)
{
@@ -202,7 +202,7 @@ namespace algorithm
OUTCOME_TRY(ret, file_handle::file(base, lockfile, file_handle::mode::write, file_handle::creation::if_needed, file_handle::caching::reads));
file_handle temph;
// Am I the first person to this file? Lock everything exclusively
- auto lockinuse = ret.try_lock(_initialisingoffset, 2, true);
+ auto lockinuse = ret.try_lock_range(_initialisingoffset, 2, file_handle::lock_kind::exclusive);
if(lockinuse.has_error())
{
if(lockinuse.error() != errc::timed_out)
@@ -210,7 +210,7 @@ namespace algorithm
return std::move(lockinuse).error();
}
// Somebody else is also using this file, so try to read the hash index file I ought to use
- lockinuse = ret.lock(_lockinuseoffset, 1, false); // inuse shared access, blocking
+ lockinuse = ret.lock_range(_lockinuseoffset, 1, file_handle::lock_kind::shared); // inuse shared access, blocking
if(!lockinuse)
{
return std::move(lockinuse).error();
@@ -261,7 +261,7 @@ namespace algorithm
atomic downgrade of exclusive range to shared range, we're fully prepared for other users
now. The _initialisingoffset remains exclusive to prevent double entry into this init routine.
*/
- OUTCOME_TRY(lockinuse2, ret.lock(_lockinuseoffset, 1, false));
+ OUTCOME_TRY(lockinuse2, ret.lock_range(_lockinuseoffset, 1, file_handle::lock_kind::shared));
lockinuse = std::move(lockinuse2); // releases exclusive lock on all three offsets
return memory_map(std::move(ret), std::move(temph), std::move(lockinuse.value()), std::move(hmap), std::move(temphmap));
}
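
memory_map uses the same enum for its non-blocking probes: try_lock_range() yields a result whose errc::timed_out error simply means another user holds the range, while any other error is fatal. A condensed sketch of that probe-then-fall-back flow, reusing ret, _initialisingoffset, and _lockinuseoffset from the hunks above:

  // Probe: am I the first user? Try to take the init range exclusively.
  auto lockinuse = ret.try_lock_range(_initialisingoffset, 2, file_handle::lock_kind::exclusive);
  if(lockinuse.has_error())
  {
    if(lockinuse.error() != errc::timed_out)
    {
      return std::move(lockinuse).error();  // a real failure, not contention
    }
    // Somebody else got there first: take shared "in use" access, blocking.
    lockinuse = ret.lock_range(_lockinuseoffset, 1, file_handle::lock_kind::shared);
  }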