Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/windirstat/llfio.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r-- example/use_cases.cpp | 4
-rw-r--r-- include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp | 12
-rw-r--r-- include/llfio/v2.0/detail/impl/posix/file_handle.ipp | 8
-rw-r--r-- include/llfio/v2.0/directory_handle.hpp | 2
-rw-r--r-- include/llfio/v2.0/io_handle.hpp | 24
-rw-r--r-- test/tests/map_handle_create_close/runner.cpp | 6
6 files changed, 36 insertions(+), 20 deletions(-)
diff --git a/example/use_cases.cpp b/example/use_cases.cpp
index 740312d6..6fc448b6 100644
--- a/example/use_cases.cpp
+++ b/example/use_cases.cpp
@@ -52,7 +52,7 @@ void read_entire_file1()
std::vector<llfio::byte> buffer(fh.maximum_extent().value());
// Synchronous scatter read from file
- llfio::file_handle::buffers_type filled = llfio::read(
+ llfio::file_handle::size_type bytesread = llfio::read(
fh, // handle to read from
0, // offset
{{ buffer.data(), buffer.size() }} // Single scatter buffer of the vector
@@ -61,7 +61,7 @@ void read_entire_file1()
// In case of racy truncation of file by third party to new length, adjust buffer to
// bytes actually read
- buffer.resize(filled[0].size());
+ buffer.resize(bytesread);
//! [file_entire_file1]
}
diff --git a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
index 39d08564..9870b5cd 100644
--- a/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
+++ b/include/llfio/v2.0/algorithm/shared_fs_mutex/atomic_append.hpp
@@ -138,7 +138,8 @@ namespace algorithm
bool first = true;
do
{
- OUTCOME_TRY(_, _h.read(0, {{reinterpret_cast<byte *>(&_header), 48}}));
+ file_handle::buffer_type req{reinterpret_cast<byte *>(&_header), 48};
+ OUTCOME_TRY(_, _h.read({req, 0}));
if(_[0].data() != reinterpret_cast<byte *>(&_header))
{
memcpy(&_header, _[0].data(), _[0].size());
@@ -299,7 +300,8 @@ namespace algorithm
//! to avoid a duplicate read later
for(;;)
{
- file_handle::io_result<file_handle::buffers_type> readoutcome = _h.read(my_lock_request_offset, {{_buffer, sizeof(_buffer)}});
+ file_handle::buffer_type req{_buffer, sizeof(_buffer)};
+ file_handle::io_result<file_handle::buffers_type> readoutcome = _h.read({req, my_lock_request_offset});
// Should never happen :)
if(readoutcome.has_error())
{
@@ -361,7 +363,8 @@ namespace algorithm
}
assert(record_offset >= start_offset);
assert(record_offset - start_offset <= sizeof(_buffer));
- OUTCOME_TRY(batchread, _h.read(start_offset, {{_buffer, (size_t)(record_offset - start_offset) + sizeof(atomic_append_detail::lock_request)}}));
+ file_handle::buffer_type req{_buffer, (size_t)(record_offset - start_offset) + sizeof(atomic_append_detail::lock_request)};
+ OUTCOME_TRY(batchread, _h.read({req, start_offset}));
assert(batchread[0].size() == record_offset - start_offset + sizeof(atomic_append_detail::lock_request));
const atomic_append_detail::lock_request *record = reinterpret_cast<atomic_append_detail::lock_request *>(batchread[0].data() + batchread[0].size() - sizeof(atomic_append_detail::lock_request));
const atomic_append_detail::lock_request *firstrecord = reinterpret_cast<atomic_append_detail::lock_request *>(batchread[0].data());
@@ -502,7 +505,8 @@ namespace algorithm
bool done = false;
while(!done)
{
- auto bytesread_ = _h.read(_header.first_known_good, {{_buffer, sizeof(_buffer)}});
+ file_handle::buffer_type req{_buffer, sizeof(_buffer)};
+ auto bytesread_ = _h.read({req, _header.first_known_good});
if(bytesread_.has_error())
{
// If distance between original first known good and end of file is exactly
diff --git a/include/llfio/v2.0/detail/impl/posix/file_handle.ipp b/include/llfio/v2.0/detail/impl/posix/file_handle.ipp
index 7972f16b..7e67f193 100644
--- a/include/llfio/v2.0/detail/impl/posix/file_handle.ipp
+++ b/include/llfio/v2.0/detail/impl/posix/file_handle.ipp
@@ -464,7 +464,7 @@ result<file_handle::extent_type> file_handle::zero(file_handle::extent_type offs
auto *buffer = static_cast<byte *>(alloca(bytes));
memset(buffer, 0, bytes);
OUTCOME_TRY(written, write(offset, {{buffer, bytes}}, d));
- return written[0].size();
+ return written;
}
try
{
@@ -477,9 +477,9 @@ result<file_handle::extent_type> file_handle::zero(file_handle::extent_type offs
{
auto towrite = (bytes < blocksize) ? bytes : blocksize;
OUTCOME_TRY(written, write(offset, {{buffer, towrite}}, d));
- offset += written[0].size();
- bytes -= written[0].size();
- ret += written[0].size();
+ offset += written;
+ bytes -= written;
+ ret += written;
}
return ret;
}
diff --git a/include/llfio/v2.0/directory_handle.hpp b/include/llfio/v2.0/directory_handle.hpp
index 9dbafd20..3385e041 100644
--- a/include/llfio/v2.0/directory_handle.hpp
+++ b/include/llfio/v2.0/directory_handle.hpp
@@ -153,7 +153,7 @@ public:
is allocated internally and returned in the buffers returned which needs to not be destructed until one
is no longer using any items within (leafnames are views onto the original kernel data).
*/
- constexpr io_request(buffers_type _buffers, path_view_type _glob = {}, filter _filtering = filter::fastdeleted, span<char> _kernelbuffer = {})
+ /*constexpr*/ io_request(buffers_type _buffers, path_view_type _glob = {}, filter _filtering = filter::fastdeleted, span<char> _kernelbuffer = {})
: buffers(std::move(_buffers))
, glob(_glob)
, filtering(_filtering)
diff --git a/include/llfio/v2.0/io_handle.hpp b/include/llfio/v2.0/io_handle.hpp
index 55aa1f21..ce17e4df 100644
--- a/include/llfio/v2.0/io_handle.hpp
+++ b/include/llfio/v2.0/io_handle.hpp
@@ -228,7 +228,7 @@ public:
_bytes_transferred = 0;
for(auto &i : this->value())
{
- _bytes_transferred += i.second;
+ _bytes_transferred += i.size();
}
}
return _bytes_transferred;
@@ -307,12 +307,17 @@ public:
LLFIO_HEADERS_ONLY_VIRTUAL_SPEC io_result<buffers_type> read(io_request<buffers_type> reqs, deadline d = deadline()) noexcept;
//! \overload
LLFIO_MAKE_FREE_FUNCTION
- io_result<buffers_type> read(extent_type offset, std::initializer_list<buffer_type> lst, deadline d = deadline()) noexcept
+ io_result<size_type> read(extent_type offset, std::initializer_list<buffer_type> lst, deadline d = deadline()) noexcept
{
buffer_type *_reqs = reinterpret_cast<buffer_type *>(alloca(sizeof(buffer_type) * lst.size()));
memcpy(_reqs, lst.begin(), sizeof(buffer_type) * lst.size());
io_request<buffers_type> reqs(buffers_type(_reqs, lst.size()), offset);
- return read(reqs, d);
+ auto ret = read(reqs, d);
+ if(ret)
+ {
+ return ret.bytes_transferred();
+ }
+ return ret.error();
}
/*! \brief Write data to the open handle.
@@ -340,12 +345,17 @@ public:
LLFIO_HEADERS_ONLY_VIRTUAL_SPEC io_result<const_buffers_type> write(io_request<const_buffers_type> reqs, deadline d = deadline()) noexcept;
//! \overload
LLFIO_MAKE_FREE_FUNCTION
- io_result<const_buffers_type> write(extent_type offset, std::initializer_list<const_buffer_type> lst, deadline d = deadline()) noexcept
+ io_result<size_type> write(extent_type offset, std::initializer_list<const_buffer_type> lst, deadline d = deadline()) noexcept
{
const_buffer_type *_reqs = reinterpret_cast<const_buffer_type *>(alloca(sizeof(const_buffer_type) * lst.size()));
memcpy(_reqs, lst.begin(), sizeof(const_buffer_type) * lst.size());
io_request<const_buffers_type> reqs(const_buffers_type(_reqs, lst.size()), offset);
- return write(reqs, d);
+ auto ret = write(reqs, d);
+ if(ret)
+ {
+ return ret.bytes_transferred();
+ }
+ return ret.error();
}
/*! \brief Issue a write reordering barrier such that writes preceding the barrier will reach storage
@@ -560,7 +570,7 @@ inline io_handle::io_result<io_handle::buffers_type> read(io_handle &self, io_ha
return self.read(std::forward<decltype(reqs)>(reqs), std::forward<decltype(d)>(d));
}
//! \overload
-inline io_handle::io_result<io_handle::buffers_type> read(io_handle &self, io_handle::extent_type offset, std::initializer_list<io_handle::buffer_type> lst, deadline d = deadline()) noexcept
+inline io_handle::io_result<io_handle::size_type> read(io_handle &self, io_handle::extent_type offset, std::initializer_list<io_handle::buffer_type> lst, deadline d = deadline()) noexcept
{
return self.read(std::forward<decltype(offset)>(offset), std::forward<decltype(lst)>(lst), std::forward<decltype(d)>(d));
}
@@ -591,7 +601,7 @@ inline io_handle::io_result<io_handle::const_buffers_type> write(io_handle &self
return self.write(std::forward<decltype(reqs)>(reqs), std::forward<decltype(d)>(d));
}
//! \overload
-inline io_handle::io_result<io_handle::const_buffers_type> write(io_handle &self, io_handle::extent_type offset, std::initializer_list<io_handle::const_buffer_type> lst, deadline d = deadline()) noexcept
+inline io_handle::io_result<io_handle::size_type> write(io_handle &self, io_handle::extent_type offset, std::initializer_list<io_handle::const_buffer_type> lst, deadline d = deadline()) noexcept
{
return self.write(std::forward<decltype(offset)>(offset), std::forward<decltype(lst)>(lst), std::forward<decltype(d)>(d));
}
diff --git a/test/tests/map_handle_create_close/runner.cpp b/test/tests/map_handle_create_close/runner.cpp
index 2198e7b8..41e2ec71 100644
--- a/test/tests/map_handle_create_close/runner.cpp
+++ b/test/tests/map_handle_create_close/runner.cpp
@@ -115,13 +115,15 @@ template <class U> inline void map_handle_create_close_(U &&f)
// Make sure maph's read() does what it is supposed to
if (use_file_backing)
{
- auto b = maph.read(0, { {nullptr, 20} }).value();
+ map_handle::buffer_type req{ nullptr, 20 };
+ auto b = maph.read({ req, 0 }).value();
KERNELTEST_CHECK(testreturn, b[0].data() == addr);
KERNELTEST_CHECK(testreturn, b[0].size() == 19); // reads do not read more than the backing length
}
else
{
- auto b = maph.read(5, { {nullptr, 5000} }).value();
+ map_handle::buffer_type req{ nullptr, 5000 };
+ auto b = maph.read({ req, 5 }).value();
KERNELTEST_CHECK(testreturn, b[0].data() == addr+5); // NOLINT
KERNELTEST_CHECK(testreturn, b[0].size() == 4091);
}