Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/windirstat/llfio.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Doxyfile2
-rw-r--r--cmake/headers.cmake1
-rw-r--r--include/kvstore/kvstore.hpp600
-rw-r--r--include/llfio/revision.hpp6
-rw-r--r--include/llfio/v2.0/algorithm/handle_adapter/combining.hpp32
-rw-r--r--include/llfio/v2.0/algorithm/handle_adapter/xor.hpp134
-rw-r--r--include/llfio/v2.0/async_file_handle.hpp6
-rw-r--r--test/tests/handle_adapter_xor.cpp32
8 files changed, 736 insertions, 77 deletions
diff --git a/Doxyfile b/Doxyfile
index 798606b9..c93ab944 100644
--- a/Doxyfile
+++ b/Doxyfile
@@ -769,8 +769,10 @@ WARN_LOGFILE = doxygen_warnings.log
# Note: If this tag is empty the current directory is searched.
INPUT = include/llfio \
+ include/kvstore \
include/llfio/v2.0 \
include/llfio/v2.0/algorithm \
+ include/llfio/v2.0/algorithm/handle_adapter \
include/llfio/v2.0/algorithm/shared_fs_mutex \
release_notes.md Build.md
diff --git a/cmake/headers.cmake b/cmake/headers.cmake
index 93bfcd15..9a091b8f 100644
--- a/cmake/headers.cmake
+++ b/cmake/headers.cmake
@@ -1,6 +1,7 @@
# DO NOT EDIT, GENERATED BY SCRIPT
set(llfio_HEADERS
"include/llfio/v2.0/deadline.h"
+ "include/kvstore/kvstore.hpp"
"include/llfio.hpp"
"include/llfio/llfio.hpp"
"include/llfio/ntkernel-error-category/include/config.hpp"
diff --git a/include/kvstore/kvstore.hpp b/include/kvstore/kvstore.hpp
new file mode 100644
index 00000000..d21eff8e
--- /dev/null
+++ b/include/kvstore/kvstore.hpp
@@ -0,0 +1,600 @@
+/* Standard key-value store for C++
+(C) 2018 Niall Douglas <http://www.nedproductions.biz/> (24 commits)
+File Created: Nov 2018
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License in the accompanying file
+Licence.txt or at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+Distributed under the Boost Software License, Version 1.0.
+ (See accompanying file Licence.txt or copy at
+ http://www.boost.org/LICENSE_1_0.txt)
+*/
+
+#ifndef KVSTORE_HPP
+#define KVSTORE_HPP
+
+#include "../llfio/v2.0/file_handle.hpp"
+
+#ifdef __has_include
+#if __has_include("../llfio/v2.0/quickcpplib/include/memory_resource.hpp")
+#include "../llfio/v2.0/quickcpplib/include/memory_resource.hpp"
+#else
+#include "quickcpplib/include/memory_resource.hpp"
+#endif
+#elif __PCPP_ALWAYS_TRUE__
+#include "quickcpplib/include/memory_resource.hpp"
+#else
+#include "../llfio/v2.0/quickcpplib/include/memory_resource.hpp"
+#endif
+
+//! \file kvstore.hpp Provides the abstract interface for a key-value store.
+
+#if defined(LLFIO_UNSTABLE_VERSION) && !defined(LLFIO_DISABLE_ABI_PERMUTATION)
+#include "../revision.hpp"
+#define KVSTORE_V1 (QUICKCPPLIB_BIND_NAMESPACE_VERSION(kvstore_v1, LLFIO_PREVIOUS_COMMIT_UNIQUE))
+#else
+#define KVSTORE_V1 (QUICKCPPLIB_BIND_NAMESPACE_VERSION(kvstore_v1))
+#endif
+/*! \def KVSTORE_V1
+\ingroup config
+\brief The namespace configuration of this kv store v1. Consists of a sequence
+of bracketed tokens later fused by the preprocessor into namespace and C++ module names.
+*/
+#if DOXYGEN_IS_IN_THE_HOUSE
+//! The kv store namespace
+namespace kvstore_v1_xxx
+{
+}
+/*! \brief The namespace of this kv store v1 which will be some unknown inline
+namespace starting with `v1_` inside the `kvstore` namespace.
+\ingroup config
+*/
+#define KVSTORE_V1_NAMESPACE kvstore_v1_xxx
+/*! \brief Expands into the appropriate namespace markup to enter the kv store v1 namespace.
+\ingroup config
+*/
+#define KVSTORE_V1_NAMESPACE_BEGIN \
+ namespace kvstore_v1_xxx \
+ {
+/*! \brief Expands into the appropriate namespace markup to enter the C++ module
+exported kv store v1 namespace.
+\ingroup config
+*/
+#define KVSTORE_V1_NAMESPACE_EXPORT_BEGIN \
+ export namespace kvstore_v1_xxx \
+ {
+/*! \brief Expands into the appropriate namespace markup to exit the kv store v1 namespace.
+\ingroup config
+*/
+#define KVSTORE_V1_NAMESPACE_END }
+#elif defined(GENERATING_LLFIO_MODULE_INTERFACE)
+#define KVSTORE_V1_NAMESPACE QUICKCPPLIB_BIND_NAMESPACE(KVSTORE_V1)
+#define KVSTORE_V1_NAMESPACE_BEGIN QUICKCPPLIB_BIND_NAMESPACE_BEGIN(KVSTORE_V1)
+#define KVSTORE_V1_NAMESPACE_EXPORT_BEGIN QUICKCPPLIB_BIND_NAMESPACE_EXPORT_BEGIN(KVSTORE_V1)
+#define KVSTORE_V1_NAMESPACE_END QUICKCPPLIB_BIND_NAMESPACE_END(KVSTORE_V1)
+#else
+#define KVSTORE_V1_NAMESPACE QUICKCPPLIB_BIND_NAMESPACE(KVSTORE_V1)
+#define KVSTORE_V1_NAMESPACE_BEGIN QUICKCPPLIB_BIND_NAMESPACE_BEGIN(KVSTORE_V1)
+#define KVSTORE_V1_NAMESPACE_EXPORT_BEGIN QUICKCPPLIB_BIND_NAMESPACE_BEGIN(KVSTORE_V1)
+#define KVSTORE_V1_NAMESPACE_END QUICKCPPLIB_BIND_NAMESPACE_END(KVSTORE_V1)
+#endif
+
+KVSTORE_V1_NAMESPACE_BEGIN
+
+namespace llfio = LLFIO_V2_NAMESPACE;
+template <class T> using span = llfio::span<T>;
+using llfio::byte;
+template <class T> using result = llfio::result<T>;
+
+namespace detail
+{
+ template <class T> struct extract_span_type
+ {
+ using type = void;
+ };
+ template <class T> struct extract_span_type<span<T>>
+ {
+ using type = T;
+ };
+}
+
+//! Traits
+namespace traits
+{
+ namespace detail
+ {
+ // From http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4436.pdf
+ namespace impl
+ {
+ template <typename... Ts> struct make_void
+ {
+ using type = void;
+ };
+ template <typename... Ts> using void_t = typename make_void<Ts...>::type;
+ template <class...> struct types
+ {
+ using type = types;
+ };
+ template <template <class...> class T, class types, class = void> struct test_apply
+ {
+ using type = void;
+ };
+ template <template <class...> class T, class... Ts> struct test_apply<T, types<Ts...>, void_t<T<Ts...>>>
+ {
+ using type = T<Ts...>;
+ };
+ };
+ template <template <class...> class T, class... Ts> using test_apply = impl::test_apply<T, impl::types<Ts...>>;
+ template <class T, class... Args> span<byte> _do_attach_object_instance(T &, span<byte> b) { return in_place_attach<T>(b); }
+ template <class T, class... Args> span<byte> _do_detach_object_instance(T &, span<byte> b) { return in_place_detach<T>(b); }
+
+ template <class T, class... Args> using get_attach_result = decltype(_do_attach_object_instance(std::declval<T>(), std::declval<Args>()...));
+ template <class... Args> using safe_get_attach_result = test_apply<get_attach_result, Args...>;
+ template <class T, class... Args> using get_detach_result = decltype(_do_detach_object_instance(std::declval<T>(), std::declval<Args>()...));
+ template <class... Args> using safe_get_detach_result = test_apply<get_detach_result, Args...>;
+ }
+
+ /*! \brief True if a type is trivially attachable i.e. requires no extra work to attach.
+
+ A type is trivially attachable if it does not contain non-null pointers or references.
+ Determining what elements are inside UDTs requires Reflection to be available, so on
+ compilers without Reflection, this is true only for fundamental types.
+ */
+ template <class T> struct is_trivially_attachable
+ {
+ static constexpr bool value = std::is_fundamental<T>::value && !std::is_void<T>::value;
+ };
+ template <class T> static constexpr bool is_trivially_attachable_v = is_trivially_attachable<T>::value;
+ /*! \brief True if a type is trivially attachable, or has defined an ADL discovered free
+ function of the form `span<byte> in_place_attach<T>(span<byte>)`.
+
+ The most common custom object attachment action is to initialise all pointers and references
+ with something valid for the running C++ program. When `in_place_attach<T>` is called, the
+ lifetime of `T` has not yet begun, so any vptr etc. will be invalid.
+ */
+ template <class T, class AttachResultType = typename detail::safe_get_attach_result<T, span<byte>>::type> struct is_attachable
+ {
+ static constexpr bool value = is_trivially_attachable<T>::value || std::is_same<AttachResultType, span<byte>>::value;
+ };
+ template <class T> static constexpr bool is_attachable_v = is_attachable<T>::value;
+
+ /*! \brief True if a type is trivially detachable i.e. requires no extra work to detach.
+
+ A type is trivially detachable if it does not contain non-null pointers or references, and
+ has a trivial destructor.
+ Determining what elements are inside UDTs requires Reflection to be available, so on
+ compilers without Reflection, this is true only for fundamental types.
+ */
+ template <class T> struct is_trivially_detachable
+ {
+ static constexpr bool value = std::is_fundamental<T>::value && !std::is_void<T>::value;
+ };
+ template <class T> static constexpr bool is_trivially_detachable_v = is_trivially_detachable<T>::value;
+ /*! \brief True if a type is trivially detachable, or has defined an ADL discovered free
+ function of the form `span<byte> in_place_detach<T>(span<byte>)`.
+
+ The most common custom object detachment action is to reset all pointers and references
+ to all bits zero. When `in_place_detach<T>` is called, the lifetime of `T` has ended, so
+ any vptr etc. will be invalid.
+ */
+ template <class T, class DetachResultType = typename detail::safe_get_detach_result<T, span<byte>>::type> struct is_detachable
+ {
+ static constexpr bool value = is_trivially_detachable<T>::value || std::is_same<DetachResultType, span<byte>>::value;
+ };
+ template <class T> static constexpr bool is_detachable_v = is_detachable<T>::value;
+}
+
+/*! The error codes specific to `basic_key_value_store`. Note the `std::errc` equivalent
+codes may also be returned e.g. `std::errc::insufficient_disk_space`.
+*/
+enum class kvstore_errc
+{
+ success = 0,
+ invalid_uri, //!< The URI is not in an understood format.
+ unsupported_uri, //!< The URI specified an unsupported scheme or mechanism.
+ unsupported_integrity, //!< The requested integrity level is not available for this device URI.
+ transaction_aborted_collision, //!< The transaction could not be committed due to dependent key update.
+};
+
+/*! \brief Information about an available key value store implementation.
+*/
+struct basic_key_value_store_info
+{
+ //! The type of the UTF-8 URI used by this store
+ // using uri_type = std::basic_string<char8_t>;
+ using uri_type = std::basic_string<char>;
+ //! The value extent type used by this store
+ using extent_type = llfio::file_handle::extent_type;
+ //! The memory extent type used by this store
+ using size_type = llfio::file_handle::size_type;
+ //! The handle type used by this store
+ using handle_type = llfio::file_handle;
+ //! The mode used by this store
+ using mode = handle_type::mode;
+ //! The creation used by this store
+ using creation = handle_type::creation;
+ //! The kernel caching used by this store
+ using caching = handle_type::caching;
+
+ const char *name; //!< The name of this store implementation
+ size_type min_key_size; //!< The minimum key size, in bytes, supported.
+ size_type max_key_size; //!< The maximum key size, in bytes, supported.
+ extent_type min_value_size; //!< The minimum value size, in bytes, supported.
+ extent_type max_value_size; //!< The maximum value size, in bytes, supported.
+ //! Features requested, or provided by, this store.
+ QUICKCPPLIB_BITFIELD_BEGIN(features)
+ {
+ none = 1U << 0U, //!< Bare key-value store. Very likely to choose a hardware implementation, if one is available.
+ shared_memory = 1U << 1U, //!< Use a single shared memory region for storage. Note that keys and value sizes freeze after URI fetch.
+ history = 1U << 2U, //!< A certain amount of history of previous valid states of the store is kept such that a previously valid state can always be restored after sudden power loss.
+ stable_values = 1U << 3U, //!< Updates do not appear to modify any pinned value.
+ stable_keys = 1U << 4U, //!< Updates do not appear to remove any pinned keys.
+ update_deltas = 1U << 5U, //!< In-place partial updates are recorded as change deltas.
+ atomic_snapshots = 1U << 6U, //!< The ability to pin the value of more than one key in an atomic snapshot.
+ atomic_transactions = 1U << 7U //!< The ability to update many items with dependencies on other items as a single, all-or-nothing, change.
+ }
+ QUICKCPPLIB_BITFIELD_END(features)
+ features features; //!< The features this store implementation provides
+ /*! Examine the specified URI for suitability for this store implementation.
+ Zero means that the URI location is suitable, but the format at that location
+ is not compatible. Negative means the URI location is not possible.
+ Higher positive scores outrank other positive scores.
+ */
+ int (*score)(const uri_type &uri, handle_type::mode, handle_type::creation creation);
+ /*! Construct a store implementation.
+ */
+ result<basic_key_value_store> (*create)(const basic_key_value_store::uri_type &uri, size_type key_size, features _features, mode _mode, creation _creation, caching _caching);
+};
+
+/*! \class basic_key_value_store
+\brief A possibly hardware-implemented basic key-value store.
+
+\warning This class has not been implemented nor will be any time soon. It is here for various folk to study the API design.
+
+Reference document https://www.snia.org/sites/default/files/technical_work/PublicReview/KV%20Storage%20API%200.16.pdf
+*/
+class basic_key_value_store
+{
+public:
+ //! The key type of this store
+ using key_type = span<const byte>;
+ //! The value type of this store
+ using value_type = span<byte>;
+
+ //! The type of the UTF-8 URI used by this store
+ using uri_type = basic_key_value_store_info::uri_type;
+ //! The device extent type used by this store
+ using capacity_type = QUICKCPPLIB_NAMESPACE::integers128::uint128;
+ //! The value extent type used by this store
+ using extent_type = llfio::file_handle::extent_type;
+ //! The memory extent type used by this store
+ using size_type = llfio::file_handle::size_type;
+ //! The allocator type used by this store
+ using allocator_type = QUICKCPPLIB_NAMESPACE::pmr::polymorphic_allocator<byte>;
+
+ //! The handle type used by this store
+ using handle_type = llfio::file_handle;
+ //! The mode used by this store
+ using mode = handle_type::mode;
+ //! The creation used by this store
+ using creation = handle_type::creation;
+ //! The kernel caching used by this store
+ using caching = handle_type::caching;
+ //! The buffer type used by this store
+ using buffer_type = handle_type::buffer_type;
+ //! The const buffer type used by this store
+ using const_buffer_type = handle_type::const_buffer_type;
+ //! The buffers type used by this store
+ using buffers_type = handle_type::buffers_type;
+ //! The const buffers type used by this store
+ using const_buffers_type = handle_type::const_buffers_type;
+ //! The i/o request type used by this store
+ template <class T> using io_request = handle_type::io_request<T>;
+ //! The i/o result type used by this store
+ template <class T> using io_result = handle_type::io_result<T>;
+
+ //! Features requested, or provided by, this store.
+ using features = struct basic_key_value_store_info::features;
+
+protected:
+ uri_type _uri{};
+ bool _frozen{false};
+ size_type _key_size{0}, _value_size{0}, _key_index_size{0};
+ capacity_type _items_quota{0}, _bytes_quota{0};
+ allocator_type _allocator{};
+
+ // Cannot be copied
+ basic_key_value_store(const basic_key_value_store &) = delete;
+ basic_key_value_store &operator=(const basic_key_value_store &) = delete;
+ //! Move constructor
+ basic_key_value_store(basic_key_value_store &&o) noexcept = default;
+ //! Move assignment
+ basic_key_value_store &operator=(basic_key_value_store &&o) noexcept = default;
+
+public:
+ virtual ~basic_key_value_store() {}
+
+ /*! Fetch the URI for this store. Note that if `features::shared_memory`, this will freeze
+ the keys and value sizes in place from henceforth onwards. Only the values will be mutable.
+ */
+ virtual result<uri_type> uri() noexcept = 0;
+ //! Returns the number of bytes in the key for this store.
+ size_type key_size() const noexcept { return _key_size; }
+ //! Returns the number of bytes in the value for this store. Zero means variably sized.
+ size_type value_size() const noexcept { return _value_size; }
+ //! True if the keys and values sizes are immutable, and only values may be changed.
+ bool frozen() const { return _frozen; }
+
+ //! True if the store is currently empty.
+ virtual bool empty() const noexcept = 0;
+ //! Returns the maximum number of key-values which could be potentially stored (estimated if using variably sized values).
+ virtual result<capacity_type> max_size() const noexcept = 0;
+ //! Sets the maximum number of key-values which could be potentially stored.
+ virtual result<void> max_size(capacity_type quota) noexcept = 0;
+ //! Returns the current number of key-values currently in the store.
+ virtual result<capacity_type> size() const noexcept = 0;
+ //! Returns the maximum number of bytes which could be potentially stored.
+ virtual result<capacity_type> max_bytes_stored() const noexcept = 0;
+ //! Sets the maximum number of bytes which could be potentially stored.
+ virtual result<void> max_bytes_stored(capacity_type quota) noexcept = 0;
+ //! Returns the current number of bytes stored.
+ virtual result<capacity_type> bytes_stored() const noexcept = 0;
+ //! Returns the maximum value size in bytes supported by this store.
+ virtual result<extent_type> max_value_size() const noexcept = 0;
+
+ //! Access the allocator used for this store.
+ allocator_type &allocator() noexcept { return _allocator; }
+ //! Access the allocator used for this store.
+ const allocator_type &allocator() const noexcept { return _allocator; }
+
+ //! Retrieves any MSB key index size, in bytes.
+ size_type key_index_size() const noexcept { return _key_index_size; }
+ /*! Sets an MSB key index size, in bytes. Some store implementations may only permit this if
+ the store is empty. Setting a MSB key index causes filtering and search operations based on the key
+ index to be constant rather than linear time.
+ */
+ virtual result<void> key_index_size(size_type bytes) noexcept = 0;
+
+ //! Clears the store, possibly more quickly than deleting every key. This call may be racy.
+ virtual result<void> clear() noexcept = 0;
+ //! The state type for performing a filtered match
+ using filter_state_type = uint64_t;
+ /*! Returns the first or next key in the store matching the given filter. This call is racy, and
+ filter processing may take as much time as iterating the entire contents of the store unless
+ the mask **exactly** matches any key index, in which case it shall be constant time. Note
+ that some hardware devices perform filter matching on-device, which does not change the
+ algorithmic complexity, but may benefit from greatly increased on-device bandwidth.
+
+ To begin a match, pass a default initialised `filter_state_type`. An error matching
+ `errc::no_such_file_or_directory` will be returned if no more keys match. The default
+ initialised mask and bits cause matching of all keys in the store.
+ */
+ virtual result<void> match(filter_state_type &state, key_type mask = {}, key_type bits = {}) noexcept = 0;
+
+ /*! Returns a handle type which gives access to a key's value. The lifetime of the
+ returned handle *may* pin the key's value at the time of retrieval if this store
+ has `features::stable_values`.
+
+ This function avoids reading any of the value where possible. It may internally
+ use memory maps, and thus return scatter buffers to elsewhere memory, same as
+ `llfio::mapped_file_handle`.
+
+ The ability to open a value for write access depends on the lack of `features::stable_values`,
+ or presence of `features::update_deltas`. Stable values without update deltas
+ will not permit the value to be opened for write access.
+
+ This call is not racy with respect to its value if `features::stable_values`.
+ Individual key opens are racy with respect to one another i.e. other threads
+ or processes may modify values concurrently, thus causing a sequence of key
+ opens to refer to disjoint moments in time. Use `snapshot` if you don't want
+ this.
+
+ \note The handle type returned implements `llfio::file_handle`, but may be a
+ lightweight synthetic handle with no kernel resources backing it. In particular,
+ not all operations may be implemented e.g. byte range locks.
+ */
+ virtual result<handle_type> open(key_type key, mode _mode = mode::read) noexcept = 0;
+ /*! Scatter reads some or all of a key's value into the supplied buffers,
+ possibly returning completely different buffers to memory elsewhere similar
+ to `llfio::mapped_file_handle`.
+
+ This call *may* be more efficient than `open()`, or it may be implemented entirely
+ in terms of `open()`. This call is unavoidably racy, and it may read different
+ values for a key each time it is called.
+ */
+ virtual io_result<buffers_type> read(io_request<buffers_type> reqs, key_type key, llfio::deadline d = llfio::deadline()) noexcept = 0;
+ /*! Gather writes some or all of a key's value from the supplied buffers,
+ returning buffers written. For stores with `features::stable_values` but without
+ `features::update_deltas`, the offset must be zero, and the gather list is assumed
+ to reflect the whole of the value to be written.
+
+ This call *may* be more efficient than `open()`, or it may be implemented entirely
+ in terms of `open()`. This call is unavoidably racy, and it may write into different
+ values for a key each time it is called.
+ */
+ virtual io_result<const_buffers_type> write(key_type key, io_request<const_buffers_type> reqs, llfio::deadline d = llfio::deadline()) noexcept = 0;
+
+
+ /*! Returns a snapshot of the key store's values at a single moment in time i.e. all the values
+ appear as-if in a single moment of time with no changes appearing to values after the snapshot.
+ In this sense, the snapshot is *atomic*. Values snapshotted are pinned to the moment of the
+ snapshot.
+
+ Some store implementations do not implement both `features::stable_values` and
+ `features::stable_keys`, if so then creating a snapshot must read all the values for all the
+ keys during snapshot creation (which can be more efficient than reading each key-value at a
+ time). Otherwise values are not read, rather a note is taken of their current state,
+ and values are only fetched on demand.
+
+ Some store implementations do not implement `features::stable_keys`, if so a snapshot's
+ list of keys may be affected by subsequent changes to the main store. In other words,
+ the keys in a snapshot may match those of the main store; only the values for those keys
+ which are not deleted in the main store remain pinned.
+
+ Note that if a store does not have `features::history`, one may find that retrieving a
+ value from a snapshot may no longer be able to retrieve that value as it has since been
+ replaced. In this situation you will receive an error matching `errc::no_such_file_or_directory`.
+
+ It is never possible to read a value from a snapshot and get an updated value instead
+ of the snapshotted value. This is why not all stores implement `features::snapshot`, as
+ a store must implement some mechanism of knowing when a value has been updated since
+ it was pinned.
+
+ If a store implementation does not implement `features::atomic_snapshot`, this function returns
+ an error code comparing equal to `errc::operation_not_supported`.
+ */
+ virtual result<basic_key_value_store> snapshot() noexcept = 0;
+
+ class transaction;
+ /*! Begin a transaction on this key value store.
+
+ Transactions are a superset of snapshots in that they also allow the modification of
+ multiple items in a store in an atomic, all-or-nothing, operation. Reading values
+ marks that item as a *dependency* i.e. if that item's value is updated between the
+ moment of opening the transaction and committing the transaction, the transaction will abort.
+ You can mark values as dependencies without actually reading them using `dependencies()`.
+
+ If a store implementation does not implement `features::atomic_transactions`, this function returns
+ an error code comparing equal to `errc::operation_not_supported`.
+ */
+ virtual result<transaction> begin_transaction() noexcept = 0;
+};
+
+class basic_key_value_store::transaction : public basic_key_value_store
+{
+public:
+ //! Indicate that there is a dependency on the snapshotted values of these keys without fetching any values.
+ virtual result<void> dependencies(span<key_type> keys) noexcept = 0;
+ //! Try to commit this transaction, failing if any dependencies have been updated.
+ virtual result<void> commit() noexcept = 0;
+};
+
+/*! \brief Create a new key value store, or open or truncate an existing key value store, using the given URI.
+
+Query the system and/or process registry of key value store providers for an implementation capable of using
+a store at `uri` with the specified key size and features. Guaranteed built-in providers are:
+
+- `file://` based providers:
+
+ - `directory_simple_legacy`: A very simple FAT-compatible file based store based on turning the key into
+ hexadecimal, chopping it into chunks of four (`0x0000 - 0xffff`) such that a hex key `0x01234567890abcdef`'s
+ value would be stored in the path `store/01234/5678/90ab/cdef`. Key updates are first written into
+ `stage/01234/5678/90ab/cdef`, then once fully written they are atomically renamed into `store`.
+ This store implements only `features::stable_values`, and is very widely compatible, including with
+ networked drives. Its major downside is potential allocation wastage for many small sized values too
+ big to be stored in the inode, but substantially below the allocation granularity.
+
+ - `directory_modern`: A much more complex file based store which implements `features::history`,
+ `features::stable_values`, `features::stable_keys`, `features::update_deltas`, `features::atomic_snapshots`
+ and `features::atomic_transactions`.
+
+ Inside the file `monotonic` is a 64 bit monotonic count representing time. Updated values are kept at
+ `store/01234/5678/90ab/cdef/values/count`, where the monotonic count is atomically incremented every update.
+ `store/01234/5678/90ab/cdef/deltas` is where 4Kb update deltas are kept if the value is larger than 64Kb.
+ `store/01234/5678/90ab/cdef/latest` is the count of the latest value, its size, and if deltas need to be
+ applied.
+
+ - `single_file`: Initially all keys and values are kept in memory. Upon first URI fetch after first creation,
+ a single file is created comprising all the keys and values associatively mapped. This single file can
+ then be mapped as shared memory into multiple processes, thus enabling multiple concurrent C++ programs to
+ collaborate on a shared store where only values are mutable (and not resizeable). Only one process may pin
+ a value at a time concurrently.
+
+URIs may of course specify other sources of key value store than on the file system. Third parties may have
+registered system-wide implementations available to all programs. The local process may have registered
+additional implementations as well.
+*/
+LLFIO_HEADERS_ONLY_FUNC_SPEC result<basic_key_value_store> create_kvstore(const basic_key_value_store::uri_type &uri, //
+ basic_key_value_store::size_type key_size, //
+ basic_key_value_store::features _features, //
+ basic_key_value_store::mode _mode = basic_key_value_store::mode::write, //
+ basic_key_value_store::creation _creation = basic_key_value_store::creation::if_needed, //
+ basic_key_value_store::caching _caching = basic_key_value_store::caching::all);
+/*! \brief Open an existing key value store. A convenience overload for `create_kvstore()`.
+*/
+LLFIO_HEADERS_ONLY_FUNC_SPEC result<basic_key_value_store> open_kvstore(const basic_key_value_store::uri_type &uri, //
+ basic_key_value_store::mode _mode = basic_key_value_store::mode::write, //
+ basic_key_value_store::caching _caching = basic_key_value_store::caching::all);
+
+/*! \brief Fill an array with information about all the key value stores available to this process.
+*/
+LLFIO_HEADERS_ONLY_FUNC_SPEC result<span<basic_key_value_store_info>> enumerate_kvstores(span<basic_key_value_store_info> lst);
+
+#if 0
+/*! \brief A possibly hardware-implemented basic key-value store.
+\tparam KeyType An integer type used to uniquely identify the mapped value. Ideally keep
+these on a DMA boundary (`std::hardware_constructive_interference_size`).
+\tparam MappedType A type for which `traits::is_attachable` or `traits::is_detachable`
+is true, or else a span of such type to indicate the mapped type may be of variable size.
+If `MappedType` is of fixed size, and if
+`sizeof(KeyType) + sizeof(MappedType) <= std::hardware_constructive_interference_size`,
+a much more efficient implementation may be used. Ideally keep the address of the mapped
+value type on a DMA boundary (`std::hardware_constructive_interference_size`).
+*/
+LLFIO_TEMPLATE(class KeyType, class MappedType = span<byte>)
+LLFIO_TREQUIRES(LLFIO_TPRED(std::is_integral<KeyType>::value //
+ && (traits::is_attachable_v<MappedType> || traits::is_detachable_v<MappedType> //
+ || traits::is_attachable_v<typename detail::extract_span_type<MappedType>::type> || traits::is_detachable_v<typename detail::extract_span_type<MappedType>::type>) ))
+class key_value_store : public basic_key_value_store
+{
+public:
+ //! The key type of this store
+ using key_type = KeyType;
+ //! The value type of this store
+ using value_type = MappedType;
+
+ //! Key iterator
+ class const_iterator
+ {
+ basic_key_value_store *_parent;
+ key_type _mask, _bits;
+
+ public:
+ using value_type = key_type;
+
+ const_iterator();
+ value_type operator*() const;
+ const_iterator operator++();
+ };
+ //! Key iterator
+ using iterator = const_iterator;
+
+ /*! Returns the "first" key in the store matching the given filter. This call is racy, and
+ filter processing may take as much time as iterating the entire contents of the store unless
+ the mask **exactly** matches any key index, in which case it shall be constant time. Note
+ that some hardware devices perform filter matching on-device, which does not change the
+ algorithmic complexity, but may benefit from greatly increased on-device bandwidth.
+ */
+ virtual result<const_iterator> begin(key_type mask = {}, key_type bits = {}) noexcept = 0;
+ //! Returns a null const iterator with which to detect the end of an iteration.
+ const_iterator end() noexcept;
+};
+
+/*! \brief Returns a reference to this program's address space key value store.
+
+All running C++ programs have an address space key value store which comprises the binary executable
+code and memory mapped sections which form that C++ program. The keys are pointer sized, and are the
+addresses in memory to which that section is mapped into memory.
+
+This key value store is synthesised from live kernel data, and thus will change over time.
+*/
+LLFIO_HEADERS_ONLY_FUNC_SPEC const key_value_store<void *, llfio::section_handle> &address_space();
+
+#endif
+
+
+KVSTORE_V1_NAMESPACE_END
+
+#endif
diff --git a/include/llfio/revision.hpp b/include/llfio/revision.hpp
index cf755644..cccb3941 100644
--- a/include/llfio/revision.hpp
+++ b/include/llfio/revision.hpp
@@ -1,4 +1,4 @@
// Note the second line of this file must ALWAYS be the git SHA, third line ALWAYS the git SHA update time
-#define LLFIO_PREVIOUS_COMMIT_REF a5e0bb3b263de680ce1b05504042f81714e8ac15
-#define LLFIO_PREVIOUS_COMMIT_DATE "2018-11-12 09:46:38 +00:00"
-#define LLFIO_PREVIOUS_COMMIT_UNIQUE a5e0bb3b
+#define LLFIO_PREVIOUS_COMMIT_REF 8953c3a9fcd4b54ab6cefb26567a562cadcb1c36
+#define LLFIO_PREVIOUS_COMMIT_DATE "2018-11-17 16:23:14 +00:00"
+#define LLFIO_PREVIOUS_COMMIT_UNIQUE 8953c3a9
diff --git a/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp b/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
index c15962c9..56553f4f 100644
--- a/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
+++ b/include/llfio/v2.0/algorithm/handle_adapter/combining.hpp
@@ -174,17 +174,23 @@ namespace algorithm
bytes += b.size();
}
// If less than page size, use stack, else use free pages
- buffer_type buffers[2] = {{(byte *) ((bytes <= utils::page_size()) ? alloca(bytes) : nullptr), bytes}, {(byte *) ((_have_source && bytes <= utils::page_size()) ? alloca(bytes) : nullptr), bytes}};
+ buffer_type buffers[2] = {{(byte *) ((bytes <= utils::page_size()) ? alloca(bytes + 64) : nullptr), bytes}, {(byte *) ((_have_source && bytes <= utils::page_size()) ? alloca(bytes + 64) : nullptr), bytes}};
map_handle buffersh;
- if(buffers[0].data() == nullptr)
+ if(buffers[0].data() != nullptr)
{
- bytes = (bytes + alignof(std::max_align_t) - 1) & ~(alignof(std::max_align_t) - 1);
- OUTCOME_TRY(_, map_handle::map(bytes * (1 + _have_source)));
+ // Adjust to 64 byte multiple
+ buffers[0] = buffer_type((byte *) (((uintptr_t) buffers[0].data() + 63) & ~63), bytes);
+ buffers[1] = buffer_type((byte *) (((uintptr_t) buffers[1].data() + 63) & ~63), bytes);
+ }
+ else
+ {
+ auto _bytes = (bytes + 63) & ~63;
+ OUTCOME_TRY(_, map_handle::map(_bytes * (1 + _have_source)));
buffersh = std::move(_);
buffers[0] = buffer_type{buffersh.address(), bytes};
if(_have_source)
{
- buffers[1] = buffer_type{buffersh.address() + bytes, bytes};
+ buffers[1] = buffer_type{buffersh.address() + _bytes, bytes};
}
}
@@ -245,17 +251,23 @@ namespace algorithm
bytes += b.size();
}
// If less than page size, use stack, else use free pages
- buffer_type buffers[2] = {{(byte *) ((bytes <= utils::page_size()) ? alloca(bytes) : nullptr), bytes}, {(byte *) ((_have_source && bytes <= utils::page_size()) ? alloca(bytes) : nullptr), bytes}};
+ buffer_type buffers[2] = {{(byte *) ((bytes <= utils::page_size()) ? alloca(bytes + 64) : nullptr), bytes}, {(byte *) ((_have_source && bytes <= utils::page_size()) ? alloca(bytes + 64) : nullptr), bytes}};
map_handle buffersh;
- if(buffers[0].data() == nullptr)
+ if(buffers[0].data() != nullptr)
+ {
+ // Adjust to 64 byte multiple
+ buffers[0] = buffer_type((byte *) (((uintptr_t) buffers[0].data() + 63) & ~63), bytes);
+ buffers[1] = buffer_type((byte *) (((uintptr_t) buffers[1].data() + 63) & ~63), bytes);
+ }
+ else
{
- bytes = (bytes + alignof(std::max_align_t) - 1) & ~(alignof(std::max_align_t) - 1);
- OUTCOME_TRY(_, map_handle::map(bytes * (1 + _have_source)));
+ auto _bytes = (bytes + 63) & ~63;
+ OUTCOME_TRY(_, map_handle::map(_bytes * (1 + _have_source)));
buffersh = std::move(_);
buffers[0] = buffer_type{buffers[0].data(), bytes};
if(_have_source)
{
- buffers[1] = buffer_type{buffers[1].data() + bytes, bytes};
+ buffers[1] = buffer_type{buffers[1].data() + _bytes, bytes};
}
}
buffer_type tempbuffers[2] = {buffers[0], buffers[1]};
diff --git a/include/llfio/v2.0/algorithm/handle_adapter/xor.hpp b/include/llfio/v2.0/algorithm/handle_adapter/xor.hpp
index d0d0efa6..be20cda9 100644
--- a/include/llfio/v2.0/algorithm/handle_adapter/xor.hpp
+++ b/include/llfio/v2.0/algorithm/handle_adapter/xor.hpp
@@ -46,40 +46,60 @@ namespace algorithm
static result<buffer_type> do_read(buffer_type out, buffer_type t, buffer_type s) noexcept
{
- // out is the constraint here
+ // Clamp out to smallest of inputs
+ if(t.size() < out.size())
+ {
+ out = buffer_type(out.data(), t.size());
+ }
+ if(s.size() < out.size())
+ {
+ out = buffer_type(out.data(), s.size());
+ }
for(size_t idx = 0; idx < out.size();)
{
- // If addresses happen to align, we can do this fast
- uintptr_t remain1 = ((uintptr_t) out.data() + idx) & 63;
- uintptr_t remain2 = ((uintptr_t) t.data() + idx) & 63;
- uintptr_t remain3 = ((uintptr_t) s.data() + idx) & 63;
- if(remain1 == remain2 && remain2 == remain3)
{
- while(out.size() - idx >= 64 && remain1 == 0)
+ // If addresses happen to align to SIMD, hint very strongly to use SIMD
+ uintptr_t remain1 = ((uintptr_t) out.data() + idx) & 63;
+ uintptr_t remain2 = ((uintptr_t) t.data() + idx) & 63;
+ uintptr_t remain3 = ((uintptr_t) s.data() + idx) & 63;
+ if(remain1 == 0 && remain2 == 0 && remain3 == 0)
{
- uint64_t *a = (uint64_t *) (out.data() + idx);
- const uint64_t *b = (const uint64_t *) (t.data() + idx);
- const uint64_t *c = (const uint64_t *) (s.data() + idx);
- a[0] = b[0] ^ c[0];
- a[1] = b[1] ^ c[1];
- a[2] = b[2] ^ c[2];
- a[3] = b[3] ^ c[3];
- a[4] = b[4] ^ c[4];
- a[5] = b[5] ^ c[5];
- a[6] = b[6] ^ c[6];
- a[7] = b[7] ^ c[7];
- idx += 64;
+ while(out.size() - idx >= 64)
+ {
+ uint64_t *a = (uint64_t *) (out.data() + idx);
+ const uint64_t *b = (const uint64_t *) (t.data() + idx);
+ const uint64_t *c = (const uint64_t *) (s.data() + idx);
+ a[0] = b[0] ^ c[0];
+ a[1] = b[1] ^ c[1];
+ a[2] = b[2] ^ c[2];
+ a[3] = b[3] ^ c[3];
+ a[4] = b[4] ^ c[4];
+ a[5] = b[5] ^ c[5];
+ a[6] = b[6] ^ c[6];
+ a[7] = b[7] ^ c[7];
+ idx += 64;
+ }
}
- while(out.size() - idx >= 8 && (remain1 & 7) == 0)
+ }
+ {
+ // If addresses happen to align to registers, use that
+ uintptr_t remain1 = ((uintptr_t) out.data() + idx) & (sizeof(uintptr_t) - 1);
+ uintptr_t remain2 = ((uintptr_t) t.data() + idx) & (sizeof(uintptr_t) - 1);
+ uintptr_t remain3 = ((uintptr_t) s.data() + idx) & (sizeof(uintptr_t) - 1);
+ if(remain1 == 0 && remain2 == 0 && remain3 == 0)
{
- uint64_t *a = (uint64_t *) (out.data() + idx);
- const uint64_t *b = (const uint64_t *) (t.data() + idx);
- const uint64_t *c = (const uint64_t *) (s.data() + idx);
- a[0] = b[0] ^ c[0];
- idx += 8;
+ while(out.size() - idx >= sizeof(uintptr_t))
+ {
+ uintptr_t *a = (uintptr_t *) (out.data() + idx);
+ const uintptr_t *b = (const uintptr_t *) (t.data() + idx);
+ const uintptr_t *c = (const uintptr_t *) (s.data() + idx);
+ a[0] = b[0] ^ c[0];
+ idx += sizeof(uintptr_t);
+ }
}
}
- if(out.size() - idx > 0)
+ // Otherwise byte work
+ while(out.size() - idx > 0)
{
uint8_t *a = (uint8_t *) (out.data() + idx);
const uint8_t *b = (const uint8_t *) (t.data() + idx);
@@ -96,37 +116,49 @@ namespace algorithm
// in is the constraint here
for(size_t idx = 0; idx < in.size();)
{
- // If addresses happen to align, we can do this fast
- uintptr_t remain1 = ((uintptr_t) t.data() + idx) & 63;
- uintptr_t remain2 = ((uintptr_t) s.data() + idx) & 63;
- uintptr_t remain3 = ((uintptr_t) in.data() + idx) & 63;
- if(remain1 == remain2 && remain2 == remain3)
{
- while(in.size() - idx >= 64 && remain1 == 0)
+ // If addresses happen to align to SIMD, hint very strongly to use SIMD
+ uintptr_t remain1 = ((uintptr_t) t.data() + idx) & 63;
+ uintptr_t remain2 = ((uintptr_t) s.data() + idx) & 63;
+ uintptr_t remain3 = ((uintptr_t) in.data() + idx) & 63;
+ if(remain1 == 0 && remain2 == 0 && remain3 == 0)
{
- uint64_t *a = (uint64_t *) (t.data() + idx);
- const uint64_t *b = (const uint64_t *) (s.data() + idx);
- const uint64_t *c = (const uint64_t *) (in.data() + idx);
- a[0] = b[0] ^ c[0];
- a[1] = b[1] ^ c[1];
- a[2] = b[2] ^ c[2];
- a[3] = b[3] ^ c[3];
- a[4] = b[4] ^ c[4];
- a[5] = b[5] ^ c[5];
- a[6] = b[6] ^ c[6];
- a[7] = b[7] ^ c[7];
- idx += 64;
+ while(in.size() - idx >= 64)
+ {
+ uint64_t *a = (uint64_t *) (t.data() + idx);
+ const uint64_t *b = (const uint64_t *) (s.data() + idx);
+ const uint64_t *c = (const uint64_t *) (in.data() + idx);
+ a[0] = b[0] ^ c[0];
+ a[1] = b[1] ^ c[1];
+ a[2] = b[2] ^ c[2];
+ a[3] = b[3] ^ c[3];
+ a[4] = b[4] ^ c[4];
+ a[5] = b[5] ^ c[5];
+ a[6] = b[6] ^ c[6];
+ a[7] = b[7] ^ c[7];
+ idx += 64;
+ }
}
- while(in.size() - idx >= 8 && (remain1 & 7) == 0)
+ }
+ {
+ // If addresses happen to align to registers, use that
+ uintptr_t remain1 = ((uintptr_t) t.data() + idx) & (sizeof(uintptr_t) - 1);
+ uintptr_t remain2 = ((uintptr_t) s.data() + idx) & (sizeof(uintptr_t) - 1);
+ uintptr_t remain3 = ((uintptr_t) in.data() + idx) & (sizeof(uintptr_t) - 1);
+ if(remain1 == 0 && remain2 == 0 && remain3 == 0)
{
- uint64_t *a = (uint64_t *) (t.data() + idx);
- const uint64_t *b = (const uint64_t *) (s.data() + idx);
- const uint64_t *c = (const uint64_t *) (in.data() + idx);
- a[0] = b[0] ^ c[0];
- idx += 8;
+ while(in.size() - idx >= sizeof(uintptr_t))
+ {
+ uintptr_t *a = (uintptr_t *) (t.data() + idx);
+ const uintptr_t *b = (const uintptr_t *) (s.data() + idx);
+ const uintptr_t *c = (const uintptr_t *) (in.data() + idx);
+ a[0] = b[0] ^ c[0];
+ idx += sizeof(uintptr_t);
+ }
}
}
- if(in.size() - idx > 0)
+ // Otherwise byte work
+ while(in.size() - idx > 0)
{
uint8_t *a = (uint8_t *) (t.data() + idx);
const uint8_t *b = (const uint8_t *) (s.data() + idx);
diff --git a/include/llfio/v2.0/async_file_handle.hpp b/include/llfio/v2.0/async_file_handle.hpp
index 9022707f..88dbcf14 100644
--- a/include/llfio/v2.0/async_file_handle.hpp
+++ b/include/llfio/v2.0/async_file_handle.hpp
@@ -22,14 +22,14 @@ Distributed under the Boost Software License, Version 1.0.
http://www.boost.org/LICENSE_1_0.txt)
*/
+#ifndef LLFIO_ASYNC_FILE_HANDLE_H
+#define LLFIO_ASYNC_FILE_HANDLE_H
+
#include "file_handle.hpp"
#include "io_service.hpp"
//! \file async_file_handle.hpp Provides async_file_handle
-#ifndef LLFIO_ASYNC_FILE_HANDLE_H
-#define LLFIO_ASYNC_FILE_HANDLE_H
-
LLFIO_V2_NAMESPACE_EXPORT_BEGIN
namespace detail
diff --git a/test/tests/handle_adapter_xor.cpp b/test/tests/handle_adapter_xor.cpp
index b05b48e2..de1d1d91 100644
--- a/test/tests/handle_adapter_xor.cpp
+++ b/test/tests/handle_adapter_xor.cpp
@@ -46,13 +46,13 @@ static inline void TestXorHandleAdapterWorks()
// Make temp inode have same contents as random file
h2.truncate(testbytes).value();
h1.read(0, {{h2.address(), testbytes}}).value();
-
+
// Configure the XOR handle adapter for the two handles
algorithm::xor_handle_adapter<mapped_file_handle, fast_random_file_handle> h(&h2, &h1);
BOOST_CHECK(h.is_readable());
BOOST_CHECK(h.is_writable());
BOOST_CHECK(h.maximum_extent().value() == testbytes);
-
+
// Read the whole of the XOR handle adapter, it should return all bits zero
mapped<byte> tempbuffer(testbytes);
BOOST_CHECK(h.read(0, {{tempbuffer.data(), tempbuffer.size()}}).value() == testbytes);
@@ -61,14 +61,15 @@ static inline void TestXorHandleAdapterWorks()
uint64_t *p = (uint64_t *) tempbuffer.data();
BOOST_CHECK(p[n] == 0);
}
+ memset(tempbuffer.data(), 0, tempbuffer.size());
-#if 0
- // Now make sure that random read offsets and lengths match exactly
+ // Now make sure that random read offsets and lengths are also all bits zero
small_prng rand;
- for(size_t n = 0; n < 10000; n++)
+ for(size_t i = 0; i < 10000; i++)
{
- byte buffer[512];
- size_t offset = rand() % testbytes, length = rand() % 512;
+ byte buffer[8192];
+ size_t offset = rand() % testbytes, length = rand() % 8192;
+ memset(buffer, 1, 8192);
auto bytesread = h.read(offset, {{buffer, length}}).value();
if(offset + length > testbytes)
{
@@ -78,9 +79,20 @@ static inline void TestXorHandleAdapterWorks()
{
BOOST_CHECK(bytesread == length);
}
- BOOST_CHECK(!memcmp(buffer, store.begin() + offset, bytesread));
+ size_t n = 0;
+ uint8_t *p = (uint8_t *) tempbuffer.data();
+ for(; n < bytesread; n += 8)
+ {
+ uint64_t *_p = (uint64_t *) (&p[n]);
+ BOOST_CHECK(_p[0] == 0);
+ }
+ for(; n < bytesread; n++)
+ {
+ BOOST_CHECK(p[n] == 0);
+ }
}
-#endif
+
+ // TODO Test writing works
}
#if 0
@@ -134,4 +146,4 @@ static inline void TestFastRandomFileHandlePerformance()
#endif
KERNELTEST_TEST_KERNEL(integration, llfio, xor_handle_adapter, works, "Tests that the xor handle adapter works as expected", TestXorHandleAdapterWorks())
-//KERNELTEST_TEST_KERNEL(integration, llfio, fast_random_file_handle, performance, "Tests the performance of the fast random file handle", TestFastRandomFileHandlePerformance())
+// KERNELTEST_TEST_KERNEL(integration, llfio, fast_random_file_handle, performance, "Tests the performance of the fast random file handle", TestFastRandomFileHandlePerformance())