github.com/windirstat/llfio.git

author     Niall Douglas (s [underscore] sourceforge {at} nedprod [dot] com) <spamtrap@nedprod.com>  2016-09-12 10:22:55 +0300
committer  Niall Douglas (s [underscore] sourceforge {at} nedprod [dot] com) <spamtrap@nedprod.com>  2016-09-12 10:22:55 +0300
commit     eca88f4db5dc8638fc7283a877a4befe31929b02 (patch)
tree       e80224cfa69814b503042cee199019c7cdf4f322
parent     47ce04266cfa69f094f63522e32311392bb557d8 (diff)
Minor fixes from writing CppCon slides
-rw-r--r--  Readme.md | 4
-rw-r--r--  graphs/benchmark_locking 2x i5 M540 @ 2.53Ghz Win10 4.125Gb bandwidth.xlsx | bin 48211 -> 48511 bytes
-rw-r--r--  graphs/benchmark_locking 4x i7-3770K @ 3.50Ghz Win10 20.075Gb bandwidth.xlsx | bin 62444 -> 62419 bytes
-rw-r--r--  graphs/benchmark_locking core count comparison.xlsx | bin 0 -> 91730 bytes
-rw-r--r--  include/boost/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp | 9
5 files changed, 8 insertions, 5 deletions
diff --git a/Readme.md b/Readme.md
index d1d8b187..ef3ef88c 100644
--- a/Readme.md
+++ b/Readme.md
@@ -7,13 +7,13 @@ Tarballs of source and prebuilt binaries with all unit tests passing: https://de
CppCon 2016 todos:
-- Fix remaining test failures.
+- All time based kernel tests need to use soak test based API.
- Raise the sanitisers on per-commit CI via ctest.
- Rename all ParseProjectVersionFromHpp etc to parse_project_version_from_hpp etc
- DLL library edition appears to not be encoding extended error code detail because
it's not sharing a single ringbuffer_log. Hard to fix given Outcome could be being
used by multiple libraries as a header only library, need to figure out some global
-fix e.g. named shared memory.
+fix e.g. named shared memory. Make log disc stored while we are at it.
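The last todo above names two concrete techniques: sharing one log between every library instance in a process via named shared memory, and persisting that log to disc. Purely as an illustration of the first technique, and not AFIO's or Outcome's actual API, a minimal POSIX sketch (the function name and parameters are hypothetical) could be:

// Illustration only: named shared memory for a process-wide shared log.
// Not AFIO/Outcome code; open_shared_log_region() is a hypothetical name.
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

void *open_shared_log_region(const char *name, std::size_t bytes)  // name conventionally begins with '/'
{
  // Every library copy that opens the same name maps the same pages,
  // so all of them append to one ring buffer instead of private ones.
  int fd = ::shm_open(name, O_CREAT | O_RDWR, 0600);
  if(fd == -1)
    return nullptr;
  if(::ftruncate(fd, (off_t) bytes) == -1)  // size it; harmless if already this size
  {
    ::close(fd);
    return nullptr;
  }
  void *addr = ::mmap(nullptr, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  ::close(fd);  // the mapping keeps the region alive
  return addr == MAP_FAILED ? nullptr : addr;
}

Using a file-backed mapping instead of shm_open() would also cover the "disc stored" half of the todo, since the kernel then flushes the log pages to a real file.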
diff --git a/graphs/benchmark_locking 2x i5 M540 @ 2.53Ghz Win10 4.125Gb bandwidth.xlsx b/graphs/benchmark_locking 2x i5 M540 @ 2.53Ghz Win10 4.125Gb bandwidth.xlsx
index 52cc7b05..d466fa3a 100644
--- a/graphs/benchmark_locking 2x i5 M540 @ 2.53Ghz Win10 4.125Gb bandwidth.xlsx
+++ b/graphs/benchmark_locking 2x i5 M540 @ 2.53Ghz Win10 4.125Gb bandwidth.xlsx
Binary files differ
diff --git a/graphs/benchmark_locking 4x i7-3770K @ 3.50Ghz Win10 20.075Gb bandwidth.xlsx b/graphs/benchmark_locking 4x i7-3770K @ 3.50Ghz Win10 20.075Gb bandwidth.xlsx
index e47578da..25dd106f 100644
--- a/graphs/benchmark_locking 4x i7-3770K @ 3.50Ghz Win10 20.075Gb bandwidth.xlsx
+++ b/graphs/benchmark_locking 4x i7-3770K @ 3.50Ghz Win10 20.075Gb bandwidth.xlsx
Binary files differ
diff --git a/graphs/benchmark_locking core count comparison.xlsx b/graphs/benchmark_locking core count comparison.xlsx
new file mode 100644
index 00000000..7e3ad378
--- /dev/null
+++ b/graphs/benchmark_locking core count comparison.xlsx
Binary files differ
diff --git a/include/boost/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp b/include/boost/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
index d4217de1..83a974c8 100644
--- a/include/boost/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
+++ b/include/boost/afio/v2.0/algorithm/shared_fs_mutex/memory_map.hpp
@@ -77,7 +77,7 @@ namespace algorithm
- In the lightly contended case, an order of magnitude faster than any other `shared_fs_mutex` algorithm.
Caveats:
- - A transition between mapped and fallback locks will block forever until all current mapped memory users
+ - A transition between mapped and fallback locks will not complete until all current mapped memory users
have realised the transition has happened. This can take a very significant amount of time if a lock user
does not regularly lock its locks.
\todo It should be possible to auto early out from a memory_map transition by scanning the memory map for any
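The \todo just above is cut off at the hunk boundary, but it proposes an early out from the transition by scanning the map for lock holders other than the caller. As an illustration only, with a hypothetical array-of-lock-words layout rather than memory_map.hpp's real format, such a scan is essentially:

// Illustration only: early-out scan over a hypothetical lock-word array,
// where a zero word means "slot not locked by anybody".
#include <cstddef>
#include <cstdint>

bool can_early_out(const std::uint64_t *lock_words, std::size_t count, std::size_t my_slot)
{
  for(std::size_t n = 0; n < count; n++)
  {
    if(n == my_slot)
      continue;           // ignore the caller's own lock
    if(lock_words[n] != 0)
      return false;       // somebody else still holds a mapped lock, must wait
  }
  return true;            // nobody else is left, the transition can complete now
}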
@@ -263,8 +263,8 @@ namespace algorithm
(void) _;
}
// Convert exclusive whole file lock into lock in use
- BOOST_OUTCOME_FILTER_ERROR(lockinuse2, ret.lock(_lockinuseoffset, 1, false));
BOOST_OUTCOME_FILTER_ERROR(mapinuse2, ret.lock(_mapinuseoffset, 1, false));
+ BOOST_OUTCOME_FILTER_ERROR(lockinuse2, ret.lock(_lockinuseoffset, 1, false));
mapinuse = std::move(mapinuse2);
lockinuse = std::move(lockinuse2);
}
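The commit message does not say why these two byte-range acquisitions were swapped. As a general aside only, unrelated to AFIO's byte-range locks: when two locks must be held together, the usual discipline is to acquire them in one fixed order on every code path, or to hand both to a deadlock-avoiding helper such as std::scoped_lock:

// Illustration only of the general two-lock rule; the mutex names are
// hypothetical stand-ins, not members of memory_map.
#include <mutex>

std::mutex map_in_use;
std::mutex lock_in_use;

void take_both()
{
  // Acquires both without deadlock provided every other path also uses
  // scoped_lock or the same fixed acquisition order.
  std::scoped_lock guard(map_in_use, lock_in_use);
  // ... critical section ...
}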
@@ -289,9 +289,10 @@ namespace algorithm
unsigned exclusive : 1;
};
// Create a cache of entities to their indices, eliding collisions where necessary
- static span<_entity_idx> _hash_entities(_entity_idx *entity_to_idx, entities_type &entities)
+ static span<_entity_idx> _hash_entities(_entity_idx alignas(16) * entity_to_idx, entities_type &entities)
{
_entity_idx *ep = entity_to_idx;
+ //! \todo memory_map::_hash_entities needs to hash x16, x8 and x4 at a time to encourage auto vectorisation
for(size_t n = 0; n < entities.size(); n++)
{
ep->value = hasher_type()(entities[n].value) % _container_entries;
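The new \todo asks for the hashing loop to handle 16, 8 or 4 entries per iteration so the compiler can auto-vectorise it. A shape-only sketch of that restructuring, with a stand-in hash where the real code would use hasher_type, looks like:

// Illustration only: block the loop so there is no cross-iteration dependency,
// which is what lets the optimiser auto-vectorise. hash_one() is a stand-in.
#include <cstddef>
#include <cstdint>

inline std::uint32_t hash_one(std::uint64_t v, std::uint32_t buckets)
{
  return (std::uint32_t)((v * 0x9e3779b97f4a7c15ULL) >> 32) % buckets;
}

void hash_block(std::uint32_t *out, const std::uint64_t *in, std::size_t count, std::uint32_t buckets)
{
  std::size_t n = 0;
  for(; n + 4 <= count; n += 4)  // widen to 8 or 16 as profiling suggests
  {
    out[n + 0] = hash_one(in[n + 0], buckets);
    out[n + 1] = hash_one(in[n + 1], buckets);
    out[n + 2] = hash_one(in[n + 2], buckets);
    out[n + 3] = hash_one(in[n + 3], buckets);
  }
  for(; n < count; n++)          // scalar tail
    out[n] = hash_one(in[n], buckets);
}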
@@ -340,6 +341,7 @@ namespace algorithm
else
end_utc = (d).to_time_point();
}
+ // alloca() always returns 16 byte aligned addresses
span<_entity_idx> entity_to_idx(_hash_entities((_entity_idx *) alloca(sizeof(_entity_idx) * out.entities.size()), out.entities));
_hash_index_type &index = _index();
// Fire this if an error occurs
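A note on the new comment: alloca()'s alignment is not specified by any standard; in practice it follows the platform's stack alignment, which is 16 bytes on the common x86-64 and AArch64 ABIs, and that is presumably what the alignas(16) parameter added above relies on. A glibc sketch of the runtime-sized stack scratch pattern, with a hypothetical element type and a check instead of an assumption:

// Illustration only (glibc/POSIX): runtime-sized, stack-lifetime scratch
// buffer. entry_t is a hypothetical stand-in for _entity_idx.
#include <alloca.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

struct entry_t { std::uint32_t value; };

std::uint32_t fill_scratch(std::size_t count)
{
  entry_t *scratch = (entry_t *) alloca(sizeof(entry_t) * count);  // released when this function returns
  assert(((std::uintptr_t) scratch & 15) == 0);  // verify, rather than assume, the 16 byte alignment
  for(std::size_t n = 0; n < count; n++)
    scratch[n].value = (std::uint32_t) n;        // the real code hashes entities into this buffer
  return count ? scratch[count - 1].value : 0;
}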
@@ -402,6 +404,7 @@ namespace algorithm
virtual void unlock(entities_type entities, unsigned long long hint) noexcept override final
{
BOOST_AFIO_LOG_FUNCTION_CALL(this);
+ //! \todo memory_map::unlock() degrade is racy when single instance being used by multiple threads
if(_have_degraded)
{
if(_fallbacklock)
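The new \todo in this final hunk flags that the degrade path races when one instance is shared by several threads. As a general aside only, not memory_map's code: the conventional fix for a flag that several threads read and write is an atomic (or a mutex held around the whole degrade transition):

// Illustration only: a thread-safe "have we degraded?" flag. The struct and
// member names are hypothetical, not memory_map's.
#include <atomic>

struct degradable
{
  std::atomic<bool> have_degraded{false};

  void degrade() { have_degraded.store(true, std::memory_order_release); }

  bool degraded() const { return have_degraded.load(std::memory_order_acquire); }
};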