diff options
author | Bartosz Taudul <wolf.pld@gmail.com> | 2019-08-13 01:56:57 +0300 |
---|---|---|
committer | Bartosz Taudul <wolf.pld@gmail.com> | 2019-08-13 14:10:37 +0300 |
commit | 9417ad994de87e134ab5a558997e23d67cb73d0f (patch) | |
tree | 25a52172f8ae6fe2a35e66f3b2d9b9243b5bb019 /server/TracyWorker.cpp | |
parent | 1c937ad9bb9bf9d3fcb4daebc444a15ab8a36fc3 (diff) |
Save/load context switch data.
Diffstat (limited to 'server/TracyWorker.cpp')
-rw-r--r-- | server/TracyWorker.cpp | 76 |
1 file changed, 75 insertions, 1 deletion
diff --git a/server/TracyWorker.cpp b/server/TracyWorker.cpp index 8acf780e..6e778e54 100644 --- a/server/TracyWorker.cpp +++ b/server/TracyWorker.cpp @@ -304,10 +304,14 @@ Worker::Worker( FileRead& f, EventType::Type eventMask ) { s_loadProgress.total.store( 8, std::memory_order_relaxed ); } - else + else if( fileVer <= FileVersion( 0, 5, 0 ) ) { s_loadProgress.total.store( 9, std::memory_order_relaxed ); } + else + { + s_loadProgress.total.store( 10, std::memory_order_relaxed ); + } s_loadProgress.subTotal.store( 0, std::memory_order_relaxed ); s_loadProgress.progress.store( LoadProgress::Initialization, std::memory_order_relaxed ); @@ -1141,6 +1145,50 @@ Worker::Worker( FileRead& f, EventType::Type eventMask ) } } + if( fileVer >= FileVersion( 0, 5, 1 ) ) + { + s_loadProgress.subTotal.store( 0, std::memory_order_relaxed ); + s_loadProgress.progress.store( LoadProgress::ContextSwitches, std::memory_order_relaxed ); + + if( eventMask & EventType::ContextSwitches ) + { + f.Read( sz ); + s_loadProgress.subTotal.store( sz, std::memory_order_relaxed ); + m_data.ctxSwitch.reserve( sz ); + for( uint64_t i=0; i<sz; i++ ) + { + s_loadProgress.subProgress.store( i, std::memory_order_relaxed ); + uint64_t thread, csz; + f.Read2( thread, csz ); + auto data = m_slab.AllocInit<ContextSwitch>(); + data->v.reserve_exact( csz, m_slab ); + int64_t refTime = 0; + auto ptr = data->v.data(); + for( uint64_t j=0; j<csz; j++ ) + { + ptr->start = ReadTimeOffset( f, refTime ); + ptr->end = ReadTimeOffset( f, refTime ); + f.Read( &ptr->cpu, sizeof( ptr->cpu ) + sizeof( ptr->reason ) + sizeof( ptr->state ) ); + ptr++; + } + m_data.ctxSwitch.emplace( thread, data ); + } + } + else + { + f.Read( sz ); + s_loadProgress.subTotal.store( sz, std::memory_order_relaxed ); + for( uint64_t i=0; i<sz; i++ ) + { + s_loadProgress.subProgress.store( i, std::memory_order_relaxed ); + f.Skip( sizeof( uint64_t ) ); + uint64_t csz; + f.Read( csz ); + f.Skip( csz * sizeof( ContextSwitchData ) ); + } + } + } 
+ s_loadProgress.total.store( 0, std::memory_order_relaxed ); m_loadTime = std::chrono::duration_cast<std::chrono::nanoseconds>( std::chrono::high_resolution_clock::now() - loadStart ).count(); @@ -4426,6 +4474,32 @@ void Worker::Write( FileWrite& f ) const auto image = UnpackFrameImage( *fi ); f.Write( image, fi->w * fi->h / 2 ); } + + // Only save context switches relevant to active threads. + std::vector<flat_hash_map<uint64_t, ContextSwitch*, nohash<uint64_t>>::const_iterator> ctxValid; + ctxValid.reserve( m_data.ctxSwitch.size() ); + for( auto it = m_data.ctxSwitch.begin(); it != m_data.ctxSwitch.end(); ++it ) + { + if( m_data.threadMap.find( it->first ) != m_data.threadMap.end() ) + { + ctxValid.emplace_back( it ); + } + } + sz = ctxValid.size(); + f.Write( &sz, sizeof( sz ) ); + for( auto& ctx : ctxValid ) + { + f.Write( &ctx->first, sizeof( ctx->first ) ); + sz = ctx->second->v.size(); + f.Write( &sz, sizeof( sz ) ); + int64_t refTime = 0; + for( auto& cs : ctx->second->v ) + { + WriteTimeOffset( f, refTime, cs.start ); + WriteTimeOffset( f, refTime, cs.end ); + f.Write( &cs.cpu, sizeof( cs.cpu ) + sizeof( cs.reason ) + sizeof( cs.state ) ); + } + } } void Worker::WriteTimeline( FileWrite& f, const Vector<ZoneEvent*>& vec, int64_t& refTime ) |