git.blender.org/blender.git
author     Lukas Stockner <lukas.stockner@freenet.de>  2018-11-29 04:06:30 +0300
committer  Lukas Stockner <lukas.stockner@freenet.de>  2018-11-29 04:45:24 +0300
commit     7fa6f72084b1364cddfbef4f06bbb244210d6967 (patch)
tree       ae4f682248bd5ba4f716ff60c6dbd67c5684b3d2 /intern/cycles/util
parent     fb057153b05555606d801d1e942113d40ec15cec (diff)
Cycles: Add sample-based runtime profiler that measures time spent in various parts of the CPU kernel
This commit adds a sample-based profiler that runs during CPU rendering and collects statistics on time spent in different parts of the kernel (ray intersection, shader evaluation etc.), as well as time spent per material and object.

The results are not yet exposed in the user interface or via Python; to see the stats on the console, pass the "--cycles-print-stats" argument to Cycles (e.g. "./blender -- --cycles-print-stats").

Unfortunately, there is no clear way to extend this functionality to CUDA or OpenCL, so it is CPU-only for now.

Reviewers: brecht, sergey, swerner

Reviewed By: brecht, swerner

Differential Revision: https://developer.blender.org/D3892
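
For illustration only (not part of this patch), a minimal sketch of how a CPU kernel call site could scope its work with the ProfilingHelper added in util_profiling.h below; the integrate_sample() function and its profiling_state argument are assumptions made for the example:

#include "util/util_profiling.h"

CCL_NAMESPACE_BEGIN

/* Hypothetical call site: while the helper is alive, the sampling thread
 * attributes this worker's time to the given event; the destructor restores
 * the previous event when the scope ends. */
void integrate_sample(ProfilingState *profiling_state)
{
  ProfilingHelper profiling(profiling_state, PROFILING_SCENE_INTERSECT);
  /* ... ray intersection work ... */

  profiling.set_event(PROFILING_SHADER_EVAL);
  /* ... shader evaluation work; set_shader()/set_object() would also
   * bump the per-shader/per-object hit counters here ... */
}

CCL_NAMESPACE_END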
Diffstat (limited to 'intern/cycles/util')
-rw-r--r--  intern/cycles/util/CMakeLists.txt      |   2
-rw-r--r--  intern/cycles/util/util_profiling.cpp  | 178
-rw-r--r--  intern/cycles/util/util_profiling.h    | 175
-rw-r--r--  intern/cycles/util/util_stats.h        |   3
4 files changed, 358 insertions, 0 deletions
diff --git a/intern/cycles/util/CMakeLists.txt b/intern/cycles/util/CMakeLists.txt
index 77d47984ee7..92dfc9fa85d 100644
--- a/intern/cycles/util/CMakeLists.txt
+++ b/intern/cycles/util/CMakeLists.txt
@@ -17,6 +17,7 @@ set(SRC
util_md5.cpp
util_murmurhash.cpp
util_path.cpp
+ util_profiling.cpp
util_string.cpp
util_simd.cpp
util_system.cpp
@@ -71,6 +72,7 @@ set(SRC_HEADERS
util_optimization.h
util_param.h
util_path.h
+ util_profiling.h
util_progress.h
util_projection.h
util_queue.h
diff --git a/intern/cycles/util/util_profiling.cpp b/intern/cycles/util/util_profiling.cpp
new file mode 100644
index 00000000000..30aaef69310
--- /dev/null
+++ b/intern/cycles/util/util_profiling.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011-2018 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util/util_algorithm.h"
+#include "util/util_profiling.h"
+#include "util/util_set.h"
+
+CCL_NAMESPACE_BEGIN
+
+Profiler::Profiler()
+ : do_stop_worker(true), worker(NULL)
+{
+}
+
+Profiler::~Profiler()
+{
+ assert(worker == NULL);
+}
+
+void Profiler::run()
+{
+ uint64_t updates = 0;
+ auto start_time = std::chrono::system_clock::now();
+ while(!do_stop_worker) {
+ thread_scoped_lock lock(mutex);
+ foreach(ProfilingState *state, states) {
+ uint32_t cur_event = state->event;
+ int32_t cur_shader = state->shader;
+ int32_t cur_object = state->object;
+
+ /* The state reads/writes should be atomic, but check the values
+ * for validity anyway, just to be sure. */
+ if(cur_event < PROFILING_NUM_EVENTS) {
+ event_samples[cur_event]++;
+ }
+
+ if(cur_shader >= 0 && cur_shader < shader_samples.size()) {
+ /* Only consider the active shader during events whose runtime significantly depends on it. */
+ if(((cur_event >= PROFILING_SHADER_EVAL ) && (cur_event <= PROFILING_SUBSURFACE)) ||
+ ((cur_event >= PROFILING_CLOSURE_EVAL) && (cur_event <= PROFILING_CLOSURE_VOLUME_SAMPLE))) {
+ shader_samples[cur_shader]++;
+ }
+ }
+
+ if(cur_object >= 0 && cur_object < object_samples.size()) {
+ object_samples[cur_object]++;
+ }
+ }
+ lock.unlock();
+
+ /* Relative waits always overshoot a bit, so just waiting 1ms every
+ * time would cause the sampling to drift over time.
+ * By keeping track of the absolute time, the wait times correct themselves -
+ * if one wait overshoots a lot, the next one will be shorter to compensate. */
+ updates++;
+ std::this_thread::sleep_until(start_time + updates*std::chrono::milliseconds(1));
+ }
+}
+
+void Profiler::reset(int num_shaders, int num_objects)
+{
+ bool running = (worker != NULL);
+ if(running) {
+ stop();
+ }
+
+ /* Resize and clear the accumulation vectors. */
+ shader_hits.assign(num_shaders, 0);
+ object_hits.assign(num_objects, 0);
+
+ event_samples.assign(PROFILING_NUM_EVENTS, 0);
+ shader_samples.assign(num_shaders, 0);
+ object_samples.assign(num_objects, 0);
+
+ if(running) {
+ start();
+ }
+}
+
+void Profiler::start()
+{
+ assert(worker == NULL);
+ do_stop_worker = false;
+ worker = new thread(function_bind(&Profiler::run, this));
+}
+
+void Profiler::stop()
+{
+ if(worker != NULL) {
+ do_stop_worker = true;
+
+ worker->join();
+ delete worker;
+ worker = NULL;
+ }
+}
+
+void Profiler::add_state(ProfilingState *state)
+{
+ thread_scoped_lock lock(mutex);
+
+ /* Add the ProfilingState to the list of sampled states. */
+ assert(std::find(states.begin(), states.end(), state) == states.end());
+ states.push_back(state);
+
+ /* Resize thread-local hit counters. */
+ state->shader_hits.assign(shader_hits.size(), 0);
+ state->object_hits.assign(object_hits.size(), 0);
+
+ /* Initialize the state. */
+ state->event = PROFILING_UNKNOWN;
+ state->shader = -1;
+ state->object = -1;
+ state->active = true;
+}
+
+void Profiler::remove_state(ProfilingState *state)
+{
+ thread_scoped_lock lock(mutex);
+
+ /* Remove the ProfilingState from the list of sampled states. */
+ states.erase(std::remove(states.begin(), states.end(), state), states.end());
+ state->active = false;
+
+ /* Merge thread-local hit counters. */
+ assert(shader_hits.size() == state->shader_hits.size());
+ for(int i = 0; i < shader_hits.size(); i++) {
+ shader_hits[i] += state->shader_hits[i];
+ }
+
+ assert(object_hits.size() == state->object_hits.size());
+ for(int i = 0; i < object_hits.size(); i++) {
+ object_hits[i] += state->object_hits[i];
+ }
+}
+
+uint64_t Profiler::get_event(ProfilingEvent event)
+{
+ assert(worker == NULL);
+ return event_samples[event];
+}
+
+bool Profiler::get_shader(int shader, uint64_t &samples, uint64_t &hits)
+{
+ assert(worker == NULL);
+ if(shader_samples[shader] == 0) {
+ return false;
+ }
+ samples = shader_samples[shader];
+ hits = shader_hits[shader];
+ return true;
+}
+
+bool Profiler::get_object(int object, uint64_t &samples, uint64_t &hits)
+{
+ assert(worker == NULL);
+ if(object_samples[object] == 0) {
+ return false;
+ }
+ samples = object_samples[object];
+ hits = object_hits[object];
+ return true;
+}
+
+CCL_NAMESPACE_END
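
The comment in Profiler::run() about relative waits drifting can be illustrated standalone; the following plain-C++ sketch of the same sleep_until pattern is not part of the patch (the names and the steady_clock choice are assumptions for the example; the patch itself uses system_clock):

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

/* Fixed-rate sampling without drift: each wait targets an absolute deadline,
 * so an overshoot on one iteration shortens the next wait instead of
 * accumulating over the whole render. */
void sample_at_1khz(std::atomic<bool> &stop, void (*take_sample)())
{
  const auto start = std::chrono::steady_clock::now();
  uint64_t ticks = 0;
  while (!stop) {
    take_sample();  /* In the Profiler this walks all registered states. */
    ++ticks;
    std::this_thread::sleep_until(start + ticks * std::chrono::milliseconds(1));
  }
}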
diff --git a/intern/cycles/util/util_profiling.h b/intern/cycles/util/util_profiling.h
new file mode 100644
index 00000000000..4460402b12d
--- /dev/null
+++ b/intern/cycles/util/util_profiling.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2011-2018 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UTIL_PROFILING_H__
+#define __UTIL_PROFILING_H__
+
+#include <atomic>
+
+#include "util/util_foreach.h"
+#include "util/util_map.h"
+#include "util/util_thread.h"
+#include "util/util_vector.h"
+
+CCL_NAMESPACE_BEGIN
+
+enum ccl_try_align(16) ProfilingEvent : uint32_t {
+ PROFILING_UNKNOWN,
+ PROFILING_RAY_SETUP,
+ PROFILING_PATH_INTEGRATE,
+ PROFILING_SCENE_INTERSECT,
+ PROFILING_INDIRECT_EMISSION,
+ PROFILING_VOLUME,
+ PROFILING_SHADER_SETUP,
+ PROFILING_SHADER_EVAL,
+ PROFILING_SHADER_APPLY,
+ PROFILING_AO,
+ PROFILING_SUBSURFACE,
+ PROFILING_CONNECT_LIGHT,
+ PROFILING_SURFACE_BOUNCE,
+ PROFILING_WRITE_RESULT,
+
+ PROFILING_INTERSECT,
+ PROFILING_INTERSECT_LOCAL,
+ PROFILING_INTERSECT_SHADOW_ALL,
+ PROFILING_INTERSECT_VOLUME,
+ PROFILING_INTERSECT_VOLUME_ALL,
+
+ PROFILING_CLOSURE_EVAL,
+ PROFILING_CLOSURE_SAMPLE,
+ PROFILING_CLOSURE_VOLUME_EVAL,
+ PROFILING_CLOSURE_VOLUME_SAMPLE,
+
+ PROFILING_DENOISING,
+ PROFILING_DENOISING_CONSTRUCT_TRANSFORM,
+ PROFILING_DENOISING_RECONSTRUCT,
+ PROFILING_DENOISING_DIVIDE_SHADOW,
+ PROFILING_DENOISING_NON_LOCAL_MEANS,
+ PROFILING_DENOISING_COMBINE_HALVES,
+ PROFILING_DENOISING_GET_FEATURE,
+ PROFILING_DENOISING_DETECT_OUTLIERS,
+
+ PROFILING_NUM_EVENTS,
+};
+
+/* Contains the current execution state of a worker thread.
+ * These values are constantly updated by the worker.
+ * Periodically the profiler thread will wake up, read them
+ * and update its internal counters based on them.
+ *
+ * Atomics aren't needed here since we're only doing direct
+ * writes and reads to (4-byte-aligned) uint32_t, which is
+ * guaranteed to be atomic on x86 since the 486.
+ * Memory ordering is not guaranteed but does not matter.
+ *
+ * And even on other architectures, the extremely rare corner
+ * case of reading an intermediate state could at worst result
+ * in a single incorrect sample. */
+struct ProfilingState {
+ volatile uint32_t event = PROFILING_UNKNOWN;
+ volatile int32_t shader = -1;
+ volatile int32_t object = -1;
+ volatile bool active = false;
+
+ vector<uint64_t> shader_hits;
+ vector<uint64_t> object_hits;
+};
+
+class Profiler {
+public:
+ Profiler();
+ ~Profiler();
+
+ void reset(int num_shaders, int num_objects);
+
+ void start();
+ void stop();
+
+ void add_state(ProfilingState *state);
+ void remove_state(ProfilingState *state);
+
+ uint64_t get_event(ProfilingEvent event);
+ bool get_shader(int shader, uint64_t &samples, uint64_t &hits);
+ bool get_object(int object, uint64_t &samples, uint64_t &hits);
+
+protected:
+ void run();
+
+ /* Tracks how often the worker was in each ProfilingEvent while sampling,
+ * so multiplying the values by the sampling interval (currently 1ms)
+ * gives the approximate time spent in each state. */
+ vector<uint64_t> event_samples;
+ vector<uint64_t> shader_samples;
+ vector<uint64_t> object_samples;
+
+ /* Tracks the total number of times every object/shader was hit.
+ * Used to evaluate relative cost, written by the render thread.
+ * Indexed by the shader and object IDs that the kernel also uses
+ * to index __object_flag and __shaders. */
+ vector<uint64_t> shader_hits;
+ vector<uint64_t> object_hits;
+
+ volatile bool do_stop_worker;
+ thread *worker;
+
+ thread_mutex mutex;
+ vector<ProfilingState*> states;
+};
+
+class ProfilingHelper {
+public:
+ ProfilingHelper(ProfilingState *state, ProfilingEvent event)
+ : state(state)
+ {
+ previous_event = state->event;
+ state->event = event;
+ }
+
+ inline void set_event(ProfilingEvent event)
+ {
+ state->event = event;
+ }
+
+ inline void set_shader(int shader)
+ {
+ state->shader = shader;
+ if(state->active) {
+ assert(shader < state->shader_hits.size());
+ state->shader_hits[shader]++;
+ }
+ }
+
+ inline void set_object(int object)
+ {
+ state->object = object;
+ if(state->active) {
+ assert(object < state->object_hits.size());
+ state->object_hits[object]++;
+ }
+ }
+
+ ~ProfilingHelper()
+ {
+ state->event = previous_event;
+ }
+private:
+ ProfilingState *state;
+ uint32_t previous_event;
+};
+
+CCL_NAMESPACE_END
+
+#endif /* __UTIL_PROFILING_H__ */
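
As the comment on event_samples notes, sample counts map to time by multiplying with the sampling interval. A hedged sketch of that conversion follows; the reporting code lives outside this directory, and the print helper here is purely illustrative:

#include <cstdio>

#include "util/util_profiling.h"

CCL_NAMESPACE_BEGIN

/* Illustrative only: with one sample per millisecond per worker thread,
 * N samples of an event correspond to roughly N ms of combined thread time.
 * Profiler::get_event() asserts that the sampling thread has been stopped. */
static void print_event_time(Profiler &profiler, ProfilingEvent event, const char *name)
{
  const uint64_t samples = profiler.get_event(event);
  printf("%s: ~%.3f s\n", name, samples * 0.001);
}

CCL_NAMESPACE_END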
diff --git a/intern/cycles/util/util_stats.h b/intern/cycles/util/util_stats.h
index 0ba58422a67..1128bc3ced1 100644
--- a/intern/cycles/util/util_stats.h
+++ b/intern/cycles/util/util_stats.h
@@ -18,6 +18,7 @@
#define __UTIL_STATS_H__
#include "util/util_atomic.h"
+#include "util/util_profiling.h"
CCL_NAMESPACE_BEGIN
@@ -40,6 +41,8 @@ public:
size_t mem_used;
size_t mem_peak;
+
+ Profiler profiler;
};
CCL_NAMESPACE_END
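
With the Profiler now carried by Stats, the expected lifecycle (wired up elsewhere in the Cycles device and session code, outside this diff) looks roughly like the sketch below; the profile_render() wrapper and its arguments are assumptions for illustration, only the Profiler/ProfilingState API comes from this commit:

#include "util/util_stats.h"

CCL_NAMESPACE_BEGIN

static void profile_render(Stats &stats, int num_shaders, int num_objects)
{
  stats.profiler.reset(num_shaders, num_objects);  /* Size and clear counters. */
  stats.profiler.start();                          /* Launch the 1ms sampling thread. */

  /* Each CPU worker registers a ProfilingState for the duration of its work. */
  ProfilingState state;
  stats.profiler.add_state(&state);
  /* ... kernel work updates state via ProfilingHelper ... */
  stats.profiler.remove_state(&state);

  stats.profiler.stop();                           /* Join the sampling thread. */

  uint64_t samples = 0, hits = 0;
  if (num_shaders > 0 && stats.profiler.get_shader(0, samples, hits)) {
    /* samples * 1ms approximates time in shader 0; hits gives per-hit cost. */
  }
}

CCL_NAMESPACE_END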