Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrecht Van Lommel <brecht@blender.org>2021-10-24 15:19:19 +0300
committerBrecht Van Lommel <brecht@blender.org>2021-10-26 16:37:04 +0300
commitfd25e883e2807a151f673b87c152a59701a0df80 (patch)
tree9441933f32ba2672ca71c58842342a9c525e123e /intern/cycles/util/profiling.h
parentd7d40745fa09061a3117bd3669c5a46bbf611eae (diff)
Cycles: remove prefix from source code file names
Remove prefix of filenames that is the same as the folder name. This used to help when #includes were using individual files, but now they are always relative to the cycles root directory and so the prefixes are redundant. For patches and branches, git merge and rebase should be able to detect the renames and move over code to the right file.
Diffstat (limited to 'intern/cycles/util/profiling.h')
-rw-r--r--intern/cycles/util/profiling.h180
1 file changed, 180 insertions, 0 deletions
diff --git a/intern/cycles/util/profiling.h b/intern/cycles/util/profiling.h
new file mode 100644
index 00000000000..b30aac90879
--- /dev/null
+++ b/intern/cycles/util/profiling.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2011-2018 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UTIL_PROFILING_H__
+#define __UTIL_PROFILING_H__
+
+#include <atomic>
+
+#include "util/map.h"
+#include "util/thread.h"
+#include "util/vector.h"
+
+CCL_NAMESPACE_BEGIN
+
/* Identifies which kernel stage a worker thread is currently executing.
 * The profiler thread periodically samples these values to estimate how
 * much time is spent in each stage (see ProfilingState / Profiler). */
enum ProfilingEvent : uint32_t {
  PROFILING_UNKNOWN,
  PROFILING_RAY_SETUP,

  /* Ray intersection stages. */
  PROFILING_INTERSECT_CLOSEST,
  PROFILING_INTERSECT_SUBSURFACE,
  PROFILING_INTERSECT_SHADOW,
  PROFILING_INTERSECT_VOLUME_STACK,

  /* Surface shading stages. */
  PROFILING_SHADE_SURFACE_SETUP,
  PROFILING_SHADE_SURFACE_EVAL,
  PROFILING_SHADE_SURFACE_DIRECT_LIGHT,
  PROFILING_SHADE_SURFACE_INDIRECT_LIGHT,
  PROFILING_SHADE_SURFACE_AO,
  PROFILING_SHADE_SURFACE_PASSES,

  /* Volume shading stages. */
  PROFILING_SHADE_VOLUME_SETUP,
  PROFILING_SHADE_VOLUME_INTEGRATE,
  PROFILING_SHADE_VOLUME_DIRECT_LIGHT,
  PROFILING_SHADE_VOLUME_INDIRECT_LIGHT,

  /* Shadow shading stages. */
  PROFILING_SHADE_SHADOW_SETUP,
  PROFILING_SHADE_SHADOW_SURFACE,
  PROFILING_SHADE_SHADOW_VOLUME,

  /* Light shading stages. */
  PROFILING_SHADE_LIGHT_SETUP,
  PROFILING_SHADE_LIGHT_EVAL,

  /* Number of events; used to size per-event counter arrays. */
  PROFILING_NUM_EVENTS,
};
+
/* Contains the current execution state of a worker thread.
 * These values are constantly updated by the worker.
 * Periodically the profiler thread will wake up, read them
 * and update its internal counters based on it.
 *
 * Atomics aren't needed here since we're only doing direct
 * writes and reads to (4-byte-aligned) uint32_t, which is
 * guaranteed to be atomic on x86 since the 486.
 * Memory ordering is not guaranteed but does not matter.
 *
 * And even on other architectures, the extremely rare corner
 * case of reading an intermediate state could at worst result
 * in a single incorrect sample. */
struct ProfilingState {
  /* ProfilingEvent the worker is currently in; written by the worker,
   * sampled by the profiler thread. */
  volatile uint32_t event = PROFILING_UNKNOWN;
  /* ID of the shader currently being processed, or -1 for none
   * (set and cleared via ProfilingWithShaderHelper). */
  volatile int32_t shader = -1;
  /* ID of the object currently being processed, or -1 for none. */
  volatile int32_t object = -1;
  /* When false, ProfilingWithShaderHelper::set_shader() leaves the
   * shader/object IDs and hit counters untouched. */
  volatile bool active = false;

  /* Per-shader / per-object hit counters, incremented by the worker in
   * ProfilingWithShaderHelper::set_shader(). Must be sized to the number
   * of shaders/objects before use (the asserts there check this). */
  vector<uint64_t> shader_hits;
  vector<uint64_t> object_hits;
};
+
/* Aggregates profiling data from registered ProfilingState instances.
 * A dedicated worker thread (run()) samples the states and accumulates
 * per-event, per-shader and per-object counters. Definitions live in
 * profiling.cpp. */
class Profiler {
 public:
  Profiler();
  ~Profiler();

  /* Clear accumulated data and size the per-shader/per-object vectors. */
  void reset(int num_shaders, int num_objects);

  /* Start/stop the sampling worker thread. */
  void start();
  void stop();

  /* Register/unregister a worker thread's state for sampling.
   * Guarded by `mutex` together with `states`. */
  void add_state(ProfilingState *state);
  void remove_state(ProfilingState *state);

  /* Query accumulated results. The bool-returning variants fill in
   * `samples`/`hits` for the given ID; presumably the return value
   * signals whether data is available — confirm in profiling.cpp. */
  uint64_t get_event(ProfilingEvent event);
  bool get_shader(int shader, uint64_t &samples, uint64_t &hits);
  bool get_object(int object, uint64_t &samples, uint64_t &hits);

 protected:
  /* Main loop of the sampling thread. */
  void run();

  /* Tracks how often the worker was in each ProfilingEvent while sampling,
   * so multiplying the values by the sample frequency (currently 1ms)
   * gives the approximate time spent in each state. */
  vector<uint64_t> event_samples;
  vector<uint64_t> shader_samples;
  vector<uint64_t> object_samples;

  /* Tracks the total amounts every object/shader was hit.
   * Used to evaluate relative cost, written by the render thread.
   * Indexed by the shader and object IDs that the kernel also uses
   * to index __object_flag and __shaders. */
  vector<uint64_t> shader_hits;
  vector<uint64_t> object_hits;

  /* Flag asking the sampling thread to exit its loop. */
  volatile bool do_stop_worker;
  thread *worker;

  /* Protects `states` against concurrent add/remove and sampling. */
  thread_mutex mutex;
  vector<ProfilingState *> states;
};
+
+class ProfilingHelper {
+ public:
+ ProfilingHelper(ProfilingState *state, ProfilingEvent event) : state(state)
+ {
+ previous_event = state->event;
+ state->event = event;
+ }
+
+ ~ProfilingHelper()
+ {
+ state->event = previous_event;
+ }
+
+ inline void set_event(ProfilingEvent event)
+ {
+ state->event = event;
+ }
+
+ protected:
+ ProfilingState *state;
+ uint32_t previous_event;
+};
+
+class ProfilingWithShaderHelper : public ProfilingHelper {
+ public:
+ ProfilingWithShaderHelper(ProfilingState *state, ProfilingEvent event)
+ : ProfilingHelper(state, event)
+ {
+ }
+
+ ~ProfilingWithShaderHelper()
+ {
+ state->object = -1;
+ state->shader = -1;
+ }
+
+ inline void set_shader(int object, int shader)
+ {
+ if (state->active) {
+ state->shader = shader;
+ state->object = object;
+
+ if (shader >= 0) {
+ assert(shader < state->shader_hits.size());
+ state->shader_hits[shader]++;
+ }
+
+ if (object >= 0) {
+ assert(object < state->object_hits.size());
+ state->object_hits[object]++;
+ }
+ }
+ }
+};
+
+CCL_NAMESPACE_END
+
+#endif /* __UTIL_PROFILING_H__ */