git.blender.org/blender.git

author    Brecht Van Lommel <brecht@blender.org>  2021-09-20 18:59:20 +0300
committer Brecht Van Lommel <brecht@blender.org>  2021-09-21 15:55:54 +0300
commit    08031197250aeecbaca3803254e6f25b8c7b7b37 (patch)
tree      6fe7ab045f0dc0a423d6557c4073f34309ef4740 /intern/cycles/kernel/integrator/integrator_state_flow.h
parent    fa6b1007bad065440950cd67deb16a04f368856f (diff)
Cycles: merge of cycles-x branch, a major update to the renderer
This includes much improved GPU rendering performance, viewport interactivity, new shadow catcher, revamped sampling settings, subsurface scattering anisotropy, new GPU volume sampling, improved PMJ sampling pattern, and more. Some features have also been removed or changed, breaking backwards compatibility. Including the removal of the OpenCL backend, for which alternatives are under development. Release notes and code docs: https://wiki.blender.org/wiki/Reference/Release_Notes/3.0/Cycles https://wiki.blender.org/wiki/Source/Render/Cycles Credits: * Sergey Sharybin * Brecht Van Lommel * Patrick Mours (OptiX backend) * Christophe Hery (subsurface scattering anisotropy) * William Leeson (PMJ sampling pattern) * Alaska (various fixes and tweaks) * Thomas Dinges (various fixes) For the full commit history, see the cycles-x branch. This squashes together all the changes since intermediate changes would often fail building or tests. Ref T87839, T87837, T87836 Fixes T90734, T89353, T80267, T80267, T77185, T69800
Diffstat (limited to 'intern/cycles/kernel/integrator/integrator_state_flow.h')
-rw-r--r-- intern/cycles/kernel/integrator/integrator_state_flow.h | 144
1 file changed, 144 insertions(+), 0 deletions(-)
diff --git a/intern/cycles/kernel/integrator/integrator_state_flow.h b/intern/cycles/kernel/integrator/integrator_state_flow.h
new file mode 100644
index 00000000000..8477efd7b66
--- /dev/null
+++ b/intern/cycles/kernel/integrator/integrator_state_flow.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2011-2021 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "kernel/kernel_types.h"
+#include "util/util_atomic.h"
+
+CCL_NAMESPACE_BEGIN
+
+/* Control Flow
+ *
+ * Utilities for control flow between kernels. The implementation may differ per device,
+ * or the flow may even be handled on the host side. To abstract such differences, to
+ * make it easy to experiment with different implementations, and to aid debugging, the
+ * control flow is expressed through these macros.
+ *
+ * There is a main path for regular path tracing of camera rays. Shadow rays for next
+ * event estimation branch off from this into their own path, which may be computed in
+ * parallel while the main path continues.
+ *
+ * Each kernel on the main path must call exactly one of these macros, and only once:
+ *
+ * INTEGRATOR_PATH_INIT(next_kernel)
+ * INTEGRATOR_PATH_NEXT(current_kernel, next_kernel)
+ * INTEGRATOR_PATH_TERMINATE(current_kernel)
+ *
+ * Similar macros exist for the shadow path; each shadow kernel must likewise call
+ * exactly one of them, and only once.
+ */
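+
+/* Example (illustrative sketch only, not part of this header): a kernel that
+ * either continues the path or terminates it would end with exactly one
+ * transition, e.g.:
+ *
+ *   if (hit) {
+ *     INTEGRATOR_PATH_NEXT(DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST,
+ *                          DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE);
+ *   }
+ *   else {
+ *     INTEGRATOR_PATH_TERMINATE(DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST);
+ *   }
+ *
+ * The DEVICE_KERNEL_* names are assumed here to come from the DeviceKernel
+ * enum in kernel/kernel_types.h; the real kernels pick their own transitions. */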
+
+#define INTEGRATOR_PATH_IS_TERMINATED (INTEGRATOR_STATE(path, queued_kernel) == 0)
+#define INTEGRATOR_SHADOW_PATH_IS_TERMINATED (INTEGRATOR_STATE(shadow_path, queued_kernel) == 0)
+
+#ifdef __KERNEL_GPU__
+
+# define INTEGRATOR_PATH_INIT(next_kernel) \
+ atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
+ 1); \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel;
+# define INTEGRATOR_PATH_NEXT(current_kernel, next_kernel) \
+ atomic_fetch_and_sub_uint32( \
+ &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
+ atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
+ 1); \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel;
+# define INTEGRATOR_PATH_TERMINATE(current_kernel) \
+ atomic_fetch_and_sub_uint32( \
+ &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = 0;
+
+# define INTEGRATOR_SHADOW_PATH_INIT(next_kernel) \
+ atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
+ 1); \
+ INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = next_kernel;
+# define INTEGRATOR_SHADOW_PATH_NEXT(current_kernel, next_kernel) \
+ atomic_fetch_and_sub_uint32( \
+ &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
+ atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
+ 1); \
+ INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = next_kernel;
+# define INTEGRATOR_SHADOW_PATH_TERMINATE(current_kernel) \
+ atomic_fetch_and_sub_uint32( \
+ &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
+ INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = 0;
+
+# define INTEGRATOR_PATH_INIT_SORTED(next_kernel, key) \
+ { \
+ const int key_ = key; \
+ atomic_fetch_and_add_uint32( \
+ &kernel_integrator_state.queue_counter->num_queued[next_kernel], 1); \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel; \
+ INTEGRATOR_STATE_WRITE(path, shader_sort_key) = key_; \
+ atomic_fetch_and_add_uint32(&kernel_integrator_state.sort_key_counter[next_kernel][key_], \
+ 1); \
+ }
+# define INTEGRATOR_PATH_NEXT_SORTED(current_kernel, next_kernel, key) \
+ { \
+ const int key_ = key; \
+ atomic_fetch_and_sub_uint32( \
+ &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
+ atomic_fetch_and_add_uint32( \
+ &kernel_integrator_state.queue_counter->num_queued[next_kernel], 1); \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel; \
+ INTEGRATOR_STATE_WRITE(path, shader_sort_key) = key_; \
+ atomic_fetch_and_add_uint32(&kernel_integrator_state.sort_key_counter[next_kernel][key_], \
+ 1); \
+ }
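+
+/* Example (illustrative sketch only): the sorted variants additionally bump a
+ * per-kernel, per-key counter so that queued shading work can be grouped by
+ * shader. A transition into a shade kernel might look like:
+ *
+ *   INTEGRATOR_PATH_NEXT_SORTED(DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST,
+ *                               DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE,
+ *                               shader_sort_key);
+ *
+ * where shader_sort_key stands in for a key derived from the hit shader; the
+ * actual callers compute their own keys. */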
+
+#else
+
+# define INTEGRATOR_PATH_INIT(next_kernel) \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel;
+# define INTEGRATOR_PATH_INIT_SORTED(next_kernel, key) \
+ { \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel; \
+ (void)key; \
+ }
+# define INTEGRATOR_PATH_NEXT(current_kernel, next_kernel) \
+ { \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel; \
+ (void)current_kernel; \
+ }
+# define INTEGRATOR_PATH_TERMINATE(current_kernel) \
+ { \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = 0; \
+ (void)current_kernel; \
+ }
+# define INTEGRATOR_PATH_NEXT_SORTED(current_kernel, next_kernel, key) \
+ { \
+ INTEGRATOR_STATE_WRITE(path, queued_kernel) = next_kernel; \
+ (void)key; \
+ (void)current_kernel; \
+ }
+
+# define INTEGRATOR_SHADOW_PATH_INIT(next_kernel) \
+ INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = next_kernel;
+# define INTEGRATOR_SHADOW_PATH_NEXT(current_kernel, next_kernel) \
+ { \
+ INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = next_kernel; \
+ (void)current_kernel; \
+ }
+# define INTEGRATOR_SHADOW_PATH_TERMINATE(current_kernel) \
+ { \
+ INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = 0; \
+ (void)current_kernel; \
+ }
+
+#endif
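+
+/* Example (illustrative sketch only): a scheduler can drive a single path by
+ * dispatching on the queued kernel until the path terminates, roughly:
+ *
+ *   while (!INTEGRATOR_PATH_IS_TERMINATED) {
+ *     switch (INTEGRATOR_STATE(path, queued_kernel)) {
+ *       ...
+ *     }
+ *   }
+ *
+ * On the GPU the num_queued counters serve the same purpose across many paths
+ * at once; the actual scheduling lives outside this header. */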
+
+CCL_NAMESPACE_END