Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summary refs log tree commit diff
diff options
context:
space:
mode:
author    Brecht Van Lommel <brecht> 2021-10-17 17:10:10 +0300
committer Brecht Van Lommel <brecht@blender.org> 2021-10-18 20:02:10 +0300
commit    1df3b51988852fa8ee6b530a64aa23346db9acd4 (patch)
tree      dd79dba4c8ff8bb8474cc399e9d1b308d845e0cb /intern/cycles/kernel/device/gpu/kernel.h
parent    44c3bb729be42d6d67eaf8918d7cbcb2ff0b315d (diff)
Cycles: replace integrator state argument macros
* Rename struct KernelGlobals to struct KernelGlobalsCPU.
* Add KernelGlobals, IntegratorState and ConstIntegratorState typedefs
  that every device can define in its own way.
* Remove INTEGRATOR_STATE_ARGS and INTEGRATOR_STATE_PASS macros and
  replace with these new typedefs.
* Add explicit state argument to INTEGRATOR_STATE and similar macros.

In preparation for decoupling main and shadow paths.

Differential Revision: https://developer.blender.org/D12888
Diffstat (limited to 'intern/cycles/kernel/device/gpu/kernel.h')
-rw-r--r--  intern/cycles/kernel/device/gpu/kernel.h | 24
1 file changed, 12 insertions, 12 deletions
diff --git a/intern/cycles/kernel/device/gpu/kernel.h b/intern/cycles/kernel/device/gpu/kernel.h
index 21901215757..56beaf1fd91 100644
--- a/intern/cycles/kernel/device/gpu/kernel.h
+++ b/intern/cycles/kernel/device/gpu/kernel.h
@@ -51,8 +51,8 @@ ccl_gpu_kernel(GPU_KERNEL_BLOCK_NUM_THREADS, GPU_KERNEL_MAX_REGISTERS)
const int state = ccl_gpu_global_id_x();
if (state < num_states) {
- INTEGRATOR_STATE_WRITE(path, queued_kernel) = 0;
- INTEGRATOR_STATE_WRITE(shadow_path, queued_kernel) = 0;
+ INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = 0;
+ INTEGRATOR_STATE_WRITE(state, shadow_path, queued_kernel) = 0;
}
}
@@ -244,7 +244,7 @@ extern "C" __global__ void __launch_bounds__(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_B
{
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
num_states, indices, num_indices, [kernel](const int state) {
- return (INTEGRATOR_STATE(path, queued_kernel) == kernel);
+ return (INTEGRATOR_STATE(state, path, queued_kernel) == kernel);
});
}
@@ -256,7 +256,7 @@ extern "C" __global__ void __launch_bounds__(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_B
{
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
num_states, indices, num_indices, [kernel](const int state) {
- return (INTEGRATOR_STATE(shadow_path, queued_kernel) == kernel);
+ return (INTEGRATOR_STATE(state, shadow_path, queued_kernel) == kernel);
});
}
@@ -265,8 +265,8 @@ extern "C" __global__ void __launch_bounds__(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_B
{
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
num_states, indices, num_indices, [](const int state) {
- return (INTEGRATOR_STATE(path, queued_kernel) != 0) ||
- (INTEGRATOR_STATE(shadow_path, queued_kernel) != 0);
+ return (INTEGRATOR_STATE(state, path, queued_kernel) != 0) ||
+ (INTEGRATOR_STATE(state, shadow_path, queued_kernel) != 0);
});
}
@@ -278,8 +278,8 @@ extern "C" __global__ void __launch_bounds__(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_B
{
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
num_states, indices + indices_offset, num_indices, [](const int state) {
- return (INTEGRATOR_STATE(path, queued_kernel) == 0) &&
- (INTEGRATOR_STATE(shadow_path, queued_kernel) == 0);
+ return (INTEGRATOR_STATE(state, path, queued_kernel) == 0) &&
+ (INTEGRATOR_STATE(state, shadow_path, queued_kernel) == 0);
});
}
@@ -289,8 +289,8 @@ extern "C" __global__ void __launch_bounds__(GPU_PARALLEL_SORTED_INDEX_DEFAULT_B
{
gpu_parallel_sorted_index_array<GPU_PARALLEL_SORTED_INDEX_DEFAULT_BLOCK_SIZE>(
num_states, indices, num_indices, key_prefix_sum, [kernel](const int state) {
- return (INTEGRATOR_STATE(path, queued_kernel) == kernel) ?
- INTEGRATOR_STATE(path, shader_sort_key) :
+ return (INTEGRATOR_STATE(state, path, queued_kernel) == kernel) ?
+ INTEGRATOR_STATE(state, path, shader_sort_key) :
GPU_PARALLEL_SORTED_INDEX_INACTIVE_KEY;
});
}
@@ -304,8 +304,8 @@ extern "C" __global__ void __launch_bounds__(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_B
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
num_states, indices, num_indices, [num_active_paths](const int state) {
return (state >= num_active_paths) &&
- ((INTEGRATOR_STATE(path, queued_kernel) != 0) ||
- (INTEGRATOR_STATE(shadow_path, queued_kernel) != 0));
+ ((INTEGRATOR_STATE(state, path, queued_kernel) != 0) ||
+ (INTEGRATOR_STATE(state, shadow_path, queued_kernel) != 0));
});
}