git.blender.org/blender.git
author    Brecht Van Lommel <brechtvanlommel@gmail.com>  2017-09-27 00:42:36 +0300
committer Brecht Van Lommel <brechtvanlommel@gmail.com>  2017-10-04 22:11:14 +0300
commit    5b7d6ea54b2fc35b8b12c667f5bf9a1c9c46d5c2 (patch)
tree      99a9ca07d5366b164dfdf267ad1ed3691d2d7d57 /intern/cycles/kernel/kernels/cuda
parent    660e8e59e7b4265324a8fba7ae716f84a73c6c64 (diff)
Code refactor: add WorkTile struct for passing work to kernel.
This makes it easier to share some code between the mega and split kernels in the following commits, and also paves the way for rendering multiple tiles later.
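For orientation before the diff: the kernel code only consumes the new struct, reading tile->buffer, tile->rng_state, tile->offset and tile->stride, and converting a flat work index into pixel coordinates through get_work_pixel(). Below is a minimal sketch of what the struct and that mapping presumably look like; everything not visible in the diff (the x, y, w, h, start_sample and num_samples fields and the index arithmetic) is an assumption for illustration, not taken from the patch.

/* Sketch only: fields not referenced in the diff are assumed. */
typedef struct WorkTile {
	uint x, y, w, h;             /* tile origin and size in pixels (assumed) */
	uint start_sample;           /* first sample rendered by this launch (assumed) */
	uint num_samples;            /* samples per pixel in this launch (assumed) */
	uint offset, stride;         /* render buffer addressing, as used in the diff */
	ccl_global float *buffer;    /* render buffer, as used in the diff */
	ccl_global uint *rng_state;  /* per-pixel RNG state, as used in the diff */
} WorkTile;

/* Assumed mapping: one work item per (pixel, sample) pair inside the tile. */
ccl_device_inline void get_work_pixel(const WorkTile *tile, uint work_index,
                                      uint *x, uint *y, uint *sample)
{
	uint tile_pixels = tile->w * tile->h;
	uint sample_offset = work_index / tile_pixels;
	uint pixel_offset = work_index - sample_offset * tile_pixels;
	uint y_offset = pixel_offset / tile->w;

	*x = tile->x + (pixel_offset - y_offset * tile->w);
	*y = tile->y + y_offset;
	*sample = tile->start_sample + sample_offset;
}

With this layout, the total work for a launch is w * h * num_samples, which is what the new total_work_size kernel argument bounds in the diff below.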
Diffstat (limited to 'intern/cycles/kernel/kernels/cuda')
-rw-r--r--  intern/cycles/kernel/kernels/cuda/kernel.cu  26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/intern/cycles/kernel/kernels/cuda/kernel.cu b/intern/cycles/kernel/kernels/cuda/kernel.cu
index dc343cb387a..4d100634421 100644
--- a/intern/cycles/kernel/kernels/cuda/kernel.cu
+++ b/intern/cycles/kernel/kernels/cuda/kernel.cu
@@ -20,6 +20,7 @@
 #include "kernel/kernel_compat_cuda.h"
 #include "kernel_config.h"
+
 #include "kernel/kernel_math.h"
 #include "kernel/kernel_types.h"
 #include "kernel/kernel_globals.h"
@@ -27,32 +28,37 @@
 #include "kernel/kernel_path.h"
 #include "kernel/kernel_path_branched.h"
 #include "kernel/kernel_bake.h"
+#include "kernel/kernel_work_stealing.h"
 /* kernels */
 extern "C" __global__ void
 CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
-kernel_cuda_path_trace(float *buffer, uint *rng_state, int sample, int sx, int sy, int sw, int sh, int offset, int stride)
+kernel_cuda_path_trace(WorkTile *tile, uint total_work_size)
 {
-	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
-	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
+	int work_index = ccl_global_id(0);
+
+	if(work_index < total_work_size) {
+		uint x, y, sample;
+		get_work_pixel(tile, work_index, &x, &y, &sample);
-	if(x < sx + sw && y < sy + sh) {
 		KernelGlobals kg;
-		kernel_path_trace(&kg, buffer, rng_state, sample, x, y, offset, stride);
+		kernel_path_trace(&kg, tile->buffer, tile->rng_state, sample, x, y, tile->offset, tile->stride);
 	}
 }
 #ifdef __BRANCHED_PATH__
 extern "C" __global__ void
 CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
-kernel_cuda_branched_path_trace(float *buffer, uint *rng_state, int sample, int sx, int sy, int sw, int sh, int offset, int stride)
+kernel_cuda_branched_path_trace(WorkTile *tile, uint total_work_size)
 {
-	int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
-	int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
+	int work_index = ccl_global_id(0);
+
+	if(work_index < total_work_size) {
+		uint x, y, sample;
+		get_work_pixel(tile, work_index, &x, &y, &sample);
-	if(x < sx + sw && y < sy + sh) {
 		KernelGlobals kg;
-		kernel_branched_path_trace(&kg, buffer, rng_state, sample, x, y, offset, stride);
+		kernel_branched_path_trace(&kg, tile->buffer, tile->rng_state, sample, x, y, tile->offset, tile->stride);
 	}
 }
 #endif
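The host-side changes are not part of this diff (it is limited to intern/cycles/kernel/kernels/cuda), but the new signatures imply a 1D launch sized by the total amount of work instead of a 2D grid over the tile. The sketch below is illustrative only: it uses the CUDA driver API with the WorkTile fields assumed earlier, and launch_path_trace is a made-up helper, not code from this commit.

#include <cuda.h>

/* WorkTile here is the struct sketched above. Upload the tile descriptor and
 * launch a 1D grid covering w*h*num_samples work items, matching
 * kernel_cuda_path_trace(WorkTile *tile, uint total_work_size). */
static CUresult launch_path_trace(CUfunction kernel, const WorkTile *wtile, CUstream stream)
{
	unsigned int total_work_size = wtile->w * wtile->h * wtile->num_samples;

	/* Copy the WorkTile to device memory; its device pointer is the first kernel argument. */
	CUdeviceptr d_work_tile;
	cuMemAlloc(&d_work_tile, sizeof(WorkTile));
	cuMemcpyHtoD(d_work_tile, wtile, sizeof(WorkTile));

	void *args[] = { &d_work_tile, &total_work_size };

	unsigned int num_threads = 256;  /* arbitrary block size for this sketch */
	unsigned int num_blocks = (total_work_size + num_threads - 1) / num_threads;

	CUresult result = cuLaunchKernel(kernel,
	                                 num_blocks, 1, 1,   /* 1D grid over all work items */
	                                 num_threads, 1, 1,
	                                 0, stream, args, NULL);

	cuStreamSynchronize(stream);  /* make sure the kernel is done before freeing the tile */
	cuMemFree(d_work_tile);
	return result;
}

Because num_blocks is rounded up, the in-kernel if(work_index < total_work_size) check from the diff discards the excess threads in the last block.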