git.blender.org/blender.git
Diffstat:
 intern/cycles/device/CMakeLists.txt | 3
 intern/cycles/device/device_split_kernel.cpp | 283
 intern/cycles/device/device_split_kernel.h | 126
 intern/cycles/device/opencl/opencl.h | 58
 intern/cycles/device/opencl/opencl_base.cpp | 4
 intern/cycles/device/opencl/opencl_mega.cpp | 3
 intern/cycles/device/opencl/opencl_split.cpp | 1402
 intern/cycles/kernel/CMakeLists.txt | 2
 intern/cycles/kernel/kernel_compat_opencl.h | 10
 intern/cycles/kernel/kernel_globals.h | 2
 intern/cycles/kernel/kernel_passes.h | 12
 intern/cycles/kernel/kernel_queues.h | 33
 intern/cycles/kernel/kernel_shadow.h | 6
 intern/cycles/kernel/kernel_types.h | 6
 intern/cycles/kernel/kernel_work_stealing.h | 211
 intern/cycles/kernel/kernels/opencl/kernel.cl | 18
 intern/cycles/kernel/kernels/opencl/kernel_background_buffer_update.cl | 109
 intern/cycles/kernel/kernels/opencl/kernel_data_init.cl | 64
 intern/cycles/kernel/kernels/opencl/kernel_direct_lighting.cl | 71
 intern/cycles/kernel/kernels/opencl/kernel_holdout_emission_blurring_pathtermination_ao.cl | 107
 intern/cycles/kernel/kernels/opencl/kernel_lamp_emission.cl | 64
 intern/cycles/kernel/kernels/opencl/kernel_next_iteration_setup.cl | 98
 intern/cycles/kernel/kernels/opencl/kernel_queue_enqueue.cl | 92
 intern/cycles/kernel/kernels/opencl/kernel_scene_intersect.cl | 64
 intern/cycles/kernel/kernels/opencl/kernel_shader_eval.cl | 52
 intern/cycles/kernel/kernels/opencl/kernel_shadow_blocked.cl | 49
 intern/cycles/kernel/kernels/opencl/kernel_sum_all_radiance.cl | 22
 intern/cycles/kernel/split/kernel_background_buffer_update.h | 169
 intern/cycles/kernel/split/kernel_data_init.h | 255
 intern/cycles/kernel/split/kernel_direct_lighting.h | 78
 intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h | 149
 intern/cycles/kernel/split/kernel_lamp_emission.h | 58
 intern/cycles/kernel/split/kernel_next_iteration_setup.h | 118
 intern/cycles/kernel/split/kernel_queue_enqueue.h | 102
 intern/cycles/kernel/split/kernel_scene_intersect.h | 58
 intern/cycles/kernel/split/kernel_shader_eval.h | 58
 intern/cycles/kernel/split/kernel_shadow_blocked.h | 56
 intern/cycles/kernel/split/kernel_split_common.h | 4
 intern/cycles/kernel/split/kernel_split_data.h | 153
 intern/cycles/kernel/split/kernel_sum_all_radiance.h | 56
 intern/cycles/util/util_types.h | 20
 41 files changed, 1734 insertions(+), 2571 deletions(-)
diff --git a/intern/cycles/device/CMakeLists.txt b/intern/cycles/device/CMakeLists.txt
index 966ff5e52ba..a2373451696 100644
--- a/intern/cycles/device/CMakeLists.txt
+++ b/intern/cycles/device/CMakeLists.txt
@@ -3,6 +3,7 @@ set(INC
.
../graph
../kernel
+ ../kernel/split
../kernel/svm
../kernel/osl
../util
@@ -33,6 +34,7 @@ set(SRC
device_cuda.cpp
device_multi.cpp
device_opencl.cpp
+ device_split_kernel.cpp
device_task.cpp
)
@@ -56,6 +58,7 @@ set(SRC_HEADERS
device_memory.h
device_intern.h
device_network.h
+ device_split_kernel.h
device_task.h
)
diff --git a/intern/cycles/device/device_split_kernel.cpp b/intern/cycles/device/device_split_kernel.cpp
new file mode 100644
index 00000000000..cf43e499d0f
--- /dev/null
+++ b/intern/cycles/device/device_split_kernel.cpp
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2011-2016 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "device_split_kernel.h"
+
+#include "kernel_types.h"
+#include "kernel_split_data.h"
+
+#include "util_time.h"
+
+CCL_NAMESPACE_BEGIN
+
+static const double alpha = 0.1; /* alpha for rolling average */
+
+DeviceSplitKernel::DeviceSplitKernel(Device *device) : device(device)
+{
+ current_max_closure = -1;
+ first_tile = true;
+
+ avg_time_per_sample = 0.0;
+}
+
+DeviceSplitKernel::~DeviceSplitKernel()
+{
+ device->mem_free(split_data);
+ device->mem_free(ray_state);
+ device->mem_free(use_queues_flag);
+ device->mem_free(queue_index);
+ device->mem_free(work_pool_wgs);
+
+ delete kernel_scene_intersect;
+ delete kernel_lamp_emission;
+ delete kernel_queue_enqueue;
+ delete kernel_background_buffer_update;
+ delete kernel_shader_eval;
+ delete kernel_holdout_emission_blurring_pathtermination_ao;
+ delete kernel_direct_lighting;
+ delete kernel_shadow_blocked;
+ delete kernel_next_iteration_setup;
+ delete kernel_sum_all_radiance;
+}
+
+bool DeviceSplitKernel::load_kernels(const DeviceRequestedFeatures& requested_features)
+{
+#define LOAD_KERNEL(name) \
+ kernel_##name = get_split_kernel_function(#name, requested_features); \
+ if(!kernel_##name) { \
+ return false; \
+ }
+
+ LOAD_KERNEL(scene_intersect);
+ LOAD_KERNEL(lamp_emission);
+ LOAD_KERNEL(queue_enqueue);
+ LOAD_KERNEL(background_buffer_update);
+ LOAD_KERNEL(shader_eval);
+ LOAD_KERNEL(holdout_emission_blurring_pathtermination_ao);
+ LOAD_KERNEL(direct_lighting);
+ LOAD_KERNEL(shadow_blocked);
+ LOAD_KERNEL(next_iteration_setup);
+ LOAD_KERNEL(sum_all_radiance);
+
+#undef LOAD_KERNEL
+
+ current_max_closure = requested_features.max_closure;
+
+ return true;
+}
+
+size_t DeviceSplitKernel::max_elements_for_max_buffer_size(size_t max_buffer_size, size_t passes_size)
+{
+ size_t size_per_element = split_data_buffer_size(1024, current_max_closure, passes_size) / 1024;
+ return max_buffer_size / size_per_element;
+}
+
+bool DeviceSplitKernel::path_trace(DeviceTask *task,
+ RenderTile& tile,
+ device_memory& kgbuffer,
+ device_memory& kernel_data)
+{
+ if(device->have_error()) {
+ return false;
+ }
+
+ /* Get local size */
+ size_t local_size[2];
+ {
+ int2 lsize = split_kernel_local_size();
+ local_size[0] = lsize[0];
+ local_size[1] = lsize[1];
+ }
+
+ /* Calculate per_thread_output_buffer_size. */
+ size_t per_thread_output_buffer_size = task->passes_size;
+
+ /* Set global size */
+ size_t global_size[2];
+ {
+ int2 gsize = split_kernel_global_size(task);
+
+ /* Make sure the global work size is a multiple of the local
+ * work size dimensions.
+ */
+ global_size[0] = round_up(gsize[0], local_size[0]);
+ global_size[1] = round_up(gsize[1], local_size[1]);
+ }
+
+ /* Number of elements in the global state buffer */
+ int num_global_elements = global_size[0] * global_size[1];
+
+ /* Allocate all required global memory once. */
+ if(first_tile) {
+ first_tile = false;
+
+ /* Calculate max groups */
+
+ /* Denotes the maximum work groups possible w.r.t. current requested tile size. */
+ unsigned int max_work_groups = num_global_elements / WORK_POOL_SIZE + 1;
+
+ /* Allocate work_pool_wgs memory. */
+ work_pool_wgs.resize(max_work_groups * sizeof(unsigned int));
+ device->mem_alloc(work_pool_wgs, MEM_READ_WRITE);
+
+ queue_index.resize(NUM_QUEUES * sizeof(int));
+ device->mem_alloc(queue_index, MEM_READ_WRITE);
+
+ use_queues_flag.resize(sizeof(char));
+ device->mem_alloc(use_queues_flag, MEM_READ_WRITE);
+
+ ray_state.resize(num_global_elements);
+ device->mem_alloc(ray_state, MEM_READ_WRITE);
+
+ split_data.resize(split_data_buffer_size(num_global_elements,
+ current_max_closure,
+ per_thread_output_buffer_size));
+ device->mem_alloc(split_data, MEM_READ_WRITE);
+ }
+
+#define ENQUEUE_SPLIT_KERNEL(name, global_size, local_size) \
+ if(device->have_error()) { \
+ return false; \
+ } \
+ if(!kernel_##name->enqueue(KernelDimensions(global_size, local_size), kgbuffer, kernel_data)) { \
+ return false; \
+ }
+
+ tile.sample = tile.start_sample;
+
+ /* for exponential increase between tile updates */
+ int time_multiplier = 1;
+
+ while(tile.sample < tile.start_sample + tile.num_samples) {
+ /* to keep track of how long it takes to run a number of samples */
+ double start_time = time_dt();
+
+ /* initial guess to start rolling average */
+ const int initial_num_samples = 1;
+ /* approx number of samples per second */
+ int samples_per_second = (avg_time_per_sample > 0.0) ?
+ int(double(time_multiplier) / avg_time_per_sample) + 1 : initial_num_samples;
+
+ RenderTile subtile = tile;
+ subtile.start_sample = tile.sample;
+ subtile.num_samples = min(samples_per_second, tile.start_sample + tile.num_samples - tile.sample);
+
+ if(device->have_error()) {
+ return false;
+ }
+
+ /* reset state memory here, as the global size for the data_init
+ * kernel might not be large enough to do it in the kernel
+ */
+ device->mem_zero(work_pool_wgs);
+ device->mem_zero(split_data);
+
+ if(!enqueue_split_kernel_data_init(KernelDimensions(global_size, local_size),
+ subtile,
+ num_global_elements,
+ kgbuffer,
+ kernel_data,
+ split_data,
+ ray_state,
+ queue_index,
+ use_queues_flag,
+ work_pool_wgs
+ ))
+ {
+ return false;
+ }
+
+ bool activeRaysAvailable = true;
+
+ while(activeRaysAvailable) {
+ /* Twice the global work size of other kernels for
+ * ckPathTraceKernel_shadow_blocked_direct_lighting. */
+ size_t global_size_shadow_blocked[2];
+ global_size_shadow_blocked[0] = global_size[0] * 2;
+ global_size_shadow_blocked[1] = global_size[1];
+
+ /* Do path-iteration in host [enqueue path-iteration kernels]. */
+ for(int PathIter = 0; PathIter < 16; PathIter++) {
+ ENQUEUE_SPLIT_KERNEL(scene_intersect, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(lamp_emission, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(queue_enqueue, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(background_buffer_update, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(shader_eval, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(direct_lighting, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(shadow_blocked, global_size_shadow_blocked, local_size);
+ ENQUEUE_SPLIT_KERNEL(next_iteration_setup, global_size, local_size);
+
+ if(task->get_cancel()) {
+ return true;
+ }
+ }
+
+ /* Decide if we should exit path-iteration in host. */
+ device->mem_copy_from(ray_state, 0, global_size[0] * global_size[1] * sizeof(char), 1, 1);
+
+ activeRaysAvailable = false;
+
+ for(int rayStateIter = 0; rayStateIter < global_size[0] * global_size[1]; ++rayStateIter) {
+ if(int8_t(ray_state.get_data()[rayStateIter]) != RAY_INACTIVE) {
+ /* Not all rays are RAY_INACTIVE. */
+ activeRaysAvailable = true;
+ break;
+ }
+ }
+
+ if(task->get_cancel()) {
+ return true;
+ }
+ }
+
+ double time_per_sample = ((time_dt()-start_time) / subtile.num_samples);
+
+ if(avg_time_per_sample == 0.0) {
+ /* start rolling average */
+ avg_time_per_sample = time_per_sample;
+ }
+ else {
+ avg_time_per_sample = alpha*time_per_sample + (1.0-alpha)*avg_time_per_sample;
+ }
+
+ size_t sum_all_radiance_local_size[2] = {16, 16};
+ size_t sum_all_radiance_global_size[2];
+ sum_all_radiance_global_size[0] = round_up(tile.w, sum_all_radiance_local_size[0]);
+ sum_all_radiance_global_size[1] = round_up(tile.h, sum_all_radiance_local_size[1]);
+
+ ENQUEUE_SPLIT_KERNEL(sum_all_radiance,
+ sum_all_radiance_global_size,
+ sum_all_radiance_local_size);
+
+#undef ENQUEUE_SPLIT_KERNEL
+
+ tile.sample += subtile.num_samples;
+ task->update_progress(&tile, tile.w*tile.h*subtile.num_samples);
+
+ time_multiplier = min(time_multiplier << 1, 10);
+
+ if(task->get_cancel()) {
+ return true;
+ }
+ }
+
+ return true;
+}
+
+CCL_NAMESPACE_END
+
+
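
The sampling loop in path_trace() above sizes each subtile adaptively: it keeps an exponential moving average of the per-sample render time, avg = alpha*t + (1-alpha)*avg with alpha = 0.1, and picks enough samples to fill roughly time_multiplier seconds, where time_multiplier doubles between progress updates up to a cap of 10. A minimal standalone sketch of that heuristic follows; SampleBatcher and its members are illustrative names, not part of this patch:

#include <algorithm>
#include <cstdio>

/* Rolling-average sample batcher, mirroring the loop above. */
class SampleBatcher {
	double avg_time_per_sample; /* seconds; 0.0 means "no estimate yet" */

public:
	SampleBatcher() : avg_time_per_sample(0.0) {}

	/* Samples to attempt so one batch lasts ~time_multiplier seconds. */
	int batch_size(int time_multiplier, int samples_remaining) const
	{
		const int initial_num_samples = 1; /* initial guess, as above */
		int n = (avg_time_per_sample > 0.0)
		        ? int(double(time_multiplier) / avg_time_per_sample) + 1
		        : initial_num_samples;
		return std::min(n, samples_remaining);
	}

	/* Fold the measured batch time into the rolling average (alpha = 0.1). */
	void update(double batch_seconds, int samples_done)
	{
		const double alpha = 0.1;
		double t = batch_seconds / samples_done;
		avg_time_per_sample = (avg_time_per_sample == 0.0)
		        ? t
		        : alpha * t + (1.0 - alpha) * avg_time_per_sample;
	}
};

int main()
{
	SampleBatcher batcher;
	printf("first batch: %d samples\n", batcher.batch_size(1, 100)); /* -> 1 */
	batcher.update(0.25, 1);  /* one sample took 0.25s */
	printf("next batch:  %d samples\n", batcher.batch_size(2, 100)); /* -> 9 */
	return 0;
}

Starting from a single-sample guess keeps the first progress update fast, while the 0.1 weight keeps the estimate stable against per-batch noise.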
diff --git a/intern/cycles/device/device_split_kernel.h b/intern/cycles/device/device_split_kernel.h
new file mode 100644
index 00000000000..b3106fd5632
--- /dev/null
+++ b/intern/cycles/device/device_split_kernel.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2011-2016 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DEVICE_SPLIT_KERNEL_H__
+#define __DEVICE_SPLIT_KERNEL_H__
+
+#include "device.h"
+#include "buffers.h"
+
+CCL_NAMESPACE_BEGIN
+
+/* When allocating global memory in chunks, we may not be able to
+ * allocate exactly "CL_DEVICE_MAX_MEM_ALLOC_SIZE" bytes, since some
+ * bytes may be needed for aligning chunks of memory. This is the
+ * amount of memory that we dedicate for that purpose.
+ */
+#define DATA_ALLOCATION_MEM_FACTOR 5000000 // 5MB
+
+/* Types used for split kernel */
+
+class KernelDimensions {
+public:
+ size_t global_size[2];
+ size_t local_size[2];
+
+ KernelDimensions(size_t global_size_[2], size_t local_size_[2])
+ {
+ memcpy(global_size, global_size_, sizeof(global_size));
+ memcpy(local_size, local_size_, sizeof(local_size));
+ }
+};
+
+class SplitKernelFunction {
+public:
+ virtual ~SplitKernelFunction() {}
+
+ /* enqueue the kernel, returns false if there is an error */
+ virtual bool enqueue(const KernelDimensions& dim, device_memory& kg, device_memory& data) = 0;
+};
+
+class DeviceSplitKernel {
+private:
+ Device *device;
+
+ SplitKernelFunction *kernel_scene_intersect;
+ SplitKernelFunction *kernel_lamp_emission;
+ SplitKernelFunction *kernel_queue_enqueue;
+ SplitKernelFunction *kernel_background_buffer_update;
+ SplitKernelFunction *kernel_shader_eval;
+ SplitKernelFunction *kernel_holdout_emission_blurring_pathtermination_ao;
+ SplitKernelFunction *kernel_direct_lighting;
+ SplitKernelFunction *kernel_shadow_blocked;
+ SplitKernelFunction *kernel_next_iteration_setup;
+ SplitKernelFunction *kernel_sum_all_radiance;
+
+ /* Global memory variables [porting]; this memory is used for
+ * co-operation between different kernels; data written by one
+ * kernel will be available to another kernel via this global
+ * memory.
+ */
+ device_memory split_data;
+ device_vector<uchar> ray_state;
+ device_memory queue_index; /* Array of size num_queues * sizeof(int) that tracks the size of each queue. */
+
+ /* Flag to make the scene_intersect and lamp_emission kernels use queues. */
+ device_memory use_queues_flag;
+
+ /* Approximate time it takes to complete one sample */
+ double avg_time_per_sample;
+
+ /* Work pool with respect to each work group. */
+ device_memory work_pool_wgs;
+
+ /* max_closure value for which the kernels have currently been loaded. */
+ int current_max_closure;
+
+ /* Marked true in the constructor and false once path_trace() has processed its first tile. */
+ bool first_tile;
+
+public:
+ explicit DeviceSplitKernel(Device* device);
+ virtual ~DeviceSplitKernel();
+
+ bool load_kernels(const DeviceRequestedFeatures& requested_features);
+ bool path_trace(DeviceTask *task,
+ RenderTile& rtile,
+ device_memory& kgbuffer,
+ device_memory& kernel_data);
+
+ size_t max_elements_for_max_buffer_size(size_t max_buffer_size, size_t passes_size);
+
+ virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
+ RenderTile& rtile,
+ int num_global_elements,
+ device_memory& kernel_globals,
+ device_memory& kernel_data_,
+ device_memory& split_data,
+ device_memory& ray_state,
+ device_memory& queue_index,
+ device_memory& use_queues_flag,
+ device_memory& work_pool_wgs) = 0;
+
+ virtual SplitKernelFunction* get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&) = 0;
+ virtual int2 split_kernel_local_size() = 0;
+ virtual int2 split_kernel_global_size(DeviceTask *task) = 0;
+};
+
+CCL_NAMESPACE_END
+
+#endif /* __DEVICE_SPLIT_KERNEL_H__ */
+
+
+
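
DeviceSplitKernel is an abstract scheduler: the generic path_trace() loop drives any backend that can (a) produce a SplitKernelFunction per kernel name, (b) enqueue the data_init kernel with the shared state buffers, and (c) report its preferred local and global launch sizes. A hypothetical backend would subclass it roughly as follows; MyBackendSplitKernel and the placeholder bodies are illustrative only, this sketch assumes the surrounding Cycles headers, and the real OpenCL implementation appears in opencl_split.cpp below:

#include "device_split_kernel.h"

CCL_NAMESPACE_BEGIN

class MyBackendSplitKernel : public DeviceSplitKernel {
public:
	explicit MyBackendSplitKernel(Device *device) : DeviceSplitKernel(device) {}

	virtual SplitKernelFunction *get_split_kernel_function(string kernel_name,
	                                                       const DeviceRequestedFeatures& /*features*/)
	{
		/* Compile or look up "path_trace_" + kernel_name, wrap it in a
		 * SplitKernelFunction whose enqueue() launches it, and return
		 * NULL on failure so load_kernels() aborts cleanly. */
		return NULL; /* placeholder */
	}

	virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
	                                            RenderTile& rtile,
	                                            int num_global_elements,
	                                            device_memory& kernel_globals,
	                                            device_memory& kernel_data,
	                                            device_memory& split_data,
	                                            device_memory& ray_state,
	                                            device_memory& queue_index,
	                                            device_memory& use_queues_flag,
	                                            device_memory& work_pool_wgs)
	{
		/* Bind the shared state buffers and launch the data_init kernel
		 * over dim.global_size / dim.local_size. */
		return false; /* placeholder */
	}

	/* Launch geometry: one work item per path state element. */
	virtual int2 split_kernel_local_size() { return make_int2(64, 1); }
	virtual int2 split_kernel_global_size(DeviceTask * /*task*/)
	{
		return make_int2(256, 256); /* e.g. derived from available device memory */
	}
};

CCL_NAMESPACE_END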
diff --git a/intern/cycles/device/opencl/opencl.h b/intern/cycles/device/opencl/opencl.h
index 4023ba89a10..73d245fe4dc 100644
--- a/intern/cycles/device/opencl/opencl.h
+++ b/intern/cycles/device/opencl/opencl.h
@@ -26,29 +26,8 @@
CCL_NAMESPACE_BEGIN
-#define CL_MEM_PTR(p) ((cl_mem)(uintptr_t)(p))
-
-/* Macro declarations used with split kernel */
-
-/* Macro to enable/disable work-stealing */
-#define __WORK_STEALING__
-
-#define SPLIT_KERNEL_LOCAL_SIZE_X 64
-#define SPLIT_KERNEL_LOCAL_SIZE_Y 1
-
-/* This value may be tuned according to the scene we are rendering.
- *
- * Modifying PATH_ITER_INC_FACTOR value proportional to number of expected
- * ray-bounces will improve performance.
- */
-#define PATH_ITER_INC_FACTOR 8
-/* When allocate global memory in chunks. We may not be able to
- * allocate exactly "CL_DEVICE_MAX_MEM_ALLOC_SIZE" bytes in chunks;
- * Since some bytes may be needed for aligning chunks of memory;
- * This is the amount of memory that we dedicate for that purpose.
- */
-#define DATA_ALLOCATION_MEM_FACTOR 5000000 //5MB
+#define CL_MEM_PTR(p) ((cl_mem)(uintptr_t)(p))
struct OpenCLPlatformDevice {
OpenCLPlatformDevice(cl_platform_id platform_id,
@@ -266,7 +245,7 @@ public:
/* Has to be implemented by the real device classes.
* The base device will then load all these programs. */
- virtual void load_kernels(const DeviceRequestedFeatures& requested_features,
+ virtual bool load_kernels(const DeviceRequestedFeatures& requested_features,
vector<OpenCLProgram*> &programs) = 0;
void mem_alloc(device_memory& mem, MemoryType type);
@@ -326,16 +305,39 @@ protected:
class ArgumentWrapper {
public:
- ArgumentWrapper() : size(0), pointer(NULL) {}
- template <typename T>
+ ArgumentWrapper() : size(0), pointer(NULL)
+ {
+ }
+
+ ArgumentWrapper(device_memory& argument) : size(sizeof(void*)),
+ pointer((void*)(&argument.device_pointer))
+ {
+ }
+
+ template<typename T>
+ ArgumentWrapper(device_vector<T>& argument) : size(sizeof(void*)),
+ pointer((void*)(&argument.device_pointer))
+ {
+ }
+
+ template<typename T>
ArgumentWrapper(T& argument) : size(sizeof(argument)),
- pointer(&argument) { }
+ pointer(&argument)
+ {
+ }
+
ArgumentWrapper(int argument) : size(sizeof(int)),
int_value(argument),
- pointer(&int_value) { }
+ pointer(&int_value)
+ {
+ }
+
ArgumentWrapper(float argument) : size(sizeof(float)),
float_value(argument),
- pointer(&float_value) { }
+ pointer(&float_value)
+ {
+ }
+
size_t size;
int int_value;
float float_value;
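
The new ArgumentWrapper constructors let device_memory and device_vector<T> buffers be passed to kernel_set_args() directly: the wrapper forwards the address of their device_pointer handle, while plain int/float values are copied into the wrapper so the stored pointer stays valid for the duration of the call. A self-contained sketch of the pattern; FakeDeviceMemory and set_args are stand-ins, not Cycles API:

#include <cstdio>
#include <cstddef>
#include <initializer_list>

struct FakeDeviceMemory { void *device_pointer; };

struct ArgumentWrapper {
	size_t size;
	int int_value;
	float float_value;
	const void *pointer;

	/* Buffers: pass the address of the device handle, not its contents. */
	ArgumentWrapper(FakeDeviceMemory& mem)
	    : size(sizeof(void*)), pointer(&mem.device_pointer) {}
	/* Values: copy into the wrapper so `pointer` never dangles. */
	ArgumentWrapper(int v) : size(sizeof(int)), int_value(v), pointer(&int_value) {}
	ArgumentWrapper(float v) : size(sizeof(float)), float_value(v), pointer(&float_value) {}
};

/* Stand-in for kernel_set_args(): the real code would call
 * clSetKernelArg(kernel, i, a.size, a.pointer) per argument. */
static void set_args(std::initializer_list<ArgumentWrapper> args)
{
	int i = 0;
	for(const ArgumentWrapper& a : args)
		printf("arg %d: %d bytes at %p\n", i++, (int)a.size, a.pointer);
}

int main()
{
	FakeDeviceMemory kgbuffer = { 0 };
	set_args({ kgbuffer, 42, 1.0f });
	return 0;
}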
diff --git a/intern/cycles/device/opencl/opencl_base.cpp b/intern/cycles/device/opencl/opencl_base.cpp
index 7fa14eee70c..0f51d8e2d22 100644
--- a/intern/cycles/device/opencl/opencl_base.cpp
+++ b/intern/cycles/device/opencl/opencl_base.cpp
@@ -211,7 +211,9 @@ bool OpenCLDeviceBase::load_kernels(const DeviceRequestedFeatures& requested_fea
vector<OpenCLProgram*> programs;
programs.push_back(&base_program);
/* Call actual class to fill the vector with its programs. */
- load_kernels(requested_features, programs);
+ if(!load_kernels(requested_features, programs)) {
+ return false;
+ }
/* Parallel compilation is supported by Cycles, but currently all OpenCL frameworks
* serialize the calls internally, so it's not much use right now.
diff --git a/intern/cycles/device/opencl/opencl_mega.cpp b/intern/cycles/device/opencl/opencl_mega.cpp
index 6ea7619e022..049e332272b 100644
--- a/intern/cycles/device/opencl/opencl_mega.cpp
+++ b/intern/cycles/device/opencl/opencl_mega.cpp
@@ -43,11 +43,12 @@ public:
return true;
}
- virtual void load_kernels(const DeviceRequestedFeatures& /*requested_features*/,
+ virtual bool load_kernels(const DeviceRequestedFeatures& /*requested_features*/,
vector<OpenCLProgram*> &programs)
{
path_trace_program.add_kernel(ustring("path_trace"));
programs.push_back(&path_trace_program);
+ return true;
}
~OpenCLDeviceMegaKernel()
diff --git a/intern/cycles/device/opencl/opencl_split.cpp b/intern/cycles/device/opencl/opencl_split.cpp
index 3c3c2150128..0b015a5db41 100644
--- a/intern/cycles/device/opencl/opencl_split.cpp
+++ b/intern/cycles/device/opencl/opencl_split.cpp
@@ -21,1285 +21,301 @@
#include "buffers.h"
#include "kernel_types.h"
+#include "kernel_split_data.h"
+#include "device_split_kernel.h"
+
+#include "util_logging.h"
#include "util_md5.h"
#include "util_path.h"
#include "util_time.h"
CCL_NAMESPACE_BEGIN
-/* TODO(sergey): This is to keep tile split on OpenCL level working
- * for now, since without this view-port render does not work as it
- * should.
- *
- * Ideally it'll be done on the higher level, but we need to get ready
- * for merge rather soon, so let's keep split logic private here in
- * the file.
- */
-class SplitRenderTile : public RenderTile {
-public:
- SplitRenderTile()
- : RenderTile(),
- buffer_offset_x(0),
- buffer_offset_y(0),
- rng_state_offset_x(0),
- rng_state_offset_y(0),
- buffer_rng_state_stride(0) {}
+class OpenCLSplitKernel;
- explicit SplitRenderTile(RenderTile& tile)
- : RenderTile(),
- buffer_offset_x(0),
- buffer_offset_y(0),
- rng_state_offset_x(0),
- rng_state_offset_y(0),
- buffer_rng_state_stride(0)
- {
- x = tile.x;
- y = tile.y;
- w = tile.w;
- h = tile.h;
- start_sample = tile.start_sample;
- num_samples = tile.num_samples;
- sample = tile.sample;
- resolution = tile.resolution;
- offset = tile.offset;
- stride = tile.stride;
- buffer = tile.buffer;
- rng_state = tile.rng_state;
- buffers = tile.buffers;
+static string get_build_options(OpenCLDeviceBase *device, const DeviceRequestedFeatures& requested_features)
+{
+ string build_options = "-D__SPLIT_KERNEL__ ";
+ build_options += requested_features.get_build_options();
+
+ /* Set compute device build option. */
+ cl_device_type device_type;
+ device->ciErr = clGetDeviceInfo(device->cdDevice,
+ CL_DEVICE_TYPE,
+ sizeof(cl_device_type),
+ &device_type,
+ NULL);
+ assert(device->ciErr == CL_SUCCESS);
+ if(device_type == CL_DEVICE_TYPE_GPU) {
+ build_options += " -D__COMPUTE_DEVICE_GPU__";
}
- /* Split kernel is device global memory constrained;
- * hence split kernel cant render big tile size's in
- * one go. If the user sets a big tile size (big tile size
- * is a term relative to the available device global memory),
- * we split the tile further and then call path_trace on
- * each of those split tiles. The following variables declared,
- * assist in achieving that purpose
- */
- int buffer_offset_x;
- int buffer_offset_y;
- int rng_state_offset_x;
- int rng_state_offset_y;
- int buffer_rng_state_stride;
-};
+ return build_options;
+}
/* OpenCLDeviceSplitKernel's declaration/definition. */
class OpenCLDeviceSplitKernel : public OpenCLDeviceBase
{
public:
- /* Kernel declaration. */
+ DeviceSplitKernel *split_kernel;
OpenCLProgram program_data_init;
- OpenCLProgram program_scene_intersect;
- OpenCLProgram program_lamp_emission;
- OpenCLProgram program_queue_enqueue;
- OpenCLProgram program_background_buffer_update;
- OpenCLProgram program_shader_eval;
- OpenCLProgram program_holdout_emission_blurring_pathtermination_ao;
- OpenCLProgram program_direct_lighting;
- OpenCLProgram program_shadow_blocked;
- OpenCLProgram program_next_iteration_setup;
- OpenCLProgram program_sum_all_radiance;
-
- /* Global memory variables [porting]; These memory is used for
- * co-operation between different kernels; Data written by one
- * kernel will be available to another kernel via this global
- * memory.
- */
- cl_mem rng_coop;
- cl_mem throughput_coop;
- cl_mem L_transparent_coop;
- cl_mem PathRadiance_coop;
- cl_mem Ray_coop;
- cl_mem PathState_coop;
- cl_mem Intersection_coop;
- cl_mem kgbuffer; /* KernelGlobals buffer. */
-
- /* Global buffers for ShaderData. */
- cl_mem sd; /* ShaderData used in the main path-iteration loop. */
- cl_mem sd_DL_shadow; /* ShaderData used in Direct Lighting and
- * shadow_blocked kernel.
- */
-
- /* Global memory required for shadow blocked and accum_radiance. */
- cl_mem BSDFEval_coop;
- cl_mem ISLamp_coop;
- cl_mem LightRay_coop;
- cl_mem AOAlpha_coop;
- cl_mem AOBSDF_coop;
- cl_mem AOLightRay_coop;
- cl_mem Intersection_coop_shadow;
-
-#ifdef WITH_CYCLES_DEBUG
- /* DebugData memory */
- cl_mem debugdata_coop;
-#endif
-
- /* Global state array that tracks ray state. */
- cl_mem ray_state;
-
- /* Per sample buffers. */
- cl_mem per_sample_output_buffers;
-
- /* Denotes which sample each ray is being processed for. */
- cl_mem work_array;
- /* Queue */
- cl_mem Queue_data; /* Array of size queuesize * num_queues * sizeof(int). */
- cl_mem Queue_index; /* Array of size num_queues * sizeof(int);
- * Tracks the size of each queue.
- */
+ OpenCLDeviceSplitKernel(DeviceInfo& info, Stats &stats, bool background_);
- /* Flag to make sceneintersect and lampemission kernel use queues. */
- cl_mem use_queues_flag;
-
- /* Amount of memory in output buffer associated with one pixel/thread. */
- size_t per_thread_output_buffer_size;
-
- /* Total allocatable available device memory. */
- size_t total_allocatable_memory;
-
- /* host version of ray_state; Used in checking host path-iteration
- * termination.
- */
- char *hostRayStateArray;
-
- /* Number of path-iterations to be done in one shot. */
- unsigned int PathIteration_times;
-
-#ifdef __WORK_STEALING__
- /* Work pool with respect to each work group. */
- cl_mem work_pool_wgs;
-
- /* Denotes the maximum work groups possible w.r.t. current tile size. */
- unsigned int max_work_groups;
-#endif
+ ~OpenCLDeviceSplitKernel()
+ {
+ task_pool.stop();
- /* clos_max value for which the kernels have been loaded currently. */
- int current_max_closure;
+ /* Release kernels */
+ program_data_init.release();
- /* Marked True in constructor and marked false at the end of path_trace(). */
- bool first_tile;
+ delete split_kernel;
+ }
- OpenCLDeviceSplitKernel(DeviceInfo& info, Stats &stats, bool background_)
- : OpenCLDeviceBase(info, stats, background_)
+ virtual bool load_kernels(const DeviceRequestedFeatures& requested_features,
+ vector<OpenCLDeviceBase::OpenCLProgram*> &programs)
{
- background = background_;
-
- /* Initialize cl_mem variables. */
- kgbuffer = NULL;
- sd = NULL;
- sd_DL_shadow = NULL;
-
- rng_coop = NULL;
- throughput_coop = NULL;
- L_transparent_coop = NULL;
- PathRadiance_coop = NULL;
- Ray_coop = NULL;
- PathState_coop = NULL;
- Intersection_coop = NULL;
- ray_state = NULL;
+ program_data_init = OpenCLDeviceBase::OpenCLProgram(this,
+ "split_data_init",
+ "kernel_data_init.cl",
+ get_build_options(this, requested_features));
+ program_data_init.add_kernel(ustring("path_trace_data_init"));
+ programs.push_back(&program_data_init);
+
+ return split_kernel->load_kernels(requested_features);
+ }
- AOAlpha_coop = NULL;
- AOBSDF_coop = NULL;
- AOLightRay_coop = NULL;
- BSDFEval_coop = NULL;
- ISLamp_coop = NULL;
- LightRay_coop = NULL;
- Intersection_coop_shadow = NULL;
+ void thread_run(DeviceTask *task)
+ {
+ if(task->type == DeviceTask::FILM_CONVERT) {
+ film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
+ }
+ else if(task->type == DeviceTask::SHADER) {
+ shader(*task);
+ }
+ else if(task->type == DeviceTask::PATH_TRACE) {
+ RenderTile tile;
-#ifdef WITH_CYCLES_DEBUG
- debugdata_coop = NULL;
-#endif
+ /* Dummy copy of the OpenCL KernelGlobals from kernel_globals.h,
+ * declared locally just to fetch its size.
+ */
+ typedef struct KernelGlobals {
+ ccl_constant KernelData *data;
+#define KERNEL_TEX(type, ttype, name) \
+ ccl_global type *name;
+#include "kernel_textures.h"
+#undef KERNEL_TEX
+ void *sd_input;
+ void *isect_shadow;
+ SplitData split_data;
+ SplitParams split_param_data;
+ } KernelGlobals;
- work_array = NULL;
+ /* Allocate buffer for kernel globals */
+ device_memory kgbuffer;
+ kgbuffer.resize(sizeof(KernelGlobals));
+ mem_alloc(kgbuffer, MEM_READ_WRITE);
- /* Queue. */
- Queue_data = NULL;
- Queue_index = NULL;
- use_queues_flag = NULL;
+ /* Keep rendering tiles until done. */
+ while(task->acquire_tile(this, tile)) {
+ split_kernel->path_trace(task,
+ tile,
+ kgbuffer,
+ *const_mem_map["__data"]);
- per_sample_output_buffers = NULL;
+ /* Complete kernel execution before releasing the tile. */
+ /* This helps in multi-device render;
+ * the device that reaches the critical-section function
+ * release_tile waits (stalling other devices from entering
+ * release_tile) for all kernels to complete. If device1 (a
+ * slow-render device) reached release_tile first, it would
+ * stall device2 (a fast-render device) from proceeding to the
+ * next tile.
+ */
+ clFinish(cqCommandQueue);
- per_thread_output_buffer_size = 0;
- hostRayStateArray = NULL;
- PathIteration_times = PATH_ITER_INC_FACTOR;
-#ifdef __WORK_STEALING__
- work_pool_wgs = NULL;
- max_work_groups = 0;
-#endif
- current_max_closure = -1;
- first_tile = true;
+ task->release_tile(tile);
+ }
- /* Get device's maximum memory that can be allocated. */
- ciErr = clGetDeviceInfo(cdDevice,
- CL_DEVICE_MAX_MEM_ALLOC_SIZE,
- sizeof(size_t),
- &total_allocatable_memory,
- NULL);
- assert(ciErr == CL_SUCCESS);
- if(platform_name == "AMD Accelerated Parallel Processing") {
- /* This value is tweak-able; AMD platform does not seem to
- * give maximum performance when all of CL_DEVICE_MAX_MEM_ALLOC_SIZE
- * is considered for further computation.
- */
- total_allocatable_memory /= 2;
+ mem_free(kgbuffer);
}
}
- virtual bool show_samples() const {
- return false;
- }
+protected:
+ /* ** These are for working around some compiler-specific bugs ** */
- /* Split kernel utility functions. */
- size_t get_tex_size(const char *tex_name)
+ string build_options_for_base_program(
+ const DeviceRequestedFeatures& requested_features)
{
- cl_mem ptr;
- size_t ret_size = 0;
- MemMap::iterator i = mem_map.find(tex_name);
- if(i != mem_map.end()) {
- ptr = CL_MEM_PTR(i->second);
- ciErr = clGetMemObjectInfo(ptr,
- CL_MEM_SIZE,
- sizeof(ret_size),
- &ret_size,
- NULL);
- assert(ciErr == CL_SUCCESS);
- }
- return ret_size;
+ return requested_features.get_build_options();
}
- size_t get_shader_data_size(size_t max_closure)
- {
- /* ShaderData size with variable size ShaderClosure array */
- return sizeof(ShaderData) - (sizeof(ShaderClosure) * (MAX_CLOSURE - max_closure));
- }
+ friend class OpenCLSplitKernel;
+ friend class OpenCLSplitKernelFunction;
+};
- /* Returns size of KernelGlobals structure associated with OpenCL. */
- size_t get_KernelGlobals_size()
- {
- /* Copy dummy KernelGlobals related to OpenCL from kernel_globals.h to
- * fetch its size.
- */
- typedef struct KernelGlobals {
- ccl_constant KernelData *data;
-#define KERNEL_TEX(type, ttype, name) \
- ccl_global type *name;
-#include "kernel_textures.h"
-#undef KERNEL_TEX
- void *sd_input;
- void *isect_shadow;
- } KernelGlobals;
+class OpenCLSplitKernelFunction : public SplitKernelFunction {
+public:
+ OpenCLDeviceSplitKernel* device;
+ OpenCLDeviceBase::OpenCLProgram program;
- return sizeof(KernelGlobals);
- }
+ OpenCLSplitKernelFunction(OpenCLDeviceSplitKernel* device) : device(device) {}
+ ~OpenCLSplitKernelFunction() { program.release(); }
- virtual void load_kernels(const DeviceRequestedFeatures& requested_features,
- vector<OpenCLProgram*> &programs)
+ virtual bool enqueue(const KernelDimensions& dim, device_memory& kg, device_memory& data)
{
- string build_options = "-D__SPLIT_KERNEL__ ";
-#ifdef __WORK_STEALING__
- build_options += "-D__WORK_STEALING__ ";
-#endif
- build_options += requested_features.get_build_options();
-
- /* Set compute device build option. */
- cl_device_type device_type;
- ciErr = clGetDeviceInfo(cdDevice,
- CL_DEVICE_TYPE,
- sizeof(cl_device_type),
- &device_type,
- NULL);
- assert(ciErr == CL_SUCCESS);
- if(device_type == CL_DEVICE_TYPE_GPU) {
- build_options += " -D__COMPUTE_DEVICE_GPU__";
+ device->kernel_set_args(program(), 0, kg, data);
+
+ device->ciErr = clEnqueueNDRangeKernel(device->cqCommandQueue,
+ program(),
+ 2,
+ NULL,
+ dim.global_size,
+ dim.local_size,
+ 0,
+ NULL,
+ NULL);
+
+ device->opencl_assert_err(device->ciErr, "clEnqueueNDRangeKernel");
+
+ if(device->ciErr != CL_SUCCESS) {
+ string message = string_printf("OpenCL error: %s in clEnqueueNDRangeKernel()",
+ clewErrorString(device->ciErr));
+ device->opencl_error(message);
+ return false;
}
-#define GLUE(a, b) a ## b
-#define LOAD_KERNEL(name) \
- do { \
- GLUE(program_, name) = OpenCLProgram(this, "split_" #name, "kernel_" #name ".cl", build_options); \
- GLUE(program_, name).add_kernel(ustring("path_trace_" #name)); \
- programs.push_back(&GLUE(program_, name)); \
- } while(false)
-
- LOAD_KERNEL(data_init);
- LOAD_KERNEL(scene_intersect);
- LOAD_KERNEL(lamp_emission);
- LOAD_KERNEL(queue_enqueue);
- LOAD_KERNEL(background_buffer_update);
- LOAD_KERNEL(shader_eval);
- LOAD_KERNEL(holdout_emission_blurring_pathtermination_ao);
- LOAD_KERNEL(direct_lighting);
- LOAD_KERNEL(shadow_blocked);
- LOAD_KERNEL(next_iteration_setup);
- LOAD_KERNEL(sum_all_radiance);
-
-#undef FIND_KERNEL
-#undef GLUE
+ return true;
+ }
+};
- current_max_closure = requested_features.max_closure;
+class OpenCLSplitKernel : public DeviceSplitKernel {
+ OpenCLDeviceSplitKernel *device;
+public:
+ explicit OpenCLSplitKernel(OpenCLDeviceSplitKernel *device) : DeviceSplitKernel(device), device(device) {
}
- ~OpenCLDeviceSplitKernel()
+ virtual SplitKernelFunction* get_split_kernel_function(string kernel_name,
+ const DeviceRequestedFeatures& requested_features)
{
- task_pool.stop();
-
- /* Release kernels */
- program_data_init.release();
- program_scene_intersect.release();
- program_lamp_emission.release();
- program_queue_enqueue.release();
- program_background_buffer_update.release();
- program_shader_eval.release();
- program_holdout_emission_blurring_pathtermination_ao.release();
- program_direct_lighting.release();
- program_shadow_blocked.release();
- program_next_iteration_setup.release();
- program_sum_all_radiance.release();
-
- /* Release global memory */
- release_mem_object_safe(rng_coop);
- release_mem_object_safe(throughput_coop);
- release_mem_object_safe(L_transparent_coop);
- release_mem_object_safe(PathRadiance_coop);
- release_mem_object_safe(Ray_coop);
- release_mem_object_safe(PathState_coop);
- release_mem_object_safe(Intersection_coop);
- release_mem_object_safe(kgbuffer);
- release_mem_object_safe(sd);
- release_mem_object_safe(sd_DL_shadow);
- release_mem_object_safe(ray_state);
- release_mem_object_safe(AOAlpha_coop);
- release_mem_object_safe(AOBSDF_coop);
- release_mem_object_safe(AOLightRay_coop);
- release_mem_object_safe(BSDFEval_coop);
- release_mem_object_safe(ISLamp_coop);
- release_mem_object_safe(LightRay_coop);
- release_mem_object_safe(Intersection_coop_shadow);
-#ifdef WITH_CYCLES_DEBUG
- release_mem_object_safe(debugdata_coop);
-#endif
- release_mem_object_safe(use_queues_flag);
- release_mem_object_safe(Queue_data);
- release_mem_object_safe(Queue_index);
- release_mem_object_safe(work_array);
-#ifdef __WORK_STEALING__
- release_mem_object_safe(work_pool_wgs);
-#endif
- release_mem_object_safe(per_sample_output_buffers);
-
- if(hostRayStateArray != NULL) {
- free(hostRayStateArray);
+ OpenCLSplitKernelFunction* kernel = new OpenCLSplitKernelFunction(device);
+
+ kernel->program = OpenCLDeviceBase::OpenCLProgram(device,
+ "split_" + kernel_name,
+ "kernel_" + kernel_name + ".cl",
+ get_build_options(device, requested_features));
+ kernel->program.add_kernel(ustring("path_trace_" + kernel_name));
+ kernel->program.load();
+
+ if(!kernel->program.is_loaded()) {
+ delete kernel;
+ return NULL;
}
+
+ return kernel;
}
- void path_trace(DeviceTask *task,
- SplitRenderTile& rtile,
- int2 max_render_feasible_tile_size)
+ virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
+ RenderTile& rtile,
+ int num_global_elements,
+ device_memory& kernel_globals,
+ device_memory& kernel_data,
+ device_memory& split_data,
+ device_memory& ray_state,
+ device_memory& queue_index,
+ device_memory& use_queues_flag,
+ device_memory& work_pool_wgs
+ )
{
- /* cast arguments to cl types */
- cl_mem d_data = CL_MEM_PTR(const_mem_map["__data"]->device_pointer);
- cl_mem d_buffer = CL_MEM_PTR(rtile.buffer);
- cl_mem d_rng_state = CL_MEM_PTR(rtile.rng_state);
- cl_int d_x = rtile.x;
- cl_int d_y = rtile.y;
- cl_int d_w = rtile.w;
- cl_int d_h = rtile.h;
- cl_int d_offset = rtile.offset;
- cl_int d_stride = rtile.stride;
-
- /* Make sure that set render feasible tile size is a multiple of local
- * work size dimensions.
- */
- assert(max_render_feasible_tile_size.x % SPLIT_KERNEL_LOCAL_SIZE_X == 0);
- assert(max_render_feasible_tile_size.y % SPLIT_KERNEL_LOCAL_SIZE_Y == 0);
-
- size_t global_size[2];
- size_t local_size[2] = {SPLIT_KERNEL_LOCAL_SIZE_X,
- SPLIT_KERNEL_LOCAL_SIZE_Y};
+ cl_int dQueue_size = dim.global_size[0] * dim.global_size[1];
/* Set the range of samples to be processed for every ray in
* path-regeneration logic.
*/
cl_int start_sample = rtile.start_sample;
cl_int end_sample = rtile.start_sample + rtile.num_samples;
- cl_int num_samples = rtile.num_samples;
-
-#ifdef __WORK_STEALING__
- global_size[0] = (((d_w - 1) / local_size[0]) + 1) * local_size[0];
- global_size[1] = (((d_h - 1) / local_size[1]) + 1) * local_size[1];
- unsigned int num_parallel_samples = 1;
-#else
- global_size[1] = (((d_h - 1) / local_size[1]) + 1) * local_size[1];
- unsigned int num_threads = max_render_feasible_tile_size.x *
- max_render_feasible_tile_size.y;
- unsigned int num_tile_columns_possible = num_threads / global_size[1];
- /* Estimate number of parallel samples that can be
- * processed in parallel.
- */
- unsigned int num_parallel_samples = min(num_tile_columns_possible / d_w,
- rtile.num_samples);
- /* Wavefront size in AMD is 64.
- * TODO(sergey): What about other platforms?
- */
- if(num_parallel_samples >= 64) {
- /* TODO(sergey): Could use generic round-up here. */
- num_parallel_samples = (num_parallel_samples / 64) * 64;
- }
- assert(num_parallel_samples != 0);
-
- global_size[0] = d_w * num_parallel_samples;
-#endif /* __WORK_STEALING__ */
-
- assert(global_size[0] * global_size[1] <=
- max_render_feasible_tile_size.x * max_render_feasible_tile_size.y);
-
- /* Allocate all required global memory once. */
- if(first_tile) {
- size_t num_global_elements = max_render_feasible_tile_size.x *
- max_render_feasible_tile_size.y;
- /* TODO(sergey): This will actually over-allocate if
- * particular kernel does not support multiclosure.
- */
- size_t shaderdata_size = get_shader_data_size(current_max_closure);
-
-#ifdef __WORK_STEALING__
- /* Calculate max groups */
- size_t max_global_size[2];
- size_t tile_x = max_render_feasible_tile_size.x;
- size_t tile_y = max_render_feasible_tile_size.y;
- max_global_size[0] = (((tile_x - 1) / local_size[0]) + 1) * local_size[0];
- max_global_size[1] = (((tile_y - 1) / local_size[1]) + 1) * local_size[1];
- max_work_groups = (max_global_size[0] * max_global_size[1]) /
- (local_size[0] * local_size[1]);
- /* Allocate work_pool_wgs memory. */
- work_pool_wgs = mem_alloc(max_work_groups * sizeof(unsigned int));
-#endif /* __WORK_STEALING__ */
-
- /* Allocate queue_index memory only once. */
- Queue_index = mem_alloc(NUM_QUEUES * sizeof(int));
- use_queues_flag = mem_alloc(sizeof(char));
- kgbuffer = mem_alloc(get_KernelGlobals_size());
-
- /* Create global buffers for ShaderData. */
- sd = mem_alloc(num_global_elements * shaderdata_size);
- sd_DL_shadow = mem_alloc(num_global_elements * 2 * shaderdata_size);
-
- /* Creation of global memory buffers which are shared among
- * the kernels.
- */
- rng_coop = mem_alloc(num_global_elements * sizeof(RNG));
- throughput_coop = mem_alloc(num_global_elements * sizeof(float3));
- L_transparent_coop = mem_alloc(num_global_elements * sizeof(float));
- PathRadiance_coop = mem_alloc(num_global_elements * sizeof(PathRadiance));
- Ray_coop = mem_alloc(num_global_elements * sizeof(Ray));
- PathState_coop = mem_alloc(num_global_elements * sizeof(PathState));
- Intersection_coop = mem_alloc(num_global_elements * sizeof(Intersection));
- AOAlpha_coop = mem_alloc(num_global_elements * sizeof(float3));
- AOBSDF_coop = mem_alloc(num_global_elements * sizeof(float3));
- AOLightRay_coop = mem_alloc(num_global_elements * sizeof(Ray));
- BSDFEval_coop = mem_alloc(num_global_elements * sizeof(BsdfEval));
- ISLamp_coop = mem_alloc(num_global_elements * sizeof(int));
- LightRay_coop = mem_alloc(num_global_elements * sizeof(Ray));
- Intersection_coop_shadow = mem_alloc(2 * num_global_elements * sizeof(Intersection));
-
-#ifdef WITH_CYCLES_DEBUG
- debugdata_coop = mem_alloc(num_global_elements * sizeof(DebugData));
-#endif
-
- ray_state = mem_alloc(num_global_elements * sizeof(char));
-
- hostRayStateArray = (char *)calloc(num_global_elements, sizeof(char));
- assert(hostRayStateArray != NULL && "Can't create hostRayStateArray memory");
-
- Queue_data = mem_alloc(num_global_elements * (NUM_QUEUES * sizeof(int)+sizeof(int)));
- work_array = mem_alloc(num_global_elements * sizeof(unsigned int));
- per_sample_output_buffers = mem_alloc(num_global_elements *
- per_thread_output_buffer_size);
- }
-
- cl_int dQueue_size = global_size[0] * global_size[1];
cl_uint start_arg_index =
- kernel_set_args(program_data_init(),
+ device->kernel_set_args(device->program_data_init(),
0,
- kgbuffer,
- sd_DL_shadow,
- d_data,
- per_sample_output_buffers,
- d_rng_state,
- rng_coop,
- throughput_coop,
- L_transparent_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop_shadow,
- ray_state);
+ kernel_globals,
+ kernel_data,
+ split_data,
+ num_global_elements,
+ ray_state,
+ rtile.rng_state);
/* TODO(sergey): Avoid map lookup here. */
#define KERNEL_TEX(type, ttype, name) \
- set_kernel_arg_mem(program_data_init(), &start_arg_index, #name);
+ device->set_kernel_arg_mem(device->program_data_init(), &start_arg_index, #name);
#include "kernel_textures.h"
#undef KERNEL_TEX
start_arg_index +=
- kernel_set_args(program_data_init(),
+ device->kernel_set_args(device->program_data_init(),
start_arg_index,
start_sample,
- d_x,
- d_y,
- d_w,
- d_h,
- d_offset,
- d_stride,
- rtile.rng_state_offset_x,
- rtile.rng_state_offset_y,
- rtile.buffer_rng_state_stride,
- Queue_data,
- Queue_index,
+ end_sample,
+ rtile.x,
+ rtile.y,
+ rtile.w,
+ rtile.h,
+ rtile.offset,
+ rtile.stride,
+ queue_index,
dQueue_size,
use_queues_flag,
- work_array,
-#ifdef __WORK_STEALING__
work_pool_wgs,
- num_samples,
-#endif
-#ifdef WITH_CYCLES_DEBUG
- debugdata_coop,
-#endif
- num_parallel_samples);
-
- kernel_set_args(program_scene_intersect(),
- 0,
- kgbuffer,
- d_data,
- rng_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop,
- ray_state,
- d_w,
- d_h,
- Queue_data,
- Queue_index,
- dQueue_size,
- use_queues_flag,
-#ifdef WITH_CYCLES_DEBUG
- debugdata_coop,
-#endif
- num_parallel_samples);
-
- kernel_set_args(program_lamp_emission(),
- 0,
- kgbuffer,
- d_data,
- throughput_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop,
- ray_state,
- d_w,
- d_h,
- Queue_data,
- Queue_index,
- dQueue_size,
- use_queues_flag,
- num_parallel_samples);
-
- kernel_set_args(program_queue_enqueue(),
- 0,
- Queue_data,
- Queue_index,
- ray_state,
- dQueue_size);
-
- kernel_set_args(program_background_buffer_update(),
- 0,
- kgbuffer,
- d_data,
- per_sample_output_buffers,
- d_rng_state,
- rng_coop,
- throughput_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- L_transparent_coop,
- ray_state,
- d_w,
- d_h,
- d_x,
- d_y,
- d_stride,
- rtile.rng_state_offset_x,
- rtile.rng_state_offset_y,
- rtile.buffer_rng_state_stride,
- work_array,
- Queue_data,
- Queue_index,
- dQueue_size,
- end_sample,
- start_sample,
-#ifdef __WORK_STEALING__
- work_pool_wgs,
- num_samples,
-#endif
-#ifdef WITH_CYCLES_DEBUG
- debugdata_coop,
-#endif
- num_parallel_samples);
-
- kernel_set_args(program_shader_eval(),
- 0,
- kgbuffer,
- d_data,
- sd,
- rng_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop,
- ray_state,
- Queue_data,
- Queue_index,
- dQueue_size);
-
- kernel_set_args(program_holdout_emission_blurring_pathtermination_ao(),
- 0,
- kgbuffer,
- d_data,
- sd,
- per_sample_output_buffers,
- rng_coop,
- throughput_coop,
- L_transparent_coop,
- PathRadiance_coop,
- PathState_coop,
- Intersection_coop,
- AOAlpha_coop,
- AOBSDF_coop,
- AOLightRay_coop,
- d_w,
- d_h,
- d_x,
- d_y,
- d_stride,
- ray_state,
- work_array,
- Queue_data,
- Queue_index,
- dQueue_size,
-#ifdef __WORK_STEALING__
- start_sample,
-#endif
- num_parallel_samples);
-
- kernel_set_args(program_direct_lighting(),
- 0,
- kgbuffer,
- d_data,
- sd,
- rng_coop,
- PathState_coop,
- ISLamp_coop,
- LightRay_coop,
- BSDFEval_coop,
- ray_state,
- Queue_data,
- Queue_index,
- dQueue_size);
-
- kernel_set_args(program_shadow_blocked(),
- 0,
- kgbuffer,
- d_data,
- PathState_coop,
- LightRay_coop,
- AOLightRay_coop,
- ray_state,
- Queue_data,
- Queue_index,
- dQueue_size);
-
- kernel_set_args(program_next_iteration_setup(),
- 0,
- kgbuffer,
- d_data,
- sd,
- rng_coop,
- throughput_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- LightRay_coop,
- ISLamp_coop,
- BSDFEval_coop,
- AOLightRay_coop,
- AOBSDF_coop,
- AOAlpha_coop,
- ray_state,
- Queue_data,
- Queue_index,
- dQueue_size,
- use_queues_flag);
-
- kernel_set_args(program_sum_all_radiance(),
- 0,
- d_data,
- d_buffer,
- per_sample_output_buffers,
- num_parallel_samples,
- d_w,
- d_h,
- d_stride,
- rtile.buffer_offset_x,
- rtile.buffer_offset_y,
- rtile.buffer_rng_state_stride,
- start_sample);
-
- /* Macro for Enqueuing split kernels. */
-#define GLUE(a, b) a ## b
-#define ENQUEUE_SPLIT_KERNEL(kernelName, globalSize, localSize) \
- { \
- ciErr = clEnqueueNDRangeKernel(cqCommandQueue, \
- GLUE(program_, \
- kernelName)(), \
- 2, \
- NULL, \
- globalSize, \
- localSize, \
- 0, \
- NULL, \
- NULL); \
- opencl_assert_err(ciErr, "clEnqueueNDRangeKernel"); \
- if(ciErr != CL_SUCCESS) { \
- string message = string_printf("OpenCL error: %s in clEnqueueNDRangeKernel()", \
- clewErrorString(ciErr)); \
- opencl_error(message); \
- return; \
- } \
- } (void) 0
+ rtile.num_samples,
+ rtile.buffer);
/* Enqueue ckPathTraceKernel_data_init kernel. */
- ENQUEUE_SPLIT_KERNEL(data_init, global_size, local_size);
- bool activeRaysAvailable = true;
-
- /* Record number of time host intervention has been made */
- unsigned int numHostIntervention = 0;
- unsigned int numNextPathIterTimes = PathIteration_times;
- bool canceled = false;
- while(activeRaysAvailable) {
- /* Twice the global work size of other kernels for
- * ckPathTraceKernel_shadow_blocked_direct_lighting. */
- size_t global_size_shadow_blocked[2];
- global_size_shadow_blocked[0] = global_size[0] * 2;
- global_size_shadow_blocked[1] = global_size[1];
-
- /* Do path-iteration in host [Enqueue Path-iteration kernels. */
- for(int PathIter = 0; PathIter < PathIteration_times; PathIter++) {
- ENQUEUE_SPLIT_KERNEL(scene_intersect, global_size, local_size);
- ENQUEUE_SPLIT_KERNEL(lamp_emission, global_size, local_size);
- ENQUEUE_SPLIT_KERNEL(queue_enqueue, global_size, local_size);
- ENQUEUE_SPLIT_KERNEL(background_buffer_update, global_size, local_size);
- ENQUEUE_SPLIT_KERNEL(shader_eval, global_size, local_size);
- ENQUEUE_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao, global_size, local_size);
- ENQUEUE_SPLIT_KERNEL(direct_lighting, global_size, local_size);
- ENQUEUE_SPLIT_KERNEL(shadow_blocked, global_size_shadow_blocked, local_size);
- ENQUEUE_SPLIT_KERNEL(next_iteration_setup, global_size, local_size);
-
- if(task->get_cancel()) {
- canceled = true;
- break;
- }
- }
-
- /* Read ray-state into Host memory to decide if we should exit
- * path-iteration in host.
- */
- ciErr = clEnqueueReadBuffer(cqCommandQueue,
- ray_state,
- CL_TRUE,
- 0,
- global_size[0] * global_size[1] * sizeof(char),
- hostRayStateArray,
- 0,
- NULL,
- NULL);
- assert(ciErr == CL_SUCCESS);
-
- activeRaysAvailable = false;
-
- for(int rayStateIter = 0;
- rayStateIter < global_size[0] * global_size[1];
- ++rayStateIter)
- {
- if(int8_t(hostRayStateArray[rayStateIter]) != RAY_INACTIVE) {
- /* Not all rays are RAY_INACTIVE. */
- activeRaysAvailable = true;
- break;
- }
- }
-
- if(activeRaysAvailable) {
- numHostIntervention++;
- PathIteration_times = PATH_ITER_INC_FACTOR;
- /* Host intervention done before all rays become RAY_INACTIVE;
- * Set do more initial iterations for the next tile.
- */
- numNextPathIterTimes += PATH_ITER_INC_FACTOR;
- }
-
- if(task->get_cancel()) {
- canceled = true;
- break;
- }
- }
-
- /* Execute SumALLRadiance kernel to accumulate radiance calculated in
- * per_sample_output_buffers into RenderTile's output buffer.
- */
- if(!canceled) {
- size_t sum_all_radiance_local_size[2] = {16, 16};
- size_t sum_all_radiance_global_size[2];
- sum_all_radiance_global_size[0] =
- (((d_w - 1) / sum_all_radiance_local_size[0]) + 1) *
- sum_all_radiance_local_size[0];
- sum_all_radiance_global_size[1] =
- (((d_h - 1) / sum_all_radiance_local_size[1]) + 1) *
- sum_all_radiance_local_size[1];
- ENQUEUE_SPLIT_KERNEL(sum_all_radiance,
- sum_all_radiance_global_size,
- sum_all_radiance_local_size);
- }
-
-#undef ENQUEUE_SPLIT_KERNEL
-#undef GLUE
-
- if(numHostIntervention == 0) {
- /* This means that we are executing kernel more than required
- * Must avoid this for the next sample/tile.
- */
- PathIteration_times = ((numNextPathIterTimes - PATH_ITER_INC_FACTOR) <= 0) ?
- PATH_ITER_INC_FACTOR : numNextPathIterTimes - PATH_ITER_INC_FACTOR;
- }
- else {
- /* Number of path-iterations done for this tile is set as
- * Initial path-iteration times for the next tile
- */
- PathIteration_times = numNextPathIterTimes;
- }
-
- first_tile = false;
- }
-
- /* Calculates the amount of memory that has to be always
- * allocated in order for the split kernel to function.
- * This memory is tile/scene-property invariant (meaning,
- * the value returned by this function does not depend
- * on the user set tile size or scene properties.
- */
- size_t get_invariable_mem_allocated()
- {
- size_t total_invariable_mem_allocated = 0;
- size_t KernelGlobals_size = 0;
-
- KernelGlobals_size = get_KernelGlobals_size();
-
- total_invariable_mem_allocated += KernelGlobals_size; /* KernelGlobals size */
- total_invariable_mem_allocated += NUM_QUEUES * sizeof(unsigned int); /* Queue index size */
- total_invariable_mem_allocated += sizeof(char); /* use_queues_flag size */
-
- return total_invariable_mem_allocated;
- }
-
- /* Calculate the memory that has-to-be/has-been allocated for
- * the split kernel to function.
- */
- size_t get_tile_specific_mem_allocated(const int2 tile_size)
- {
- size_t tile_specific_mem_allocated = 0;
-
- /* Get required tile info */
- unsigned int user_set_tile_w = tile_size.x;
- unsigned int user_set_tile_h = tile_size.y;
-
-#ifdef __WORK_STEALING__
- /* Calculate memory to be allocated for work_pools in
- * case of work_stealing.
- */
- size_t max_global_size[2];
- size_t max_num_work_pools = 0;
- max_global_size[0] =
- (((user_set_tile_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_X;
- max_global_size[1] =
- (((user_set_tile_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_Y;
- max_num_work_pools =
- (max_global_size[0] * max_global_size[1]) /
- (SPLIT_KERNEL_LOCAL_SIZE_X * SPLIT_KERNEL_LOCAL_SIZE_Y);
- tile_specific_mem_allocated += max_num_work_pools * sizeof(unsigned int);
-#endif
-
- tile_specific_mem_allocated +=
- user_set_tile_w * user_set_tile_h * per_thread_output_buffer_size;
- tile_specific_mem_allocated +=
- user_set_tile_w * user_set_tile_h * sizeof(RNG);
-
- return tile_specific_mem_allocated;
- }
-
- /* Calculates the texture memories and KernelData (d_data) memory
- * that has been allocated.
- */
- size_t get_scene_specific_mem_allocated(cl_mem d_data)
- {
- size_t scene_specific_mem_allocated = 0;
- /* Calculate texture memories. */
-#define KERNEL_TEX(type, ttype, name) \
- scene_specific_mem_allocated += get_tex_size(#name);
-#include "kernel_textures.h"
-#undef KERNEL_TEX
- size_t d_data_size;
- ciErr = clGetMemObjectInfo(d_data,
- CL_MEM_SIZE,
- sizeof(d_data_size),
- &d_data_size,
- NULL);
- assert(ciErr == CL_SUCCESS && "Can't get d_data mem object info");
- scene_specific_mem_allocated += d_data_size;
- return scene_specific_mem_allocated;
- }
-
- /* Calculate the memory required for one thread in split kernel. */
- size_t get_per_thread_memory()
- {
- size_t shaderdata_size = 0;
- /* TODO(sergey): This will actually over-allocate if
- * particular kernel does not support multiclosure.
- */
- shaderdata_size = get_shader_data_size(current_max_closure);
- size_t retval = sizeof(RNG)
- + sizeof(float3) /* Throughput size */
- + sizeof(float) /* L transparent size */
- + sizeof(char) /* Ray state size */
- + sizeof(unsigned int) /* Work element size */
- + sizeof(int) /* ISLamp_size */
- + sizeof(PathRadiance) + sizeof(Ray) + sizeof(PathState)
- + sizeof(Intersection) /* Overall isect */
- + sizeof(Intersection) /* Instersection_coop_AO */
- + sizeof(Intersection) /* Intersection coop DL */
- + shaderdata_size /* Overall ShaderData */
- + (shaderdata_size * 2) /* ShaderData : DL and shadow */
- + sizeof(Ray) + sizeof(BsdfEval)
- + sizeof(float3) /* AOAlpha size */
- + sizeof(float3) /* AOBSDF size */
- + sizeof(Ray)
- + (sizeof(int) * NUM_QUEUES)
- + per_thread_output_buffer_size;
- return retval;
- }
-
- /* Considers the total memory available in the device and
- * and returns the maximum global work size possible.
- */
- size_t get_feasible_global_work_size(int2 tile_size, cl_mem d_data)
- {
- /* Calculate invariably allocated memory. */
- size_t invariable_mem_allocated = get_invariable_mem_allocated();
- /* Calculate tile specific allocated memory. */
- size_t tile_specific_mem_allocated =
- get_tile_specific_mem_allocated(tile_size);
- /* Calculate scene specific allocated memory. */
- size_t scene_specific_mem_allocated =
- get_scene_specific_mem_allocated(d_data);
- /* Calculate total memory available for the threads in global work size. */
- size_t available_memory = total_allocatable_memory
- - invariable_mem_allocated
- - tile_specific_mem_allocated
- - scene_specific_mem_allocated
- - DATA_ALLOCATION_MEM_FACTOR;
- size_t per_thread_memory_required = get_per_thread_memory();
- return (available_memory / per_thread_memory_required);
- }
-
- /* Checks if the device has enough memory to render the whole tile;
- * If not, we should split single tile into multiple tiles of small size
- * and process them all.
- */
- bool need_to_split_tile(unsigned int d_w,
- unsigned int d_h,
- int2 max_render_feasible_tile_size)
- {
- size_t global_size_estimate[2];
- /* TODO(sergey): Such round-ups are in quite few places, need to replace
- * them with an utility macro.
- */
- global_size_estimate[0] =
- (((d_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_X;
- global_size_estimate[1] =
- (((d_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_Y;
- if((global_size_estimate[0] * global_size_estimate[1]) >
- (max_render_feasible_tile_size.x * max_render_feasible_tile_size.y))
- {
- return true;
- }
- else {
+ device->ciErr = clEnqueueNDRangeKernel(device->cqCommandQueue,
+ device->program_data_init(),
+ 2,
+ NULL,
+ dim.global_size,
+ dim.local_size,
+ 0,
+ NULL,
+ NULL);
+
+ device->opencl_assert_err(device->ciErr, "clEnqueueNDRangeKernel");
+
+ if(device->ciErr != CL_SUCCESS) {
+ string message = string_printf("OpenCL error: %s in clEnqueueNDRangeKernel()",
+ clewErrorString(device->ciErr));
+ device->opencl_error(message);
return false;
}
- }
- /* Considers the scene properties, global memory available in the device
- * and returns a rectanglular tile dimension (approx the maximum)
- * that should render on split kernel.
- */
- int2 get_max_render_feasible_tile_size(size_t feasible_global_work_size)
- {
- int2 max_render_feasible_tile_size;
- int square_root_val = (int)sqrt(feasible_global_work_size);
- max_render_feasible_tile_size.x = square_root_val;
- max_render_feasible_tile_size.y = square_root_val;
- /* Ciel round-off max_render_feasible_tile_size. */
- int2 ceil_render_feasible_tile_size;
- ceil_render_feasible_tile_size.x =
- (((max_render_feasible_tile_size.x - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_X;
- ceil_render_feasible_tile_size.y =
- (((max_render_feasible_tile_size.y - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_Y;
- if(ceil_render_feasible_tile_size.x * ceil_render_feasible_tile_size.y <=
- feasible_global_work_size)
- {
- return ceil_render_feasible_tile_size;
- }
- /* Floor round-off max_render_feasible_tile_size. */
- int2 floor_render_feasible_tile_size;
- floor_render_feasible_tile_size.x =
- (max_render_feasible_tile_size.x / SPLIT_KERNEL_LOCAL_SIZE_X) *
- SPLIT_KERNEL_LOCAL_SIZE_X;
- floor_render_feasible_tile_size.y =
- (max_render_feasible_tile_size.y / SPLIT_KERNEL_LOCAL_SIZE_Y) *
- SPLIT_KERNEL_LOCAL_SIZE_Y;
- return floor_render_feasible_tile_size;
+ return true;
}
- /* Try splitting the current tile into multiple smaller
- * almost-square tiles.
- */
- int2 get_split_tile_size(RenderTile rtile,
- int2 max_render_feasible_tile_size)
+ virtual int2 split_kernel_local_size()
{
- int2 split_tile_size;
- int num_global_threads = max_render_feasible_tile_size.x *
- max_render_feasible_tile_size.y;
- int d_w = rtile.w;
- int d_h = rtile.h;
- /* Ceil round off d_w and d_h */
- d_w = (((d_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_X;
- d_h = (((d_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_Y;
- while(d_w * d_h > num_global_threads) {
- /* Halve the longer dimension. */
- if(d_w >= d_h) {
- d_w = d_w / 2;
- d_w = (((d_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_X;
- }
- else {
- d_h = d_h / 2;
- d_h = (((d_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_Y;
- }
- }
- split_tile_size.x = d_w;
- split_tile_size.y = d_h;
- return split_tile_size;
+ return make_int2(64, 1);
}
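
The removed get_split_tile_size() shrinks an oversized tile by repeatedly halving its longer axis and re-rounding to the work-group size until the thread count fits. Traced with hypothetical inputs:

/* Hypothetical: 512x512 requested tile, 16x16 work-group, and a budget
 * of 65536 concurrent threads. */
#define CEIL16(x) ((((x) - 1) / 16 + 1) * 16)

static void fit_tile(void)
{
	int d_w = CEIL16(512), d_h = CEIL16(512);  /* 512, 512 */
	int num_global_threads = 65536;
	while(d_w * d_h > num_global_threads) {    /* 262144 > 65536 */
		if(d_w >= d_h)
			d_w = CEIL16(d_w / 2);     /* first pass: 512 -> 256 */
		else
			d_h = CEIL16(d_h / 2);     /* second pass: 512 -> 256 */
	}
	/* Ends at 256x256 = 65536 threads, exactly the budget. */
}
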
- /* Splits existing tile into multiple tiles of tile size split_tile_size. */
- vector<SplitRenderTile> split_tiles(RenderTile rtile, int2 split_tile_size)
+ virtual int2 split_kernel_global_size(DeviceTask *task)
{
- vector<SplitRenderTile> to_path_trace_rtile;
- int d_w = rtile.w;
- int d_h = rtile.h;
- int num_tiles_x = (((d_w - 1) / split_tile_size.x) + 1);
- int num_tiles_y = (((d_h - 1) / split_tile_size.y) + 1);
- /* Buffer and rng_state offset calc. */
- size_t offset_index = rtile.offset + (rtile.x + rtile.y * rtile.stride);
- size_t offset_x = offset_index % rtile.stride;
- size_t offset_y = offset_index / rtile.stride;
- /* Resize to_path_trace_rtile. */
- to_path_trace_rtile.resize(num_tiles_x * num_tiles_y);
- for(int tile_iter_y = 0; tile_iter_y < num_tiles_y; tile_iter_y++) {
- for(int tile_iter_x = 0; tile_iter_x < num_tiles_x; tile_iter_x++) {
- int rtile_index = tile_iter_y * num_tiles_x + tile_iter_x;
- to_path_trace_rtile[rtile_index].rng_state_offset_x = offset_x + tile_iter_x * split_tile_size.x;
- to_path_trace_rtile[rtile_index].rng_state_offset_y = offset_y + tile_iter_y * split_tile_size.y;
- to_path_trace_rtile[rtile_index].buffer_offset_x = offset_x + tile_iter_x * split_tile_size.x;
- to_path_trace_rtile[rtile_index].buffer_offset_y = offset_y + tile_iter_y * split_tile_size.y;
- to_path_trace_rtile[rtile_index].start_sample = rtile.start_sample;
- to_path_trace_rtile[rtile_index].num_samples = rtile.num_samples;
- to_path_trace_rtile[rtile_index].sample = rtile.sample;
- to_path_trace_rtile[rtile_index].resolution = rtile.resolution;
- to_path_trace_rtile[rtile_index].offset = rtile.offset;
- to_path_trace_rtile[rtile_index].buffers = rtile.buffers;
- to_path_trace_rtile[rtile_index].buffer = rtile.buffer;
- to_path_trace_rtile[rtile_index].rng_state = rtile.rng_state;
- to_path_trace_rtile[rtile_index].x = rtile.x + (tile_iter_x * split_tile_size.x);
- to_path_trace_rtile[rtile_index].y = rtile.y + (tile_iter_y * split_tile_size.y);
- to_path_trace_rtile[rtile_index].buffer_rng_state_stride = rtile.stride;
- /* Fill width and height of the new render tile. */
- to_path_trace_rtile[rtile_index].w = (tile_iter_x == (num_tiles_x - 1)) ?
- (d_w - (tile_iter_x * split_tile_size.x)) /* Border tile */
- : split_tile_size.x;
- to_path_trace_rtile[rtile_index].h = (tile_iter_y == (num_tiles_y - 1)) ?
- (d_h - (tile_iter_y * split_tile_size.y)) /* Border tile */
- : split_tile_size.y;
- to_path_trace_rtile[rtile_index].stride = to_path_trace_rtile[rtile_index].w;
- }
- }
- return to_path_trace_rtile;
- }
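
split_tiles() above lays the sub-tiles out on a grid in which only the last column and last row may be narrower than split_tile_size. The border arithmetic in isolation, for a hypothetical 100x70 tile split into 32x32 pieces:

static void border_demo(void)
{
	int d_w = 100, d_h = 70;                    /* hypothetical tile */
	int split_x = 32, split_y = 32;
	int num_tiles_x = (d_w - 1) / split_x + 1;  /* 4 columns */
	int num_tiles_y = (d_h - 1) / split_y + 1;  /* 3 rows */
	for(int ty = 0; ty < num_tiles_y; ty++) {
		for(int tx = 0; tx < num_tiles_x; tx++) {
			int w = (tx == num_tiles_x - 1) ? d_w - tx * split_x
			                                : split_x;
			int h = (ty == num_tiles_y - 1) ? d_h - ty * split_y
			                                : split_y;
			/* Interior tiles are 32x32; the last column is
			 * 4 wide (100 - 3*32), the last row 6 tall
			 * (70 - 2*32). */
			(void)w; (void)h;
		}
	}
}
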
-
- void thread_run(DeviceTask *task)
- {
- if(task->type == DeviceTask::FILM_CONVERT) {
- film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
- }
- else if(task->type == DeviceTask::SHADER) {
- shader(*task);
- }
- else if(task->type == DeviceTask::PATH_TRACE) {
- RenderTile tile;
- bool initialize_data_and_check_render_feasibility = false;
- bool need_to_split_tiles_further = false;
- int2 max_render_feasible_tile_size;
- size_t feasible_global_work_size;
- const int2 tile_size = task->requested_tile_size;
- /* Keep rendering tiles until done. */
- while(task->acquire_tile(this, tile)) {
- if(!initialize_data_and_check_render_feasibility) {
- /* Initialize data. */
- /* Calculate per_thread_output_buffer_size. */
- size_t output_buffer_size = 0;
- ciErr = clGetMemObjectInfo((cl_mem)tile.buffer,
- CL_MEM_SIZE,
- sizeof(output_buffer_size),
- &output_buffer_size,
- NULL);
- assert(ciErr == CL_SUCCESS && "Can't get tile.buffer mem object info");
- /* This value is different when running on AMD and NV. */
- if(background) {
- /* In offline render the number of buffer elements
- * associated with tile.buffer is the current tile size.
- */
- per_thread_output_buffer_size =
- output_buffer_size / (tile.w * tile.h);
- }
- else {
- /* In interactive rendering, unlike offline render, the number of buffer elements
- * associated with tile.buffer is the entire viewport size.
- */
- per_thread_output_buffer_size =
- output_buffer_size / (tile.buffers->params.width *
- tile.buffers->params.height);
- }
- /* Check render feasibility. */
- feasible_global_work_size = get_feasible_global_work_size(
- tile_size,
- CL_MEM_PTR(const_mem_map["__data"]->device_pointer));
- max_render_feasible_tile_size =
- get_max_render_feasible_tile_size(
- feasible_global_work_size);
- need_to_split_tiles_further =
- need_to_split_tile(tile_size.x,
- tile_size.y,
- max_render_feasible_tile_size);
- initialize_data_and_check_render_feasibility = true;
- }
- if(need_to_split_tiles_further) {
- int2 split_tile_size =
- get_split_tile_size(tile,
- max_render_feasible_tile_size);
- vector<SplitRenderTile> to_path_trace_render_tiles =
- split_tiles(tile, split_tile_size);
- /* Print message to console */
- if(background && (to_path_trace_render_tiles.size() > 1)) {
- fprintf(stderr, "Message : Tiles need to be split "
- "further inside path trace (due to insufficient "
- "device-global-memory for split kernel to "
- "function) \n"
- "The current tile of dimensions %dx%d is split "
- "into tiles of dimension %dx%d for render \n",
- tile.w, tile.h,
- split_tile_size.x,
- split_tile_size.y);
- }
- /* Process all split tiles. */
- for(int tile_iter = 0;
- tile_iter < to_path_trace_render_tiles.size();
- ++tile_iter)
- {
- path_trace(task,
- to_path_trace_render_tiles[tile_iter],
- max_render_feasible_tile_size);
- }
- }
- else {
- /* No splitting required; process the entire tile at once. */
- /* Render feasible tile size is the user-set tile size itself. */
- max_render_feasible_tile_size.x =
- (((tile_size.x - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_X;
- max_render_feasible_tile_size.y =
- (((tile_size.y - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
- SPLIT_KERNEL_LOCAL_SIZE_Y;
- /* buffer_rng_state_stride is the tile stride itself. */
- SplitRenderTile split_tile(tile);
- split_tile.buffer_rng_state_stride = tile.stride;
- path_trace(task, split_tile, max_render_feasible_tile_size);
- }
- tile.sample = tile.start_sample + tile.num_samples;
+ size_t max_buffer_size;
+ clGetDeviceInfo(device->cdDevice, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(size_t), &max_buffer_size, NULL);
- /* Complete kernel execution before releasing the tile. */
- /* This helps in multi-device render:
- * the device that reaches the critical-section function
- * release_tile waits (stalling other devices from entering
- * release_tile) for all kernels to complete. If device1 (a
- * slow-render device) reaches release_tile first, it would
- * stall device2 (a fast-render device) from proceeding to render
- * the next tile.
- */
- clFinish(cqCommandQueue);
-
- task->release_tile(tile);
- }
- }
- }
-
-protected:
- cl_mem mem_alloc(size_t bufsize, cl_mem_flags mem_flag = CL_MEM_READ_WRITE)
- {
- cl_mem ptr;
- assert(bufsize != 0);
- ptr = clCreateBuffer(cxContext, mem_flag, bufsize, NULL, &ciErr);
- opencl_assert_err(ciErr, "clCreateBuffer");
- return ptr;
+ size_t num_elements = max_elements_for_max_buffer_size(max_buffer_size / 2, task->passes_size);
+ int2 global_size = make_int2(round_down((int)sqrt(num_elements), 64), (int)sqrt(num_elements));
+ return global_size;
}
+};
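
The new split_kernel_global_size() sizes the launch from memory rather than from the tile: half of CL_DEVICE_MAX_MEM_ALLOC_SIZE is budgeted for split state, converted to a thread count, and shaped into a near-square grid whose x side is rounded down to a multiple of 64. Worked through with hypothetical numbers (the 2 KiB per-thread figure is invented; the real count comes from max_elements_for_max_buffer_size() and the task's passes_size):

#include <math.h>
#include <stdio.h>

int main(void)
{
	size_t max_buffer_size  = 512ul << 20;  /* hypothetical device limit */
	size_t per_thread_state = 2048;         /* hypothetical state size */
	size_t num_elements = (max_buffer_size / 2) / per_thread_state; /* 131072 */

	int side = (int)sqrt((double)num_elements);  /* 362 */
	int gx = (side / 64) * 64;                   /* round_down -> 320 */
	int gy = side;
	printf("global size %dx%d = %d threads\n", gx, gy, gx * gy); /* 115840 */
	return 0;
}
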
- /* ** These are for working around some compiler-specific bugs. ** */
+OpenCLDeviceSplitKernel::OpenCLDeviceSplitKernel(DeviceInfo& info, Stats &stats, bool background_)
+: OpenCLDeviceBase(info, stats, background_)
+{
+ split_kernel = new OpenCLSplitKernel(this);
- string build_options_for_base_program(
- const DeviceRequestedFeatures& requested_features)
- {
- return requested_features.get_build_options();
- }
-};
+ background = background_;
+}
Device *opencl_create_split_device(DeviceInfo& info, Stats& stats, bool background)
{
diff --git a/intern/cycles/kernel/CMakeLists.txt b/intern/cycles/kernel/CMakeLists.txt
index 29e0f44841e..cc1ad5ce48c 100644
--- a/intern/cycles/kernel/CMakeLists.txt
+++ b/intern/cycles/kernel/CMakeLists.txt
@@ -195,10 +195,12 @@ set(SRC_SPLIT_HEADERS
split/kernel_holdout_emission_blurring_pathtermination_ao.h
split/kernel_lamp_emission.h
split/kernel_next_iteration_setup.h
+ split/kernel_queue_enqueue.h
split/kernel_scene_intersect.h
split/kernel_shader_eval.h
split/kernel_shadow_blocked.h
split/kernel_split_common.h
+ split/kernel_split_data.h
split/kernel_sum_all_radiance.h
)
diff --git a/intern/cycles/kernel/kernel_compat_opencl.h b/intern/cycles/kernel/kernel_compat_opencl.h
index f076e3a7d37..6c963dea4f5 100644
--- a/intern/cycles/kernel/kernel_compat_opencl.h
+++ b/intern/cycles/kernel/kernel_compat_opencl.h
@@ -39,6 +39,7 @@
#define ccl_constant __constant
#define ccl_global __global
#define ccl_local __local
+#define ccl_local_param __local
#define ccl_private __private
#define ccl_restrict restrict
#define ccl_align(n) __attribute__((aligned(n)))
@@ -49,6 +50,15 @@
# define ccl_addr_space
#endif
+#define ccl_local_id(d) get_local_id(d)
+#define ccl_global_id(d) get_global_id(d)
+
+#define ccl_local_size(d) get_local_size(d)
+#define ccl_global_size(d) get_global_size(d)
+
+#define ccl_group_id(d) get_group_id(d)
+#define ccl_num_groups(d) get_num_groups(d)
+
/* Selective nodes compilation. */
#ifndef __NODES_MAX_GROUP__
# define __NODES_MAX_GROUP__ NODE_GROUP_LEVEL_MAX
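
The ccl_* wrappers added above decouple split-kernel code from OpenCL's get_global_id() family so that the same sources can target other backends. Purely as an illustration (not the actual definitions from any other Cycles header), a CPU-style backend could map them onto plain loop state:

/* Illustrative only: hypothetical per-"thread" state for a CPU backend
 * that executes work-items in a serial loop. */
extern int cpu_item_x, cpu_item_y;   /* current loop indices */
extern int cpu_grid_w, cpu_grid_h;   /* launch dimensions */

#define ccl_global_id(d)   ((d) == 0 ? cpu_item_x : cpu_item_y)
#define ccl_global_size(d) ((d) == 0 ? cpu_grid_w : cpu_grid_h)
#define ccl_local_id(d)    0   /* one work-item per "group" */
#define ccl_local_size(d)  1
#define ccl_group_id(d)    ccl_global_id(d)
#define ccl_num_groups(d)  ccl_global_size(d)
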
diff --git a/intern/cycles/kernel/kernel_globals.h b/intern/cycles/kernel/kernel_globals.h
index 2b52a2d2f48..e994836f6a2 100644
--- a/intern/cycles/kernel/kernel_globals.h
+++ b/intern/cycles/kernel/kernel_globals.h
@@ -105,6 +105,8 @@ typedef ccl_addr_space struct KernelGlobals {
# ifdef __SPLIT_KERNEL__
ShaderData *sd_input;
Intersection *isect_shadow;
+ SplitData split_data;
+ SplitParams split_param_data;
# endif
} KernelGlobals;
diff --git a/intern/cycles/kernel/kernel_passes.h b/intern/cycles/kernel/kernel_passes.h
index 7aec47e4957..7790cce067b 100644
--- a/intern/cycles/kernel/kernel_passes.h
+++ b/intern/cycles/kernel/kernel_passes.h
@@ -19,16 +19,16 @@ CCL_NAMESPACE_BEGIN
ccl_device_inline void kernel_write_pass_float(ccl_global float *buffer, int sample, float value)
{
ccl_global float *buf = buffer;
-#if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
+#if defined(__SPLIT_KERNEL__)
atomic_add_and_fetch_float(buf, value);
#else
*buf = (sample == 0)? value: *buf + value;
-#endif // __SPLIT_KERNEL__ && __WORK_STEALING__
+#endif /* __SPLIT_KERNEL__ */
}
ccl_device_inline void kernel_write_pass_float3(ccl_global float *buffer, int sample, float3 value)
{
-#if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
+#if defined(__SPLIT_KERNEL__)
ccl_global float *buf_x = buffer + 0;
ccl_global float *buf_y = buffer + 1;
ccl_global float *buf_z = buffer + 2;
@@ -39,12 +39,12 @@ ccl_device_inline void kernel_write_pass_float3(ccl_global float *buffer, int sa
#else
ccl_global float3 *buf = (ccl_global float3*)buffer;
*buf = (sample == 0)? value: *buf + value;
-#endif // __SPLIT_KERNEL__ && __WORK_STEALING__
+#endif /* __SPLIT_KERNEL__ */
}
ccl_device_inline void kernel_write_pass_float4(ccl_global float *buffer, int sample, float4 value)
{
-#if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
+#if defined(__SPLIT_KERNEL__)
ccl_global float *buf_x = buffer + 0;
ccl_global float *buf_y = buffer + 1;
ccl_global float *buf_z = buffer + 2;
@@ -57,7 +57,7 @@ ccl_device_inline void kernel_write_pass_float4(ccl_global float *buffer, int sa
#else
ccl_global float4 *buf = (ccl_global float4*)buffer;
*buf = (sample == 0)? value: *buf + value;
-#endif // __SPLIT_KERNEL__ && __WORK_STEALING__
+#endif /* __SPLIT_KERNEL__ */
}
ccl_device_inline void kernel_write_data_passes(KernelGlobals *kg, ccl_global float *buffer, PathRadiance *L,
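
Dropping the __WORK_STEALING__ guard means split-kernel pass writes always go through atomic_add_and_fetch_float(), since several rays may accumulate into one pixel's passes concurrently. A float atomic like this is conventionally built from a 32-bit compare-and-swap loop; a sketch in OpenCL C (Cycles' real helper lives in its atomic utility header and may differ in detail):

/* Add `value` to *p atomically by retrying a 32-bit CAS until no other
 * thread has modified *p between the read and the swap. */
ccl_device_inline float atomic_add_and_fetch_float_sketch(
        volatile ccl_global float *p, float value)
{
	union { unsigned int i; float f; } old_val, new_val;
	do {
		old_val.f = *p;
		new_val.f = old_val.f + value;
	} while(atomic_cmpxchg((volatile ccl_global unsigned int *)p,
	                       old_val.i, new_val.i) != old_val.i);
	return new_val.f;
}
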
diff --git a/intern/cycles/kernel/kernel_queues.h b/intern/cycles/kernel/kernel_queues.h
index cf5614b8a86..2e63909a38c 100644
--- a/intern/cycles/kernel/kernel_queues.h
+++ b/intern/cycles/kernel/kernel_queues.h
@@ -17,6 +17,8 @@
#ifndef __KERNEL_QUEUE_H__
#define __KERNEL_QUEUE_H__
+CCL_NAMESPACE_BEGIN
+
/*
* Queue utility functions for split kernel
*/
@@ -35,7 +37,8 @@ ccl_device void enqueue_ray_index(
ccl_global int *queue_index) /* Array of size num_queues; Used for atomic increment. */
{
/* This thread's queue index. */
- int my_queue_index = atomic_inc(&queue_index[queue_number]) + (queue_number * queue_size);
+ int my_queue_index = atomic_fetch_and_inc_uint32((ccl_global uint*)&queue_index[queue_number])
+ + (queue_number * queue_size);
queues[my_queue_index] = ray_index;
}
@@ -47,6 +50,7 @@ ccl_device void enqueue_ray_index(
* is no more ray to allocate to other threads.
*/
ccl_device int get_ray_index(
+ KernelGlobals *kg,
int thread_index, /* Global thread index. */
int queue_number, /* Queue to operate on. */
ccl_global int *queues, /* Buffer of all queues. */
@@ -68,24 +72,25 @@ ccl_device void enqueue_ray_index_local(
int queue_number, /* Queue in which to enqueue ray index. */
char enqueue_flag, /* True for threads whose ray index has to be enqueued. */
int queuesize, /* queue size. */
- ccl_local unsigned int *local_queue_atomics, /* To do local queue atomics. */
+ ccl_local_param unsigned int *local_queue_atomics, /* To do local queue atomics. */
ccl_global int *Queue_data, /* Queues. */
ccl_global int *Queue_index) /* To do global queue atomics. */
{
- int lidx = get_local_id(1) * get_local_size(0) + get_local_id(0);
+ int lidx = ccl_local_id(1) * ccl_local_size(0) + ccl_local_id(0);
/* Get local queue id. */
unsigned int lqidx;
if(enqueue_flag) {
- lqidx = atomic_inc(local_queue_atomics);
+ lqidx = atomic_fetch_and_inc_uint32(local_queue_atomics);
}
- barrier(CLK_LOCAL_MEM_FENCE);
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
/* Get global queue offset. */
if(lidx == 0) {
- *local_queue_atomics = atomic_add(&Queue_index[queue_number], *local_queue_atomics);
+ *local_queue_atomics = atomic_fetch_and_add_uint32((ccl_global uint*)&Queue_index[queue_number],
+ *local_queue_atomics);
}
- barrier(CLK_LOCAL_MEM_FENCE);
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
/* Get global queue index and enqueue ray. */
if(enqueue_flag) {
@@ -96,19 +101,19 @@ ccl_device void enqueue_ray_index_local(
ccl_device unsigned int get_local_queue_index(
int queue_number, /* Queue in which to enqueue the ray; -1 if no queue */
- ccl_local unsigned int *local_queue_atomics)
+ ccl_local_param unsigned int *local_queue_atomics)
{
- int my_lqidx = atomic_inc(&local_queue_atomics[queue_number]);
+ int my_lqidx = atomic_fetch_and_inc_uint32(&local_queue_atomics[queue_number]);
return my_lqidx;
}
ccl_device unsigned int get_global_per_queue_offset(
int queue_number,
- ccl_local unsigned int *local_queue_atomics,
+ ccl_local_param unsigned int *local_queue_atomics,
ccl_global int* global_queue_atomics)
{
- unsigned int queue_offset = atomic_add(&global_queue_atomics[queue_number],
- local_queue_atomics[queue_number]);
+ unsigned int queue_offset = atomic_fetch_and_add_uint32((ccl_global uint*)&global_queue_atomics[queue_number],
+ local_queue_atomics[queue_number]);
return queue_offset;
}
@@ -116,10 +121,12 @@ ccl_device unsigned int get_global_queue_index(
int queue_number,
int queuesize,
unsigned int lqidx,
- ccl_local unsigned int * global_per_queue_offset)
+ ccl_local_param unsigned int * global_per_queue_offset)
{
int my_gqidx = queuesize * queue_number + lqidx + global_per_queue_offset[queue_number];
return my_gqidx;
}
+CCL_NAMESPACE_END
+
#endif // __KERNEL_QUEUE_H__
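
The point of enqueue_ray_index_local() above is that a work-group pays for one global atomic instead of one per ray: flagged threads first reserve slots in a local counter, thread 0 then bumps the global queue index by the group's total, and every thread writes at group offset plus local slot. A single-threaded simulation of one such round:

#include <stdio.h>

int main(void)
{
	enum { GROUP = 8, QSIZE = 64 };
	int queue[QSIZE];
	int queue_index = 5;                 /* queue already holds 5 rays */
	int enqueue_flag[GROUP] = {1, 0, 1, 1, 0, 0, 1, 0};
	int local_slot[GROUP];

	/* Phase 1: local atomic_inc gives each flagged thread a slot. */
	int local_count = 0;
	for(int t = 0; t < GROUP; t++)
		if(enqueue_flag[t])
			local_slot[t] = local_count++;

	/* Phase 2: one global atomic_add reserves the whole range. */
	int group_offset = queue_index;
	queue_index += local_count;

	/* Phase 3: each flagged thread writes into its reserved slot. */
	for(int t = 0; t < GROUP; t++)
		if(enqueue_flag[t])
			queue[group_offset + local_slot[t]] = t;

	printf("enqueued %d rays; queue length now %d\n",
	       local_count, queue_index);   /* 4 rays, length 9 */
	return 0;
}
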
diff --git a/intern/cycles/kernel/kernel_shadow.h b/intern/cycles/kernel/kernel_shadow.h
index 06a77a208cb..6abfa9c3873 100644
--- a/intern/cycles/kernel/kernel_shadow.h
+++ b/intern/cycles/kernel/kernel_shadow.h
@@ -248,7 +248,7 @@ ccl_device bool shadow_blocked_transparent_all(KernelGlobals *kg,
}
# endif /* __SHADOW_RECORD_ALL__ */
-# ifdef __KERNEL_GPU__
+# if defined(__KERNEL_GPU__) || !defined(__SHADOW_RECORD_ALL__)
/* Shadow function to compute how much light is blocked,
*
* Here we raytrace from one transparent surface to the next step by step.
@@ -359,7 +359,7 @@ ccl_device bool shadow_blocked_transparent_stepped(
shadow);
}
-# endif /* __KERNEL_GPU__ */
+# endif /* __KERNEL_GPU__ || !__SHADOW_RECORD_ALL__ */
#endif /* __TRANSPARENT_SHADOWS__ */
ccl_device_inline bool shadow_blocked(KernelGlobals *kg,
@@ -374,7 +374,7 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg,
#ifdef __SPLIT_KERNEL__
Ray private_ray = *ray_input;
Ray *ray = &private_ray;
- Intersection *isect = &kg->isect_shadow[SD_THREAD];
+ Intersection *isect = &kernel_split_state.isect_shadow[SD_THREAD];
#else /* __SPLIT_KERNEL__ */
Ray *ray = ray_input;
Intersection isect_object;
diff --git a/intern/cycles/kernel/kernel_types.h b/intern/cycles/kernel/kernel_types.h
index 8250eaf6073..5a80cdd6585 100644
--- a/intern/cycles/kernel/kernel_types.h
+++ b/intern/cycles/kernel/kernel_types.h
@@ -56,6 +56,8 @@ CCL_NAMESPACE_BEGIN
#define VOLUME_STACK_SIZE 16
+#define WORK_POOL_SIZE 64
+
/* device capabilities */
#ifdef __KERNEL_CPU__
# ifdef __KERNEL_SSE2__
@@ -799,7 +801,7 @@ enum ShaderDataObjectFlag {
};
#ifdef __SPLIT_KERNEL__
-# define SD_THREAD (get_global_id(1) * get_global_size(0) + get_global_id(0))
+# define SD_THREAD (ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0))
# if !defined(__SPLIT_KERNEL_SOA__)
/* ShaderData is stored as an Array-of-Structures */
# define ccl_soa_member(type, name) type soa_##name
@@ -807,7 +809,7 @@ enum ShaderDataObjectFlag {
# define ccl_fetch_array(s, t, index) (&s[SD_THREAD].soa_##t[index])
# else
/* ShaderData is stored as an Structure-of-Arrays */
-# define SD_GLOBAL_SIZE (get_global_size(0) * get_global_size(1))
+# define SD_GLOBAL_SIZE (ccl_global_size(0) * ccl_global_size(1))
# define SD_FIELD_SIZE(t) sizeof(((struct ShaderData*)0)->t)
# define SD_OFFSETOF(t) ((char*)(&((struct ShaderData*)0)->t) - (char*)0)
# define ccl_soa_member(type, name) type soa_##name
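
The branch above keeps one ccl_fetch interface over two ShaderData layouts: array-of-structures, where each thread owns a whole struct, and structure-of-arrays, where every field is stored contiguously across threads so neighbouring work-items read neighbouring addresses. A stripped-down illustration of the difference (the struct and field names are hypothetical):

enum { N = 1024 };                 /* number of parallel threads */

/* AoS: thread i's fields live together; reading .u across a warp
 * strides memory by sizeof(SDExample). */
typedef struct { float u, v; } SDExample;
SDExample sd_aos[N];
#define FETCH_AOS(i, field) (sd_aos[i].field)

/* SoA: one array per field; reading u across a warp touches
 * consecutive floats, which coalesces on GPUs. */
float sd_u[N];
float sd_v[N];
#define FETCH_SOA_U(i) (sd_u[i])
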
diff --git a/intern/cycles/kernel/kernel_work_stealing.h b/intern/cycles/kernel/kernel_work_stealing.h
index 7d559b1aa31..28fc5ce1c30 100644
--- a/intern/cycles/kernel/kernel_work_stealing.h
+++ b/intern/cycles/kernel/kernel_work_stealing.h
@@ -17,177 +17,102 @@
#ifndef __KERNEL_WORK_STEALING_H__
#define __KERNEL_WORK_STEALING_H__
+CCL_NAMESPACE_BEGIN
+
/*
* Utility functions for work stealing
*/
-#ifdef __WORK_STEALING__
-
#ifdef __KERNEL_OPENCL__
# pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable
#endif
-uint get_group_id_with_ray_index(uint ray_index,
- uint tile_dim_x,
- uint tile_dim_y,
- uint parallel_samples,
- int dim)
+ccl_device_inline uint kernel_total_work_size(KernelGlobals *kg)
+{
+ return kernel_split_params.w * kernel_split_params.h * kernel_split_params.num_samples;
+}
+
+ccl_device_inline uint kernel_num_work_pools(KernelGlobals *kg)
+{
+ return ccl_global_size(0) * ccl_global_size(1) / WORK_POOL_SIZE;
+}
+
+ccl_device_inline uint work_pool_from_ray_index(KernelGlobals *kg, uint ray_index)
+{
+ return ray_index / WORK_POOL_SIZE;
+}
+
+ccl_device_inline uint work_pool_work_size(KernelGlobals *kg, uint work_pool)
{
- if(dim == 0) {
- uint x_span = ray_index % (tile_dim_x * parallel_samples);
- return x_span / get_local_size(0);
+ uint total_work_size = kernel_total_work_size(kg);
+ uint num_pools = kernel_num_work_pools(kg);
+
+ if(work_pool >= num_pools || work_pool * WORK_POOL_SIZE >= total_work_size) {
+ return 0;
+ }
+
+ uint work_size = (total_work_size / (num_pools * WORK_POOL_SIZE)) * WORK_POOL_SIZE;
+
+ uint remainder = (total_work_size % (num_pools * WORK_POOL_SIZE));
+ if(work_pool < remainder / WORK_POOL_SIZE) {
+ work_size += WORK_POOL_SIZE;
}
- else /*if(dim == 1)*/ {
- kernel_assert(dim == 1);
- uint y_span = ray_index / (tile_dim_x * parallel_samples);
- return y_span / get_local_size(1);
+ else if(work_pool == remainder / WORK_POOL_SIZE) {
+ work_size += remainder % WORK_POOL_SIZE;
}
+
+ return work_size;
}
-uint get_total_work(uint tile_dim_x,
- uint tile_dim_y,
- uint grp_idx,
- uint grp_idy,
- uint num_samples)
+ccl_device_inline uint get_global_work_index(KernelGlobals *kg, uint work_index, uint ray_index)
{
- uint threads_within_tile_border_x =
- (grp_idx == (get_num_groups(0) - 1)) ? tile_dim_x % get_local_size(0)
- : get_local_size(0);
- uint threads_within_tile_border_y =
- (grp_idy == (get_num_groups(1) - 1)) ? tile_dim_y % get_local_size(1)
- : get_local_size(1);
-
- threads_within_tile_border_x =
- (threads_within_tile_border_x == 0) ? get_local_size(0)
- : threads_within_tile_border_x;
- threads_within_tile_border_y =
- (threads_within_tile_border_y == 0) ? get_local_size(1)
- : threads_within_tile_border_y;
-
- return threads_within_tile_border_x *
- threads_within_tile_border_y *
- num_samples;
+ uint num_pools = kernel_num_work_pools(kg);
+ uint pool = work_pool_from_ray_index(kg, ray_index);
+
+ return (work_index / WORK_POOL_SIZE) * (num_pools * WORK_POOL_SIZE)
+ + (pool * WORK_POOL_SIZE)
+ + (work_index % WORK_POOL_SIZE);
}
-/* Returns 0 in case there is no next work available */
-/* Returns 1 in case work assigned is valid */
-int get_next_work(ccl_global uint *work_pool,
- ccl_private uint *my_work,
- uint tile_dim_x,
- uint tile_dim_y,
- uint num_samples,
- uint parallel_samples,
- uint ray_index)
+/* Returns true if there is work */
+ccl_device bool get_next_work(KernelGlobals *kg, ccl_private uint *work_index, uint ray_index)
{
- uint grp_idx = get_group_id_with_ray_index(ray_index,
- tile_dim_x,
- tile_dim_y,
- parallel_samples,
- 0);
- uint grp_idy = get_group_id_with_ray_index(ray_index,
- tile_dim_x,
- tile_dim_y,
- parallel_samples,
- 1);
- uint total_work = get_total_work(tile_dim_x,
- tile_dim_y,
- grp_idx,
- grp_idy,
- num_samples);
- uint group_index = grp_idy * get_num_groups(0) + grp_idx;
- *my_work = atomic_inc(&work_pool[group_index]);
- return (*my_work < total_work) ? 1 : 0;
+ uint work_pool = work_pool_from_ray_index(kg, ray_index);
+ uint pool_size = work_pool_work_size(kg, work_pool);
+
+ if(pool_size == 0) {
+ return false;
+ }
+
+ *work_index = atomic_fetch_and_inc_uint32(&kernel_split_params.work_pools[work_pool]);
+ return (*work_index < pool_size);
}
-/* This function assumes that the passed my_work is valid. */
-/* Decode sample number w.r.t. assigned my_work. */
-uint get_my_sample(uint my_work,
- uint tile_dim_x,
- uint tile_dim_y,
- uint parallel_samples,
- uint ray_index)
+/* This function assumes that the passed `work_index` is valid. */
+/* Decode sample number w.r.t. the assigned `work_index`. */
+ccl_device uint get_work_sample(KernelGlobals *kg, uint work_index, uint ray_index)
{
- uint grp_idx = get_group_id_with_ray_index(ray_index,
- tile_dim_x,
- tile_dim_y,
- parallel_samples,
- 0);
- uint grp_idy = get_group_id_with_ray_index(ray_index,
- tile_dim_x,
- tile_dim_y,
- parallel_samples,
- 1);
- uint threads_within_tile_border_x =
- (grp_idx == (get_num_groups(0) - 1)) ? tile_dim_x % get_local_size(0)
- : get_local_size(0);
- uint threads_within_tile_border_y =
- (grp_idy == (get_num_groups(1) - 1)) ? tile_dim_y % get_local_size(1)
- : get_local_size(1);
-
- threads_within_tile_border_x =
- (threads_within_tile_border_x == 0) ? get_local_size(0)
- : threads_within_tile_border_x;
- threads_within_tile_border_y =
- (threads_within_tile_border_y == 0) ? get_local_size(1)
- : threads_within_tile_border_y;
-
- return my_work /
- (threads_within_tile_border_x * threads_within_tile_border_y);
+ return get_global_work_index(kg, work_index, ray_index) / (kernel_split_params.w * kernel_split_params.h);
}
-/* Decode pixel and tile position w.r.t. assigned my_work. */
-void get_pixel_tile_position(ccl_private uint *pixel_x,
+/* Decode pixel and tile position w.r.t. the assigned `work_index`. */
+ccl_device void get_work_pixel_tile_position(KernelGlobals *kg,
+ ccl_private uint *pixel_x,
ccl_private uint *pixel_y,
ccl_private uint *tile_x,
ccl_private uint *tile_y,
- uint my_work,
- uint tile_dim_x,
- uint tile_dim_y,
- uint tile_offset_x,
- uint tile_offset_y,
- uint parallel_samples,
+ uint work_index,
uint ray_index)
{
- uint grp_idx = get_group_id_with_ray_index(ray_index,
- tile_dim_x,
- tile_dim_y,
- parallel_samples,
- 0);
- uint grp_idy = get_group_id_with_ray_index(ray_index,
- tile_dim_x,
- tile_dim_y,
- parallel_samples,
- 1);
- uint threads_within_tile_border_x =
- (grp_idx == (get_num_groups(0) - 1)) ? tile_dim_x % get_local_size(0)
- : get_local_size(0);
- uint threads_within_tile_border_y =
- (grp_idy == (get_num_groups(1) - 1)) ? tile_dim_y % get_local_size(1)
- : get_local_size(1);
-
- threads_within_tile_border_x =
- (threads_within_tile_border_x == 0) ? get_local_size(0)
- : threads_within_tile_border_x;
- threads_within_tile_border_y =
- (threads_within_tile_border_y == 0) ? get_local_size(1)
- : threads_within_tile_border_y;
-
- uint total_associated_pixels =
- threads_within_tile_border_x * threads_within_tile_border_y;
- uint work_group_pixel_index = my_work % total_associated_pixels;
- uint work_group_pixel_x =
- work_group_pixel_index % threads_within_tile_border_x;
- uint work_group_pixel_y =
- work_group_pixel_index / threads_within_tile_border_x;
-
- *pixel_x =
- tile_offset_x + (grp_idx * get_local_size(0)) + work_group_pixel_x;
- *pixel_y =
- tile_offset_y + (grp_idy * get_local_size(1)) + work_group_pixel_y;
- *tile_x = *pixel_x - tile_offset_x;
- *tile_y = *pixel_y - tile_offset_y;
+ uint pixel_index = get_global_work_index(kg, work_index, ray_index) % (kernel_split_params.w*kernel_split_params.h);
+
+ *tile_x = pixel_index % kernel_split_params.w;
+ *tile_y = pixel_index / kernel_split_params.w;
+
+ *pixel_x = *tile_x + kernel_split_params.x;
+ *pixel_y = *tile_y + kernel_split_params.y;
}
-#endif /* __WORK_STEALING__ */
+CCL_NAMESPACE_END
#endif /* __KERNEL_WORK_STEALING_H__ */
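
The rewritten scheme flattens all work into w * h * num_samples units and deals it to pools of WORK_POOL_SIZE threads in interleaved chunks: pool p owns chunks p, p + num_pools, p + 2 * num_pools, and so on, with the leading pools absorbing any remainder. Decoding then peels the sample off first and the pixel second. Checking the arithmetic with hypothetical numbers:

#include <stdio.h>

enum { WORK_POOL_SIZE = 64 };

int main(void)
{
	/* Hypothetical: 16x16 tile, 3 samples, 4 pools of 64 threads. */
	unsigned w = 16, h = 16, num_samples = 3, num_pools = 4;
	unsigned total = w * h * num_samples;          /* 768 */

	/* Equal split: 768 / (4 * 64) = 3 full chunks -> 192 per pool. */
	unsigned per_pool =
	        (total / (num_pools * WORK_POOL_SIZE)) * WORK_POOL_SIZE;

	/* Pool 1 hands out its 70th unit of work. */
	unsigned pool = 1, work_index = 70;
	unsigned global = (work_index / WORK_POOL_SIZE) * (num_pools * WORK_POOL_SIZE)
	                + pool * WORK_POOL_SIZE
	                + work_index % WORK_POOL_SIZE; /* 326 */

	unsigned sample = global / (w * h);            /* 326 / 256 = 1 */
	unsigned pixel  = global % (w * h);            /* 70 */
	printf("per_pool=%u global=%u sample=%u tile=(%u,%u)\n",
	       per_pool, global, sample, pixel % w, pixel / w); /* (6,4) */
	return 0;
}
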
diff --git a/intern/cycles/kernel/kernels/opencl/kernel.cl b/intern/cycles/kernel/kernels/opencl/kernel.cl
index e501fd4f015..52406d2f548 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel.cl
@@ -67,8 +67,8 @@ __kernel void kernel_ocl_path_trace(
kg->name = name;
#include "../../kernel_textures.h"
- int x = sx + get_global_id(0);
- int y = sy + get_global_id(1);
+ int x = sx + ccl_global_id(0);
+ int y = sy + ccl_global_id(1);
if(x < sx + sw && y < sy + sh)
kernel_path_trace(kg, buffer, rng_state, sample, x, y, offset, stride);
@@ -96,7 +96,7 @@ __kernel void kernel_ocl_shader(
kg->name = name;
#include "../../kernel_textures.h"
- int x = sx + get_global_id(0);
+ int x = sx + ccl_global_id(0);
if(x < sx + sw) {
kernel_shader_evaluate(kg,
@@ -128,7 +128,7 @@ __kernel void kernel_ocl_bake(
kg->name = name;
#include "../../kernel_textures.h"
- int x = sx + get_global_id(0);
+ int x = sx + ccl_global_id(0);
if(x < sx + sw) {
#ifdef __NO_BAKING__
@@ -159,8 +159,8 @@ __kernel void kernel_ocl_convert_to_byte(
kg->name = name;
#include "../../kernel_textures.h"
- int x = sx + get_global_id(0);
- int y = sy + get_global_id(1);
+ int x = sx + ccl_global_id(0);
+ int y = sy + ccl_global_id(1);
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_byte(kg, rgba, buffer, sample_scale, x, y, offset, stride);
@@ -186,8 +186,8 @@ __kernel void kernel_ocl_convert_to_half_float(
kg->name = name;
#include "../../kernel_textures.h"
- int x = sx + get_global_id(0);
- int y = sy + get_global_id(1);
+ int x = sx + ccl_global_id(0);
+ int y = sy + ccl_global_id(1);
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_half_float(kg, rgba, buffer, sample_scale, x, y, offset, stride);
@@ -195,7 +195,7 @@ __kernel void kernel_ocl_convert_to_half_float(
__kernel void kernel_ocl_zero_buffer(ccl_global float4 *buffer, ulong size, ulong offset)
{
- size_t i = get_global_id(0) + get_global_id(1) * get_global_size(0);
+ size_t i = ccl_global_id(0) + ccl_global_id(1) * ccl_global_size(0);
if(i < size / sizeof(float4)) {
buffer[i+offset/sizeof(float4)] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_background_buffer_update.cl b/intern/cycles/kernel/kernels/opencl/kernel_background_buffer_update.cl
index 1914d241eb1..47e363f6e03 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_background_buffer_update.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_background_buffer_update.cl
@@ -14,112 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_background_buffer_update.h"
__kernel void kernel_ocl_path_trace_background_buffer_update(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global float *per_sample_output_buffers,
- ccl_global uint *rng_state,
- ccl_global uint *rng_coop, /* Required for buffer Update */
- ccl_global float3 *throughput_coop, /* Required for background hit processing */
- PathRadiance *PathRadiance_coop, /* Required for background hit processing and buffer Update */
- ccl_global Ray *Ray_coop, /* Required for background hit processing */
- ccl_global PathState *PathState_coop, /* Required for background hit processing */
- ccl_global float *L_transparent_coop, /* Required for background hit processing and buffer Update */
- ccl_global char *ray_state, /* Stores information on the current state of a ray */
- int sw, int sh, int sx, int sy, int stride,
- int rng_state_offset_x,
- int rng_state_offset_y,
- int rng_state_stride,
- ccl_global unsigned int *work_array, /* Denotes work of each ray */
- ccl_global int *Queue_data, /* Queues memory */
- ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
- int queuesize, /* Size (capacity) of each queue */
- int end_sample,
- int start_sample,
-#ifdef __WORK_STEALING__
- ccl_global unsigned int *work_pool_wgs,
- unsigned int num_samples,
-#endif
-#ifdef __KERNEL_DEBUG__
- DebugData *debugdata_coop,
-#endif
- int parallel_samples) /* Number of samples to be processed in parallel */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- ccl_local unsigned int local_queue_atomics;
- if(get_local_id(0) == 0 && get_local_id(1) == 0) {
- local_queue_atomics = 0;
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- if(ray_index == 0) {
- /* We will empty this queue in this kernel. */
- Queue_index[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] = 0;
- }
- char enqueue_flag = 0;
- ray_index = get_ray_index(ray_index,
- QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
- Queue_data,
- queuesize,
- 1);
-
-#ifdef __COMPUTE_DEVICE_GPU__
- /* If we are executing on a GPU device, we exit all threads that are not
- * required.
- *
- * If we are executing on a CPU device, then we need to keep all threads
- * active since we have barrier() calls later in the kernel. CPU devices
- * expect all threads to execute the barrier statement.
- */
- if(ray_index == QUEUE_EMPTY_SLOT) {
- return;
- }
-#endif
-
-#ifndef __COMPUTE_DEVICE_GPU__
- if(ray_index != QUEUE_EMPTY_SLOT) {
-#endif
- enqueue_flag =
- kernel_background_buffer_update((KernelGlobals *)kg,
- per_sample_output_buffers,
- rng_state,
- rng_coop,
- throughput_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- L_transparent_coop,
- ray_state,
- sw, sh, sx, sy, stride,
- rng_state_offset_x,
- rng_state_offset_y,
- rng_state_stride,
- work_array,
- end_sample,
- start_sample,
-#ifdef __WORK_STEALING__
- work_pool_wgs,
- num_samples,
-#endif
-#ifdef __KERNEL_DEBUG__
- debugdata_coop,
-#endif
- parallel_samples,
- ray_index);
-#ifndef __COMPUTE_DEVICE_GPU__
- }
-#endif
-
- /* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
- * These rays will be made active during next SceneIntersectkernel.
- */
- enqueue_ray_index_local(ray_index,
- QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- enqueue_flag,
- queuesize,
- &local_queue_atomics,
- Queue_data,
- Queue_index);
+ kernel_background_buffer_update(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_data_init.cl b/intern/cycles/kernel/kernels/opencl/kernel_data_init.cl
index 18139687eab..1e3c4fa28c7 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_data_init.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_data_init.cl
@@ -14,77 +14,49 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_data_init.h"
__kernel void kernel_ocl_path_trace_data_init(
- ccl_global char *globals,
- ccl_global char *sd_DL_shadow,
+ KernelGlobals *kg,
ccl_constant KernelData *data,
- ccl_global float *per_sample_output_buffers,
+ ccl_global void *split_data_buffer,
+ int num_elements,
+ ccl_global char *ray_state,
ccl_global uint *rng_state,
- ccl_global uint *rng_coop, /* rng array to store rng values for all rays */
- ccl_global float3 *throughput_coop, /* throughput array to store throughput values for all rays */
- ccl_global float *L_transparent_coop, /* L_transparent array to store L_transparent values for all rays */
- PathRadiance *PathRadiance_coop, /* PathRadiance array to store PathRadiance values for all rays */
- ccl_global Ray *Ray_coop, /* Ray array to store Ray information for all rays */
- ccl_global PathState *PathState_coop, /* PathState array to store PathState information for all rays */
- Intersection *Intersection_coop_shadow,
- ccl_global char *ray_state, /* Stores information on current state of a ray */
#define KERNEL_TEX(type, ttype, name) \
ccl_global type *name,
#include "../../kernel_textures.h"
- int start_sample, int sx, int sy, int sw, int sh, int offset, int stride,
- int rng_state_offset_x,
- int rng_state_offset_y,
- int rng_state_stride,
- ccl_global int *Queue_data, /* Memory for queues */
+ int start_sample,
+ int end_sample,
+ int sx, int sy, int sw, int sh, int offset, int stride,
ccl_global int *Queue_index, /* Tracks the number of elements in queues */
int queuesize, /* size (capacity) of the queue */
ccl_global char *use_queues_flag, /* flag to decide if scene-intersect kernel should use queues to fetch ray index */
- ccl_global unsigned int *work_array, /* work array to store which work each ray belongs to */
-#ifdef __WORK_STEALING__
ccl_global unsigned int *work_pool_wgs, /* Work pool for each work group */
unsigned int num_samples, /* Total number of samples per pixel */
-#endif
-#ifdef __KERNEL_DEBUG__
- DebugData *debugdata_coop,
-#endif
- int parallel_samples) /* Number of samples to be processed in parallel */
+ ccl_global float *buffer)
{
- kernel_data_init((KernelGlobals *)globals,
- (ShaderData *)sd_DL_shadow,
+ kernel_data_init(kg,
data,
- per_sample_output_buffers,
- rng_state,
- rng_coop,
- throughput_coop,
- L_transparent_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop_shadow,
+ split_data_buffer,
+ num_elements,
ray_state,
+ rng_state,
#define KERNEL_TEX(type, ttype, name) name,
#include "../../kernel_textures.h"
- start_sample, sx, sy, sw, sh, offset, stride,
- rng_state_offset_x,
- rng_state_offset_y,
- rng_state_stride,
- Queue_data,
+ start_sample,
+ end_sample,
+ sx, sy, sw, sh, offset, stride,
Queue_index,
queuesize,
use_queues_flag,
- work_array,
-#ifdef __WORK_STEALING__
work_pool_wgs,
num_samples,
-#endif
-#ifdef __KERNEL_DEBUG__
- debugdata_coop,
-#endif
- parallel_samples);
+ buffer);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_direct_lighting.cl b/intern/cycles/kernel/kernels/opencl/kernel_direct_lighting.cl
index c6a2c8d050c..5d2f46b319d 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_direct_lighting.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_direct_lighting.cl
@@ -14,74 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_direct_lighting.h"
__kernel void kernel_ocl_path_trace_direct_lighting(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global char *sd, /* Required for direct lighting */
- ccl_global uint *rng_coop, /* Required for direct lighting */
- ccl_global PathState *PathState_coop, /* Required for direct lighting */
- ccl_global int *ISLamp_coop, /* Required for direct lighting */
- ccl_global Ray *LightRay_coop, /* Required for direct lighting */
- ccl_global BsdfEval *BSDFEval_coop, /* Required for direct lighting */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- ccl_global int *Queue_data, /* Queue memory */
- ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
- int queuesize) /* Size (capacity) of each queue */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- ccl_local unsigned int local_queue_atomics;
- if(get_local_id(0) == 0 && get_local_id(1) == 0) {
- local_queue_atomics = 0;
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- char enqueue_flag = 0;
- int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- ray_index = get_ray_index(ray_index,
- QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- Queue_data,
- queuesize,
- 0);
-
-#ifdef __COMPUTE_DEVICE_GPU__
- /* If we are executing on a GPU device, we exit all threads that are not
- * required.
- *
- * If we are executing on a CPU device, then we need to keep all threads
- * active since we have barrier() calls later in the kernel. CPU devices
- * expect all threads to execute the barrier statement.
- */
- if(ray_index == QUEUE_EMPTY_SLOT) {
- return;
- }
-#endif
-
-#ifndef __COMPUTE_DEVICE_GPU__
- if(ray_index != QUEUE_EMPTY_SLOT) {
-#endif
- enqueue_flag = kernel_direct_lighting((KernelGlobals *)kg,
- (ShaderData *)sd,
- rng_coop,
- PathState_coop,
- ISLamp_coop,
- LightRay_coop,
- BSDFEval_coop,
- ray_state,
- ray_index);
-
-#ifndef __COMPUTE_DEVICE_GPU__
- }
-#endif
-
-#ifdef __EMISSION__
- /* Enqueue RAY_SHADOW_RAY_CAST_DL rays. */
- enqueue_ray_index_local(ray_index,
- QUEUE_SHADOW_RAY_CAST_DL_RAYS,
- enqueue_flag,
- queuesize,
- &local_queue_atomics,
- Queue_data,
- Queue_index);
-#endif
+ kernel_direct_lighting(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_holdout_emission_blurring_pathtermination_ao.cl b/intern/cycles/kernel/kernels/opencl/kernel_holdout_emission_blurring_pathtermination_ao.cl
index e063614da1a..7724b8a0bdf 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_holdout_emission_blurring_pathtermination_ao.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_holdout_emission_blurring_pathtermination_ao.cl
@@ -14,110 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_holdout_emission_blurring_pathtermination_ao.h"
__kernel void kernel_ocl_path_trace_holdout_emission_blurring_pathtermination_ao(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global char *sd, /* Required throughout the kernel except probabilistic path termination and AO */
- ccl_global float *per_sample_output_buffers,
- ccl_global uint *rng_coop, /* Required for "kernel_write_data_passes" and AO */
- ccl_global float3 *throughput_coop, /* Required for handling holdout material and AO */
- ccl_global float *L_transparent_coop, /* Required for handling holdout material */
- PathRadiance *PathRadiance_coop, /* Required for "kernel_write_data_passes" and indirect primitive emission */
- ccl_global PathState *PathState_coop, /* Required throughout the kernel and AO */
- Intersection *Intersection_coop, /* Required for indirect primitive emission */
- ccl_global float3 *AOAlpha_coop, /* Required for AO */
- ccl_global float3 *AOBSDF_coop, /* Required for AO */
- ccl_global Ray *AOLightRay_coop, /* Required for AO */
- int sw, int sh, int sx, int sy, int stride,
- ccl_global char *ray_state, /* Denotes the state of each ray */
- ccl_global unsigned int *work_array, /* Denotes the work that each ray belongs to */
- ccl_global int *Queue_data, /* Queue memory */
- ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
- int queuesize, /* Size (capacity) of each queue */
-#ifdef __WORK_STEALING__
- unsigned int start_sample,
-#endif
- int parallel_samples) /* Number of samples to be processed in parallel */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- ccl_local unsigned int local_queue_atomics_bg;
- ccl_local unsigned int local_queue_atomics_ao;
- if(get_local_id(0) == 0 && get_local_id(1) == 0) {
- local_queue_atomics_bg = 0;
- local_queue_atomics_ao = 0;
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- char enqueue_flag = 0;
- char enqueue_flag_AO_SHADOW_RAY_CAST = 0;
- int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- ray_index = get_ray_index(ray_index,
- QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- Queue_data,
- queuesize,
- 0);
-
-#ifdef __COMPUTE_DEVICE_GPU__
- /* If we are executing on a GPU device, we exit all threads that are not
- * required.
- *
- * If we are executing on a CPU device, then we need to keep all threads
- * active since we have barrier() calls later in the kernel. CPU devices
- * expect all threads to execute the barrier statement.
- */
- if(ray_index == QUEUE_EMPTY_SLOT) {
- return;
- }
-#endif /* __COMPUTE_DEVICE_GPU__ */
-
-#ifndef __COMPUTE_DEVICE_GPU__
- if(ray_index != QUEUE_EMPTY_SLOT) {
-#endif
- kernel_holdout_emission_blurring_pathtermination_ao(
- (KernelGlobals *)kg,
- (ShaderData *)sd,
- per_sample_output_buffers,
- rng_coop,
- throughput_coop,
- L_transparent_coop,
- PathRadiance_coop,
- PathState_coop,
- Intersection_coop,
- AOAlpha_coop,
- AOBSDF_coop,
- AOLightRay_coop,
- sw, sh, sx, sy, stride,
- ray_state,
- work_array,
-#ifdef __WORK_STEALING__
- start_sample,
-#endif
- parallel_samples,
- ray_index,
- &enqueue_flag,
- &enqueue_flag_AO_SHADOW_RAY_CAST);
-#ifndef __COMPUTE_DEVICE_GPU__
- }
-#endif
-
- /* Enqueue RAY_UPDATE_BUFFER rays. */
- enqueue_ray_index_local(ray_index,
- QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
- enqueue_flag,
- queuesize,
- &local_queue_atomics_bg,
- Queue_data,
- Queue_index);
-
-#ifdef __AO__
- /* Enqueue to-shadow-ray-cast rays. */
- enqueue_ray_index_local(ray_index,
- QUEUE_SHADOW_RAY_CAST_AO_RAYS,
- enqueue_flag_AO_SHADOW_RAY_CAST,
- queuesize,
- &local_queue_atomics_ao,
- Queue_data,
- Queue_index);
-#endif
+ kernel_holdout_emission_blurring_pathtermination_ao(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_lamp_emission.cl b/intern/cycles/kernel/kernels/opencl/kernel_lamp_emission.cl
index 267bddc2ffc..2b84d0ea43e 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_lamp_emission.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_lamp_emission.cl
@@ -14,67 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_lamp_emission.h"
__kernel void kernel_ocl_path_trace_lamp_emission(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global float3 *throughput_coop, /* Required for lamp emission */
- PathRadiance *PathRadiance_coop, /* Required for lamp emission */
- ccl_global Ray *Ray_coop, /* Required for lamp emission */
- ccl_global PathState *PathState_coop, /* Required for lamp emission */
- Intersection *Intersection_coop, /* Required for lamp emission */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- int sw, int sh,
- ccl_global int *Queue_data, /* Memory for queues */
- ccl_global int *Queue_index, /* Tracks the number of elements in queues */
- int queuesize, /* Size (capacity) of queues */
- ccl_global char *use_queues_flag, /* Used to decide if this kernel should use
- * queues to fetch ray index
- */
- int parallel_samples) /* Number of samples to be processed in parallel */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- int x = get_global_id(0);
- int y = get_global_id(1);
-
- /* We will empty this queue in this kernel. */
- if(get_global_id(0) == 0 && get_global_id(1) == 0) {
- Queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS] = 0;
- }
- /* Fetch use_queues_flag. */
- ccl_local char local_use_queues_flag;
- if(get_local_id(0) == 0 && get_local_id(1) == 0) {
- local_use_queues_flag = use_queues_flag[0];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- int ray_index;
- if(local_use_queues_flag) {
- int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- ray_index = get_ray_index(thread_index,
- QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- Queue_data,
- queuesize,
- 1);
- if(ray_index == QUEUE_EMPTY_SLOT) {
- return;
- }
- } else {
- if(x < (sw * parallel_samples) && y < sh) {
- ray_index = x + y * (sw * parallel_samples);
- } else {
- return;
- }
- }
-
- kernel_lamp_emission((KernelGlobals *)kg,
- throughput_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop,
- ray_state,
- sw, sh,
- use_queues_flag,
- ray_index);
+ kernel_lamp_emission(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_next_iteration_setup.cl b/intern/cycles/kernel/kernels/opencl/kernel_next_iteration_setup.cl
index 6d49b6294a8..e87e367fb9c 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_next_iteration_setup.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_next_iteration_setup.cl
@@ -14,101 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_next_iteration_setup.h"
__kernel void kernel_ocl_path_trace_next_iteration_setup(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global char *sd, /* Required for setting up ray for next iteration */
- ccl_global uint *rng_coop, /* Required for setting up ray for next iteration */
- ccl_global float3 *throughput_coop, /* Required for setting up ray for next iteration */
- PathRadiance *PathRadiance_coop, /* Required for setting up ray for next iteration */
- ccl_global Ray *Ray_coop, /* Required for setting up ray for next iteration */
- ccl_global PathState *PathState_coop, /* Required for setting up ray for next iteration */
- ccl_global Ray *LightRay_dl_coop, /* Required for radiance update - direct lighting */
- ccl_global int *ISLamp_coop, /* Required for radiance update - direct lighting */
- ccl_global BsdfEval *BSDFEval_coop, /* Required for radiance update - direct lighting */
- ccl_global Ray *LightRay_ao_coop, /* Required for radiance update - AO */
- ccl_global float3 *AOBSDF_coop, /* Required for radiance update - AO */
- ccl_global float3 *AOAlpha_coop, /* Required for radiance update - AO */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- ccl_global int *Queue_data, /* Queue memory */
- ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
- int queuesize, /* Size (capacity) of each queue */
- ccl_global char *use_queues_flag) /* flag to decide if scene_intersect kernel should
- * use queues to fetch ray index */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- ccl_local unsigned int local_queue_atomics;
- if(get_local_id(0) == 0 && get_local_id(1) == 0) {
- local_queue_atomics = 0;
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- if(get_global_id(0) == 0 && get_global_id(1) == 0) {
- /* If we are here, then it means that the scene-intersect kernel
- * has already been executed at least once. From now on, the
- * scene-intersect kernel may operate on queues to fetch the ray index.
- */
- use_queues_flag[0] = 1;
-
- /* Mark queue indices of QUEUE_SHADOW_RAY_CAST_AO_RAYS and
- * QUEUE_SHADOW_RAY_CAST_DL_RAYS queues that were made empty during the
- * previous kernel.
- */
- Queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
- Queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
- }
-
- char enqueue_flag = 0;
- int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- ray_index = get_ray_index(ray_index,
- QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- Queue_data,
- queuesize,
- 0);
-
-#ifdef __COMPUTE_DEVICE_GPU__
- /* If we are executing on a GPU device, we exit all threads that are not
- * required.
- *
- * If we are executing on a CPU device, then we need to keep all threads
- * active since we have barrier() calls later in the kernel. CPU devices
- * expect all threads to execute the barrier statement.
- */
- if(ray_index == QUEUE_EMPTY_SLOT) {
- return;
- }
-#endif
-
-#ifndef __COMPUTE_DEVICE_GPU__
- if(ray_index != QUEUE_EMPTY_SLOT) {
-#endif
- enqueue_flag = kernel_next_iteration_setup((KernelGlobals *)kg,
- (ShaderData *)sd,
- rng_coop,
- throughput_coop,
- PathRadiance_coop,
- Ray_coop,
- PathState_coop,
- LightRay_dl_coop,
- ISLamp_coop,
- BSDFEval_coop,
- LightRay_ao_coop,
- AOBSDF_coop,
- AOAlpha_coop,
- ray_state,
- use_queues_flag,
- ray_index);
-#ifndef __COMPUTE_DEVICE_GPU__
- }
-#endif
-
- /* Enqueue RAY_UPDATE_BUFFER rays. */
- enqueue_ray_index_local(ray_index,
- QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
- enqueue_flag,
- queuesize,
- &local_queue_atomics,
- Queue_data,
- Queue_index);
+ kernel_next_iteration_setup(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_queue_enqueue.cl b/intern/cycles/kernel/kernels/opencl/kernel_queue_enqueue.cl
index 3156dc255fb..9ceb6a5c3d8 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_queue_enqueue.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_queue_enqueue.cl
@@ -14,93 +14,13 @@
* limitations under the License.
*/
-#include "../../kernel_compat_opencl.h"
-#include "../../kernel_math.h"
-#include "../../kernel_types.h"
-#include "../../kernel_globals.h"
-#include "../../kernel_queues.h"
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
+#include "split/kernel_queue_enqueue.h"
-/*
- * The kernel "kernel_queue_enqueue" enqueues rays of
- * different ray state into their appropriate Queues;
- * 1. Rays that have been determined to hit the background from the
- * "kernel_scene_intersect" kernel
- * are enqueued in QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
- * 2. Rays that have been determined to be actively participating in path-iteration will be enqueued into QUEUE_ACTIVE_AND_REGENERATED_RAYS.
- *
- * The input and output of the kernel is as follows,
- *
- * ray_state -------------------------------------------|--- kernel_queue_enqueue --|--- Queue_data (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS)
- * Queue_index(QUEUE_ACTIVE_AND_REGENERATED_RAYS) ------| |--- Queue_index (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS)
- * Queue_index(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS) ---| |
- * queuesize -------------------------------------------| |
- *
- * Note on Queues :
- * State of queues during the first time this kernel is called :
- * At entry,
- * Both QUEUE_ACTIVE_AND_REGENERATED_RAYS and QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty.
- * At exit,
- * QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays
- * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_HIT_BACKGROUND rays.
- *
- * State of queue during other times this kernel is called :
- * At entry,
- * QUEUE_ACTIVE_AND_REGENERATED_RAYS will be empty.
- * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will contain RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
- * At exit,
- * QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays.
- * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE, RAY_UPDATE_BUFFER, RAY_HIT_BACKGROUND rays.
- */
__kernel void kernel_ocl_path_trace_queue_enqueue(
- ccl_global int *Queue_data, /* Queue memory */
- ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- int queuesize) /* Size (capacity) of each queue */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- /* We have only 2 cases (Hit/Not-Hit) */
- ccl_local unsigned int local_queue_atomics[2];
-
- int lidx = get_local_id(1) * get_local_size(0) + get_local_id(0);
- int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
-
- if(lidx < 2) {
- local_queue_atomics[lidx] = 0;
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- int queue_number = -1;
-
- if(IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND)) {
- queue_number = QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
- }
- else if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
- queue_number = QUEUE_ACTIVE_AND_REGENERATED_RAYS;
- }
-
- unsigned int my_lqidx;
- if(queue_number != -1) {
- my_lqidx = get_local_queue_index(queue_number, local_queue_atomics);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- if(lidx == 0) {
- local_queue_atomics[QUEUE_ACTIVE_AND_REGENERATED_RAYS] =
- get_global_per_queue_offset(QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- local_queue_atomics,
- Queue_index);
- local_queue_atomics[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] =
- get_global_per_queue_offset(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
- local_queue_atomics,
- Queue_index);
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- unsigned int my_gqidx;
- if(queue_number != -1) {
- my_gqidx = get_global_queue_index(queue_number,
- queuesize,
- my_lqidx,
- local_queue_atomics);
- Queue_data[my_gqidx] = ray_index;
- }
+ kernel_queue_enqueue(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_scene_intersect.cl b/intern/cycles/kernel/kernels/opencl/kernel_scene_intersect.cl
index 7f3f433c7a6..4e083e87d1c 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_scene_intersect.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_scene_intersect.cl
@@ -14,67 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_scene_intersect.h"
__kernel void kernel_ocl_path_trace_scene_intersect(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global uint *rng_coop,
- ccl_global Ray *Ray_coop, /* Required for scene_intersect */
- ccl_global PathState *PathState_coop, /* Required for scene_intersect */
- Intersection *Intersection_coop, /* Required for scene_intersect */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- int sw, int sh,
- ccl_global int *Queue_data, /* Memory for queues */
- ccl_global int *Queue_index, /* Tracks the number of elements in queues */
- int queuesize, /* Size (capacity) of queues */
- ccl_global char *use_queues_flag, /* used to decide if this kernel should use
- * queues to fetch ray index */
-#ifdef __KERNEL_DEBUG__
- DebugData *debugdata_coop,
-#endif
- int parallel_samples) /* Number of samples to be processed in parallel */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- int x = get_global_id(0);
- int y = get_global_id(1);
-
- /* Fetch use_queues_flag */
- ccl_local char local_use_queues_flag;
- if(get_local_id(0) == 0 && get_local_id(1) == 0) {
- local_use_queues_flag = use_queues_flag[0];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- int ray_index;
- if(local_use_queues_flag) {
- int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- ray_index = get_ray_index(thread_index,
- QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- Queue_data,
- queuesize,
- 0);
-
- if(ray_index == QUEUE_EMPTY_SLOT) {
- return;
- }
- } else {
- if(x < (sw * parallel_samples) && y < sh) {
- ray_index = x + y * (sw * parallel_samples);
- } else {
- return;
- }
- }
-
- kernel_scene_intersect((KernelGlobals *)kg,
- rng_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop,
- ray_state,
- sw, sh,
- use_queues_flag,
-#ifdef __KERNEL_DEBUG__
- debugdata_coop,
-#endif
- ray_index);
+ kernel_scene_intersect(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_shader_eval.cl b/intern/cycles/kernel/kernels/opencl/kernel_shader_eval.cl
index c37856c8f30..a2b48b15928 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_shader_eval.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_shader_eval.cl
@@ -14,55 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_shader_eval.h"
__kernel void kernel_ocl_path_trace_shader_eval(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global char *sd, /* Output ShaderData structure to be filled */
- ccl_global uint *rng_coop, /* Required for rbsdf calculation */
- ccl_global Ray *Ray_coop, /* Required for setting up shader from ray */
- ccl_global PathState *PathState_coop, /* Required for all functions in this kernel */
- Intersection *Intersection_coop, /* Required for setting up shader from ray */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- ccl_global int *Queue_data, /* queue memory */
- ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
- int queuesize) /* Size (capacity) of each queue */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- /* Enqeueue RAY_TO_REGENERATE rays into QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. */
- ccl_local unsigned int local_queue_atomics;
- if(get_local_id(0) == 0 && get_local_id(1) == 0) {
- local_queue_atomics = 0;
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- ray_index = get_ray_index(ray_index,
- QUEUE_ACTIVE_AND_REGENERATED_RAYS,
- Queue_data,
- queuesize,
- 0);
-
- if(ray_index == QUEUE_EMPTY_SLOT) {
- return;
- }
-
- char enqueue_flag = (IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) ? 1 : 0;
- enqueue_ray_index_local(ray_index,
- QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
- enqueue_flag,
- queuesize,
- &local_queue_atomics,
- Queue_data,
- Queue_index);
-
- /* Continue on with shader evaluation. */
- kernel_shader_eval((KernelGlobals *)kg,
- (ShaderData *)sd,
- rng_coop,
- Ray_coop,
- PathState_coop,
- Intersection_coop,
- ray_state,
- ray_index);
+ kernel_shader_eval(kg);
}
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_shadow_blocked.cl b/intern/cycles/kernel/kernels/opencl/kernel_shadow_blocked.cl
index edf76fba714..3693f7f9c9d 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_shadow_blocked.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_shadow_blocked.cl
@@ -14,52 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_shadow_blocked.h"
__kernel void kernel_ocl_path_trace_shadow_blocked(
- ccl_global char *kg,
- ccl_constant KernelData *data,
- ccl_global PathState *PathState_coop, /* Required for shadow blocked */
- ccl_global Ray *LightRay_dl_coop, /* Required for direct lighting's shadow blocked */
- ccl_global Ray *LightRay_ao_coop, /* Required for AO's shadow blocked */
- ccl_global char *ray_state,
- ccl_global int *Queue_data, /* Queue memory */
- ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
- int queuesize) /* Size (capacity) of each queue */
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- int lidx = get_local_id(1) * get_local_id(0) + get_local_id(0);
-
- ccl_local unsigned int ao_queue_length;
- ccl_local unsigned int dl_queue_length;
- if(lidx == 0) {
- ao_queue_length = Queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS];
- dl_queue_length = Queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-
- /* flag determining if the current ray is to process shadow ray for AO or DL */
- char shadow_blocked_type = -1;
-
- int ray_index = QUEUE_EMPTY_SLOT;
- int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
- if(thread_index < ao_queue_length + dl_queue_length) {
- if(thread_index < ao_queue_length) {
- ray_index = get_ray_index(thread_index, QUEUE_SHADOW_RAY_CAST_AO_RAYS, Queue_data, queuesize, 1);
- shadow_blocked_type = RAY_SHADOW_RAY_CAST_AO;
- } else {
- ray_index = get_ray_index(thread_index - ao_queue_length, QUEUE_SHADOW_RAY_CAST_DL_RAYS, Queue_data, queuesize, 1);
- shadow_blocked_type = RAY_SHADOW_RAY_CAST_DL;
- }
- }
-
- if(ray_index == QUEUE_EMPTY_SLOT)
- return;
-
- kernel_shadow_blocked((KernelGlobals *)kg,
- PathState_coop,
- LightRay_dl_coop,
- LightRay_ao_coop,
- ray_state,
- shadow_blocked_type,
- ray_index);
+ kernel_shadow_blocked(kg);
}
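
The wrapper logic deleted above moves into split/kernel_shadow_blocked.h, but the trick is worth spelling out: one dispatch services two shadow queues by laying them back to back over the flat thread index, so the first ao_queue_length threads take AO shadow rays and the remainder take direct-lighting rays. A serial C sketch with made-up queue lengths:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int ao_len = 3, dl_len = 2;  /* illustrative queue lengths */
    	for(unsigned int tid = 0; tid < ao_len + dl_len; tid++) {
    		if(tid < ao_len)
    			printf("thread %u -> AO queue slot %u\n", tid, tid);
    		else
    			printf("thread %u -> DL queue slot %u\n", tid, tid - ao_len);
    	}
    	return 0;
    }
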
diff --git a/intern/cycles/kernel/kernels/opencl/kernel_sum_all_radiance.cl b/intern/cycles/kernel/kernels/opencl/kernel_sum_all_radiance.cl
index 88a1ed830af..e945050a110 100644
--- a/intern/cycles/kernel/kernels/opencl/kernel_sum_all_radiance.cl
+++ b/intern/cycles/kernel/kernels/opencl/kernel_sum_all_radiance.cl
@@ -14,25 +14,13 @@
* limitations under the License.
*/
+#include "kernel_compat_opencl.h"
+#include "split/kernel_split_common.h"
#include "split/kernel_sum_all_radiance.h"
__kernel void kernel_ocl_path_trace_sum_all_radiance(
- ccl_constant KernelData *data, /* To get pass_stride to offet into buffer */
- ccl_global float *buffer, /* Output buffer of RenderTile */
- ccl_global float *per_sample_output_buffer, /* Radiance contributed by all samples */
- int parallel_samples, int sw, int sh, int stride,
- int buffer_offset_x,
- int buffer_offset_y,
- int buffer_stride,
- int start_sample)
+ KernelGlobals *kg,
+ ccl_constant KernelData *data)
{
- kernel_sum_all_radiance(data,
- buffer,
- per_sample_output_buffer,
- parallel_samples,
- sw, sh, stride,
- buffer_offset_x,
- buffer_offset_y,
- buffer_stride,
- start_sample);
+ kernel_sum_all_radiance(kg);
}
diff --git a/intern/cycles/kernel/split/kernel_background_buffer_update.h b/intern/cycles/kernel/split/kernel_background_buffer_update.h
index 9bfa71c75ef..6d48856eb44 100644
--- a/intern/cycles/kernel/split/kernel_background_buffer_update.h
+++ b/intern/cycles/kernel/split/kernel_background_buffer_update.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_background_buffer_update kernel.
* This is the fourth kernel in the ray tracing logic, and the third
@@ -69,50 +69,61 @@
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE and RAY_REGENERATED rays
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty
*/
-ccl_device char kernel_background_buffer_update(
- KernelGlobals *kg,
- ccl_global float *per_sample_output_buffers,
- ccl_global uint *rng_state,
- ccl_global uint *rng_coop, /* Required for buffer Update */
- ccl_global float3 *throughput_coop, /* Required for background hit processing */
- PathRadiance *PathRadiance_coop, /* Required for background hit processing and buffer Update */
- ccl_global Ray *Ray_coop, /* Required for background hit processing */
- ccl_global PathState *PathState_coop, /* Required for background hit processing */
- ccl_global float *L_transparent_coop, /* Required for background hit processing and buffer Update */
- ccl_global char *ray_state, /* Stores information on the current state of a ray */
- int sw, int sh, int sx, int sy, int stride,
- int rng_state_offset_x,
- int rng_state_offset_y,
- int rng_state_stride,
- ccl_global unsigned int *work_array, /* Denotes work of each ray */
- int end_sample,
- int start_sample,
-#ifdef __WORK_STEALING__
- ccl_global unsigned int *work_pool_wgs,
- unsigned int num_samples,
-#endif
-#ifdef __KERNEL_DEBUG__
- DebugData *debugdata_coop,
-#endif
- int parallel_samples, /* Number of samples to be processed in parallel */
- int ray_index)
+ccl_device void kernel_background_buffer_update(KernelGlobals *kg)
{
+ ccl_local unsigned int local_queue_atomics;
+ if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+ local_queue_atomics = 0;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ if(ray_index == 0) {
+ /* We will empty this queue in this kernel. */
+ kernel_split_params.queue_index[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] = 0;
+ }
char enqueue_flag = 0;
+ ray_index = get_ray_index(kg, ray_index,
+ QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_size,
+ 1);
+
+#ifdef __COMPUTE_DEVICE_GPU__
+ /* If we are executing on a GPU device, we exit all threads that are not
+ * required.
+ *
+ * If we are executing on a CPU device, then we need to keep all threads
+ * active since we have barrier() calls later in the kernel. CPU devices
+ * expect all threads to execute the barrier statement.
+ */
+ if(ray_index == QUEUE_EMPTY_SLOT) {
+ return;
+ }
+#endif
+
+#ifndef __COMPUTE_DEVICE_GPU__
+ if(ray_index != QUEUE_EMPTY_SLOT) {
+#endif
+
+ ccl_global uint *rng_state = kernel_split_params.rng_state;
+ int stride = kernel_split_params.stride;
+
+ ccl_global char *ray_state = kernel_split_state.ray_state;
#ifdef __KERNEL_DEBUG__
- DebugData *debug_data = &debugdata_coop[ray_index];
+ DebugData *debug_data = &kernel_split_state.debug_data[ray_index];
#endif
- ccl_global PathState *state = &PathState_coop[ray_index];
- PathRadiance *L = L = &PathRadiance_coop[ray_index];
- ccl_global Ray *ray = &Ray_coop[ray_index];
- ccl_global float3 *throughput = &throughput_coop[ray_index];
- ccl_global float *L_transparent = &L_transparent_coop[ray_index];
- ccl_global uint *rng = &rng_coop[ray_index];
-
-#ifdef __WORK_STEALING__
- unsigned int my_work;
- ccl_global float *initial_per_sample_output_buffers;
+ ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
+ PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
+ ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
+ ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
+ ccl_global float *L_transparent = &kernel_split_state.L_transparent[ray_index];
+ ccl_global uint *rng = &kernel_split_state.rng[ray_index];
+ ccl_global float *per_sample_output_buffers = kernel_split_state.per_sample_output_buffers;
+
+ unsigned int work_index;
ccl_global uint *initial_rng;
-#endif
+
unsigned int sample;
unsigned int tile_x;
unsigned int tile_y;
@@ -120,29 +131,17 @@ ccl_device char kernel_background_buffer_update(
unsigned int pixel_y;
unsigned int my_sample_tile;
-#ifdef __WORK_STEALING__
- my_work = work_array[ray_index];
- sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
- get_pixel_tile_position(&pixel_x, &pixel_y,
+ work_index = kernel_split_state.work_array[ray_index];
+ sample = get_work_sample(kg, work_index, ray_index) + kernel_split_params.start_sample;
+ get_work_pixel_tile_position(kg, &pixel_x, &pixel_y,
&tile_x, &tile_y,
- my_work,
- sw, sh, sx, sy,
- parallel_samples,
+ work_index,
ray_index);
my_sample_tile = 0;
- initial_per_sample_output_buffers = per_sample_output_buffers;
initial_rng = rng_state;
-#else /* __WORK_STEALING__ */
- sample = work_array[ray_index];
- int tile_index = ray_index / parallel_samples;
- /* buffer and rng_state's stride is "stride". Find x and y using ray_index */
- tile_x = tile_index % sw;
- tile_y = tile_index / sw;
- my_sample_tile = ray_index - (tile_index * parallel_samples);
-#endif /* __WORK_STEALING__ */
-
- rng_state += (rng_state_offset_x + tile_x) + (rng_state_offset_y + tile_y) * rng_state_stride;
- per_sample_output_buffers += (((tile_x + (tile_y * stride)) * parallel_samples) + my_sample_tile) * kernel_data.film.pass_stride;
+
+ rng_state += kernel_split_params.offset + pixel_x + pixel_y*kernel_split_params.stride;
+ per_sample_output_buffers += ((tile_x + (tile_y * stride)) + my_sample_tile) * kernel_data.film.pass_stride;
if(IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND)) {
/* eval background shader if nothing hit */
@@ -157,7 +156,7 @@ ccl_device char kernel_background_buffer_update(
if(IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND)) {
#ifdef __BACKGROUND__
/* sample background shader */
- float3 L_background = indirect_background(kg, kg->sd_input, state, ray);
+ float3 L_background = indirect_background(kg, kernel_split_state.sd_DL_shadow, state, ray);
path_radiance_accum_background(L, (*throughput), L_background, state->bounce);
#endif
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
@@ -180,41 +179,26 @@ ccl_device char kernel_background_buffer_update(
}
if(IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
-#ifdef __WORK_STEALING__
/* We have completed current work; So get next work */
- int valid_work = get_next_work(work_pool_wgs, &my_work, sw, sh, num_samples, parallel_samples, ray_index);
+ int valid_work = get_next_work(kg, &work_index, ray_index);
if(!valid_work) {
/* If work is invalid, this means no more work is available and the thread may exit */
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_INACTIVE);
}
-#else /* __WORK_STEALING__ */
- if((sample + parallel_samples) >= end_sample) {
- ASSIGN_RAY_STATE(ray_state, ray_index, RAY_INACTIVE);
- }
-#endif /* __WORK_STEALING__ */
if(IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
-#ifdef __WORK_STEALING__
- work_array[ray_index] = my_work;
+ kernel_split_state.work_array[ray_index] = work_index;
/* Get the sample associated with the current work */
- sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
+ sample = get_work_sample(kg, work_index, ray_index) + kernel_split_params.start_sample;
/* Get pixel and tile position associated with current work */
- get_pixel_tile_position(&pixel_x, &pixel_y, &tile_x, &tile_y, my_work, sw, sh, sx, sy, parallel_samples, ray_index);
+ get_work_pixel_tile_position(kg, &pixel_x, &pixel_y, &tile_x, &tile_y, work_index, ray_index);
my_sample_tile = 0;
/* Remap rng_state according to the current work */
- rng_state = initial_rng + ((rng_state_offset_x + tile_x) + (rng_state_offset_y + tile_y) * rng_state_stride);
+ rng_state = initial_rng + kernel_split_params.offset + pixel_x + pixel_y*kernel_split_params.stride;
/* Remap per_sample_output_buffers according to the current work */
- per_sample_output_buffers = initial_per_sample_output_buffers
- + (((tile_x + (tile_y * stride)) * parallel_samples) + my_sample_tile) * kernel_data.film.pass_stride;
-#else /* __WORK_STEALING__ */
- work_array[ray_index] = sample + parallel_samples;
- sample = work_array[ray_index];
-
- /* Get ray position from ray index */
- pixel_x = sx + ((ray_index / parallel_samples) % sw);
- pixel_y = sy + ((ray_index / parallel_samples) / sw);
-#endif /* __WORK_STEALING__ */
+ per_sample_output_buffers = kernel_split_state.per_sample_output_buffers
+ + ((tile_x + (tile_y * stride)) + my_sample_tile) * kernel_data.film.pass_stride;
/* Initialize random numbers and ray. */
kernel_path_trace_setup(kg, rng_state, sample, pixel_x, pixel_y, rng, ray);
@@ -226,7 +210,7 @@ ccl_device char kernel_background_buffer_update(
*throughput = make_float3(1.0f, 1.0f, 1.0f);
*L_transparent = 0.0f;
path_radiance_init(L, kernel_data.film.use_light_pass);
- path_state_init(kg, kg->sd_input, state, rng, sample, ray);
+ path_state_init(kg, kernel_split_state.sd_DL_shadow, state, rng, sample, ray);
#ifdef __KERNEL_DEBUG__
debug_data_init(debug_data);
#endif
@@ -244,5 +228,22 @@ ccl_device char kernel_background_buffer_update(
}
}
}
- return enqueue_flag;
+
+#ifndef __COMPUTE_DEVICE_GPU__
+ }
+#endif
+
+ /* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
+ * These rays will be made active during the next SceneIntersect kernel.
+ */
+ enqueue_ray_index_local(ray_index,
+ QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ enqueue_flag,
+ kernel_split_params.queue_size,
+ &local_queue_atomics,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_index);
}
+
+CCL_NAMESPACE_END
+
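
The #ifdef __COMPUTE_DEVICE_GPU__ bracketing above recurs in most of these split kernels: GPU builds may retire surplus threads early, but CPU builds must march every thread to the collective enqueue at the end, so the body is wrapped in an if instead of returning. A compact sketch of just that control-flow skeleton, assuming QUEUE_EMPTY_SLOT is -1 as in kernel_queues.h:

    #define QUEUE_EMPTY_SLOT -1

    void divergence_pattern(int ray_index)
    {
    #ifdef __COMPUTE_DEVICE_GPU__
    	if(ray_index == QUEUE_EMPTY_SLOT) {
    		return;  /* safe: no barrier below on the GPU path */
    	}
    #else
    	if(ray_index != QUEUE_EMPTY_SLOT) {
    #endif
    		/* ...per-ray work... */
    #ifndef __COMPUTE_DEVICE_GPU__
    	}
    #endif
    	/* every thread, empty-slot or not, reaches the enqueue here */
    }
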
diff --git a/intern/cycles/kernel/split/kernel_data_init.h b/intern/cycles/kernel/split/kernel_data_init.h
index 6e158d53d23..eeeb5294fdc 100644
--- a/intern/cycles/kernel/split/kernel_data_init.h
+++ b/intern/cycles/kernel/split/kernel_data_init.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_data_initialization kernel
* This kernel Initializes structures needed in path-iteration kernels.
@@ -50,72 +50,77 @@
* All slots in queues are initialized to queue empty slot;
* The number of elements in the queues is initialized to 0;
*/
+
ccl_device void kernel_data_init(
KernelGlobals *kg,
- ShaderData *sd_DL_shadow,
ccl_constant KernelData *data,
- ccl_global float *per_sample_output_buffers,
+ ccl_global void *split_data_buffer,
+ int num_elements,
+ ccl_global char *ray_state,
ccl_global uint *rng_state,
- ccl_global uint *rng_coop, /* rng array to store rng values for all rays */
- ccl_global float3 *throughput_coop, /* throughput array to store throughput values for all rays */
- ccl_global float *L_transparent_coop, /* L_transparent array to store L_transparent values for all rays */
- PathRadiance *PathRadiance_coop, /* PathRadiance array to store PathRadiance values for all rays */
- ccl_global Ray *Ray_coop, /* Ray array to store Ray information for all rays */
- ccl_global PathState *PathState_coop, /* PathState array to store PathState information for all rays */
- Intersection *Intersection_coop_shadow,
- ccl_global char *ray_state, /* Stores information on current state of a ray */
+#ifdef __KERNEL_OPENCL__
#define KERNEL_TEX(type, ttype, name) \
ccl_global type *name,
#include "../kernel_textures.h"
+#endif
- int start_sample, int sx, int sy, int sw, int sh, int offset, int stride,
- int rng_state_offset_x,
- int rng_state_offset_y,
- int rng_state_stride,
- ccl_global int *Queue_data, /* Memory for queues */
+ int start_sample,
+ int end_sample,
+ int sx, int sy, int sw, int sh, int offset, int stride,
ccl_global int *Queue_index, /* Tracks the number of elements in queues */
int queuesize, /* size (capacity) of the queue */
ccl_global char *use_queues_flag, /* flag to decide if scene-intersect kernel should use queues to fetch ray index */
- ccl_global unsigned int *work_array, /* work array to store which work each ray belongs to */
-#ifdef __WORK_STEALING__
- ccl_global unsigned int *work_pool_wgs, /* Work pool for each work group */
- unsigned int num_samples, /* Total number of samples per pixel */
-#endif
-#ifdef __KERNEL_DEBUG__
- DebugData *debugdata_coop,
-#endif
- int parallel_samples) /* Number of samples to be processed in parallel */
+ ccl_global unsigned int *work_pools, /* Work pool for each work group */
+ unsigned int num_samples,
+ ccl_global float *buffer)
{
+#ifdef __KERNEL_OPENCL__
kg->data = data;
- kg->sd_input = sd_DL_shadow;
- kg->isect_shadow = Intersection_coop_shadow;
+#endif
+
+ kernel_split_params.x = sx;
+ kernel_split_params.y = sy;
+ kernel_split_params.w = sw;
+ kernel_split_params.h = sh;
+
+ kernel_split_params.offset = offset;
+ kernel_split_params.stride = stride;
+
+ kernel_split_params.rng_state = rng_state;
+
+ kernel_split_params.start_sample = start_sample;
+ kernel_split_params.end_sample = end_sample;
+
+ kernel_split_params.work_pools = work_pools;
+ kernel_split_params.num_samples = num_samples;
+
+ kernel_split_params.queue_index = Queue_index;
+ kernel_split_params.queue_size = queuesize;
+ kernel_split_params.use_queues_flag = use_queues_flag;
+
+ kernel_split_params.buffer = buffer;
+
+ split_data_init(&kernel_split_state, num_elements, split_data_buffer, ray_state);
+
+#ifdef __KERNEL_OPENCL__
#define KERNEL_TEX(type, ttype, name) \
kg->name = name;
#include "../kernel_textures.h"
+#endif
- int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
-
-#ifdef __WORK_STEALING__
- int lid = get_local_id(1) * get_local_size(0) + get_local_id(0);
- /* Initialize work_pool_wgs */
- if(lid == 0) {
- int group_index = get_group_id(1) * get_num_groups(0) + get_group_id(0);
- work_pool_wgs[group_index] = 0;
- }
- barrier(CLK_LOCAL_MEM_FENCE);
-#endif /* __WORK_STEALING__ */
+ int thread_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
/* Initialize queue data and queue index. */
if(thread_index < queuesize) {
/* Initialize active ray queue. */
- Queue_data[QUEUE_ACTIVE_AND_REGENERATED_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
+ kernel_split_state.queue_data[QUEUE_ACTIVE_AND_REGENERATED_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
/* Initialize background and buffer update queue. */
- Queue_data[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
+ kernel_split_state.queue_data[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
/* Initialize shadow ray cast of AO queue. */
- Queue_data[QUEUE_SHADOW_RAY_CAST_AO_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
+ kernel_split_state.queue_data[QUEUE_SHADOW_RAY_CAST_AO_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
/* Initialize shadow ray cast of direct lighting queue. */
- Queue_data[QUEUE_SHADOW_RAY_CAST_DL_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
+ kernel_split_state.queue_data[QUEUE_SHADOW_RAY_CAST_DL_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
}
if(thread_index == 0) {
@@ -126,109 +131,83 @@ ccl_device void kernel_data_init(
/* The scene-intersect kernel should not use the queues very first time.
* since the queue would be empty.
*/
- use_queues_flag[0] = 0;
+ *use_queues_flag = 0;
+ }
+
+ int ray_index = ccl_global_id(0) + ccl_global_id(1) * ccl_global_size(0);
+
+ /* This is the first assignment to ray_state,
+ * so we don't use the ASSIGN_RAY_STATE macro.
+ */
+ kernel_split_state.ray_state[ray_index] = RAY_ACTIVE;
+
+ unsigned int my_sample;
+ unsigned int pixel_x;
+ unsigned int pixel_y;
+ unsigned int tile_x;
+ unsigned int tile_y;
+ unsigned int my_sample_tile;
+
+ unsigned int work_index = 0;
+ /* Get work. */
+ if(!get_next_work(kg, &work_index, ray_index)) {
+ /* No more work, mark ray as inactive */
+ kernel_split_state.ray_state[ray_index] = RAY_INACTIVE;
+
+ return;
}
- int x = get_global_id(0);
- int y = get_global_id(1);
+ /* Get the sample associated with the work. */
+ my_sample = get_work_sample(kg, work_index, ray_index) + start_sample;
- if(x < (sw * parallel_samples) && y < sh) {
- int ray_index = x + y * (sw * parallel_samples);
+ my_sample_tile = 0;
- /* This is the first assignment to ray_state;
- * So we dont use ASSIGN_RAY_STATE macro.
+ /* Get pixel and tile position associated with the work. */
+ get_work_pixel_tile_position(kg, &pixel_x, &pixel_y,
+ &tile_x, &tile_y,
+ work_index,
+ ray_index);
+ kernel_split_state.work_array[ray_index] = work_index;
+
+ rng_state += kernel_split_params.offset + pixel_x + pixel_y*stride;
+
+ ccl_global float *per_sample_output_buffers = kernel_split_state.per_sample_output_buffers;
+ per_sample_output_buffers += ((tile_x + (tile_y * stride)) + (my_sample_tile)) * kernel_data.film.pass_stride;
+
+ /* Initialize random numbers and ray. */
+ kernel_path_trace_setup(kg,
+ rng_state,
+ my_sample,
+ pixel_x, pixel_y,
+ &kernel_split_state.rng[ray_index],
+ &kernel_split_state.ray[ray_index]);
+
+ if(kernel_split_state.ray[ray_index].t != 0.0f) {
+ /* Initialize throughput, L_transparent, Ray, PathState;
+ * These rays proceed with path-iteration.
*/
- ray_state[ray_index] = RAY_ACTIVE;
-
- unsigned int my_sample;
- unsigned int pixel_x;
- unsigned int pixel_y;
- unsigned int tile_x;
- unsigned int tile_y;
- unsigned int my_sample_tile;
-
-#ifdef __WORK_STEALING__
- unsigned int my_work = 0;
- /* Get work. */
- get_next_work(work_pool_wgs, &my_work, sw, sh, num_samples, parallel_samples, ray_index);
- /* Get the sample associated with the work. */
- my_sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
-
- my_sample_tile = 0;
-
- /* Get pixel and tile position associated with the work. */
- get_pixel_tile_position(&pixel_x, &pixel_y,
- &tile_x, &tile_y,
- my_work,
- sw, sh, sx, sy,
- parallel_samples,
- ray_index);
- work_array[ray_index] = my_work;
-#else /* __WORK_STEALING__ */
- unsigned int tile_index = ray_index / parallel_samples;
- tile_x = tile_index % sw;
- tile_y = tile_index / sw;
- my_sample_tile = ray_index - (tile_index * parallel_samples);
- my_sample = my_sample_tile + start_sample;
-
- /* Initialize work array. */
- work_array[ray_index] = my_sample ;
-
- /* Calculate pixel position of this ray. */
- pixel_x = sx + tile_x;
- pixel_y = sy + tile_y;
-#endif /* __WORK_STEALING__ */
-
- rng_state += (rng_state_offset_x + tile_x) + (rng_state_offset_y + tile_y) * rng_state_stride;
-
- /* Initialise per_sample_output_buffers to all zeros. */
- per_sample_output_buffers += (((tile_x + (tile_y * stride)) * parallel_samples) + (my_sample_tile)) * kernel_data.film.pass_stride;
- int per_sample_output_buffers_iterator = 0;
- for(per_sample_output_buffers_iterator = 0;
- per_sample_output_buffers_iterator < kernel_data.film.pass_stride;
- per_sample_output_buffers_iterator++)
- {
- per_sample_output_buffers[per_sample_output_buffers_iterator] = 0.0f;
- }
-
- /* Initialize random numbers and ray. */
- kernel_path_trace_setup(kg,
- rng_state,
- my_sample,
- pixel_x, pixel_y,
- &rng_coop[ray_index],
- &Ray_coop[ray_index]);
-
- if(Ray_coop[ray_index].t != 0.0f) {
- /* Initialize throughput, L_transparent, Ray, PathState;
- * These rays proceed with path-iteration.
- */
- throughput_coop[ray_index] = make_float3(1.0f, 1.0f, 1.0f);
- L_transparent_coop[ray_index] = 0.0f;
- path_radiance_init(&PathRadiance_coop[ray_index], kernel_data.film.use_light_pass);
- path_state_init(kg,
- kg->sd_input,
- &PathState_coop[ray_index],
- &rng_coop[ray_index],
- my_sample,
- &Ray_coop[ray_index]);
+ kernel_split_state.throughput[ray_index] = make_float3(1.0f, 1.0f, 1.0f);
+ kernel_split_state.L_transparent[ray_index] = 0.0f;
+ path_radiance_init(&kernel_split_state.path_radiance[ray_index], kernel_data.film.use_light_pass);
+ path_state_init(kg,
+ kernel_split_state.sd_DL_shadow,
+ &kernel_split_state.path_state[ray_index],
+ &kernel_split_state.rng[ray_index],
+ my_sample,
+ &kernel_split_state.ray[ray_index]);
#ifdef __KERNEL_DEBUG__
- debug_data_init(&debugdata_coop[ray_index]);
+ debug_data_init(&kernel_split_state.debug_data[ray_index]);
#endif
- }
- else {
- /* These rays do not participate in path-iteration. */
- float4 L_rad = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
- /* Accumulate result in output buffer. */
- kernel_write_pass_float4(per_sample_output_buffers, my_sample, L_rad);
- path_rng_end(kg, rng_state, rng_coop[ray_index]);
- ASSIGN_RAY_STATE(ray_state, ray_index, RAY_TO_REGENERATE);
- }
}
-
- /* Mark rest of the ray-state indices as RAY_INACTIVE. */
- if(thread_index < (get_global_size(0) * get_global_size(1)) - (sh * (sw * parallel_samples))) {
- /* First assignment, hence we dont use ASSIGN_RAY_STATE macro */
- ray_state[((sw * parallel_samples) * sh) + thread_index] = RAY_INACTIVE;
+ else {
+ /* These rays do not participate in path-iteration. */
+ float4 L_rad = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
+ /* Accumulate result in output buffer. */
+ kernel_write_pass_float4(per_sample_output_buffers, my_sample, L_rad);
+ path_rng_end(kg, rng_state, kernel_split_state.rng[ray_index]);
+ ASSIGN_RAY_STATE(kernel_split_state.ray_state, ray_index, RAY_TO_REGENERATE);
}
}
+
+CCL_NAMESPACE_END
+
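
With parallel_samples gone, the addressing in the new kernel_data_init is plain 2-D indexing: rng_state advances by offset + pixel_x + pixel_y*stride, and the per-sample buffer by the tile position times the film pass stride (my_sample_tile is always 0 now). A serial C sketch of that arithmetic with illustrative values:

    #include <stdio.h>

    int main(void)
    {
    	int offset = 0, stride = 640;  /* buffer origin and row stride */
    	int pass_stride = 8;           /* stand-in for kernel_data.film.pass_stride */
    	int pixel_x = 5, pixel_y = 2;  /* absolute pixel position */
    	int tile_x = 5, tile_y = 2;    /* tile-local position of the same pixel */

    	int rng_index = offset + pixel_x + pixel_y * stride;
    	long buffer_index = (long)(tile_x + tile_y * stride) * pass_stride;

    	printf("rng_state slot %d, first buffer float %ld\n", rng_index, buffer_index);
    	return 0;
    }
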
diff --git a/intern/cycles/kernel/split/kernel_direct_lighting.h b/intern/cycles/kernel/split/kernel_direct_lighting.h
index 82ca18829d3..43b943710a1 100644
--- a/intern/cycles/kernel/split/kernel_direct_lighting.h
+++ b/intern/cycles/kernel/split/kernel_direct_lighting.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_direct_lighting kernel.
* This is the eighth kernel in the ray tracing logic. This is the seventh
@@ -47,20 +47,42 @@
* QUEUE_SHADOW_RAY_CAST_DL_RAYS queue will be filled with rays for which a shadow_blocked function must be executed, after this
* kernel call. Before this kernel call the QUEUE_SHADOW_RAY_CAST_DL_RAYS will be empty.
*/
-ccl_device char kernel_direct_lighting(
- KernelGlobals *kg,
- ShaderData *sd, /* Required for direct lighting */
- ccl_global uint *rng_coop, /* Required for direct lighting */
- ccl_global PathState *PathState_coop, /* Required for direct lighting */
- ccl_global int *ISLamp_coop, /* Required for direct lighting */
- ccl_global Ray *LightRay_coop, /* Required for direct lighting */
- ccl_global BsdfEval *BSDFEval_coop, /* Required for direct lighting */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- int ray_index)
+ccl_device void kernel_direct_lighting(KernelGlobals *kg)
{
+ ccl_local unsigned int local_queue_atomics;
+ if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+ local_queue_atomics = 0;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
char enqueue_flag = 0;
- if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
- ccl_global PathState *state = &PathState_coop[ray_index];
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ ray_index = get_ray_index(kg, ray_index,
+ QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_size,
+ 0);
+
+#ifdef __COMPUTE_DEVICE_GPU__
+ /* If we are executing on a GPU device, we exit all threads that are not
+ * required.
+ *
+ * If we are executing on a CPU device, then we need to keep all threads
+ * active since we have barrier() calls later in the kernel. CPU devices
+ * expect all threads to execute the barrier statement.
+ */
+ if(ray_index == QUEUE_EMPTY_SLOT) {
+ return;
+ }
+#endif
+
+#ifndef __COMPUTE_DEVICE_GPU__
+ if(ray_index != QUEUE_EMPTY_SLOT) {
+#endif
+
+ if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE)) {
+ ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
+ ShaderData *sd = kernel_split_state.sd;
/* direct lighting */
#ifdef __EMISSION__
@@ -68,7 +90,7 @@ ccl_device char kernel_direct_lighting(
(ccl_fetch(sd, flag) & SD_BSDF_HAS_EVAL)))
{
/* Sample illumination from lights to find path contribution. */
- ccl_global RNG* rng = &rng_coop[ray_index];
+ ccl_global RNG* rng = &kernel_split_state.rng[ray_index];
float light_t = path_state_rng_1D(kg, rng, state, PRNG_LIGHT);
float light_u, light_v;
path_state_rng_2D(kg, rng, state, PRNG_LIGHT_U, &light_u, &light_v);
@@ -89,20 +111,36 @@ ccl_device char kernel_direct_lighting(
BsdfEval L_light;
bool is_lamp;
- if(direct_emission(kg, sd, kg->sd_input, &ls, state, &light_ray, &L_light, &is_lamp, terminate)) {
+ if(direct_emission(kg, sd, kernel_split_state.sd_DL_shadow, &ls, state, &light_ray, &L_light, &is_lamp, terminate)) {
/* Write intermediate data to global memory to access from
* the next kernel.
*/
- LightRay_coop[ray_index] = light_ray;
- BSDFEval_coop[ray_index] = L_light;
- ISLamp_coop[ray_index] = is_lamp;
+ kernel_split_state.light_ray[ray_index] = light_ray;
+ kernel_split_state.bsdf_eval[ray_index] = L_light;
+ kernel_split_state.is_lamp[ray_index] = is_lamp;
/* Mark ray state for next shadow kernel. */
- ADD_RAY_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL);
+ ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL);
enqueue_flag = 1;
}
}
}
#endif /* __EMISSION__ */
}
- return enqueue_flag;
+
+#ifndef __COMPUTE_DEVICE_GPU__
+ }
+#endif
+
+#ifdef __EMISSION__
+ /* Enqueue RAY_SHADOW_RAY_CAST_DL rays. */
+ enqueue_ray_index_local(ray_index,
+ QUEUE_SHADOW_RAY_CAST_DL_RAYS,
+ enqueue_flag,
+ kernel_split_params.queue_size,
+ &local_queue_atomics,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_index);
+#endif
}
+
+CCL_NAMESPACE_END
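
enqueue_ray_index_local above is a collective call: every thread in the work group contributes a 0/1 enqueue_flag, and the flagged ray indices end up packed contiguously in the target queue. A single-threaded C sketch of the net effect, with the atomic queue counter modeled as a plain int:

    #include <stdio.h>

    int main(void)
    {
    	int ray_index[5] = {10, 11, 12, 13, 14};
    	char enqueue_flag[5] = {1, 0, 1, 1, 0};  /* e.g. RAY_SHADOW_RAY_CAST_DL set */
    	int queue[5];
    	int queue_count = 0;  /* stands in for the atomic queue index */

    	for(int i = 0; i < 5; i++)
    		if(enqueue_flag[i])
    			queue[queue_count++] = ray_index[i];

    	for(int i = 0; i < queue_count; i++)
    		printf("queue[%d] = %d\n", i, queue[i]);  /* 10, 12, 13 */
    	return 0;
    }
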
diff --git a/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h b/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h
index 5d951b972ed..3a7f1629e66 100644
--- a/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h
+++ b/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_holdout_emission_blurring_pathtermination_ao kernel.
* This is the sixth kernel in the ray tracing logic. This is the fifth
@@ -70,35 +70,48 @@
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays
* QUEUE_SHADOW_RAY_CAST_AO_RAYS will be filled with rays marked with flag RAY_SHADOW_RAY_CAST_AO
*/
-ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
- KernelGlobals *kg,
- ShaderData *sd, /* Required throughout the kernel except probabilistic path termination and AO */
- ccl_global float *per_sample_output_buffers,
- ccl_global uint *rng_coop, /* Required for "kernel_write_data_passes" and AO */
- ccl_global float3 *throughput_coop, /* Required for handling holdout material and AO */
- ccl_global float *L_transparent_coop, /* Required for handling holdout material */
- PathRadiance *PathRadiance_coop, /* Required for "kernel_write_data_passes" and indirect primitive emission */
- ccl_global PathState *PathState_coop, /* Required throughout the kernel and AO */
- Intersection *Intersection_coop, /* Required for indirect primitive emission */
- ccl_global float3 *AOAlpha_coop, /* Required for AO */
- ccl_global float3 *AOBSDF_coop, /* Required for AO */
- ccl_global Ray *AOLightRay_coop, /* Required for AO */
- int sw, int sh, int sx, int sy, int stride,
- ccl_global char *ray_state, /* Denotes the state of each ray */
- ccl_global unsigned int *work_array, /* Denotes the work that each ray belongs to */
-#ifdef __WORK_STEALING__
- unsigned int start_sample,
-#endif
- int parallel_samples, /* Number of samples to be processed in parallel */
- int ray_index,
- char *enqueue_flag,
- char *enqueue_flag_AO_SHADOW_RAY_CAST)
+ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(KernelGlobals *kg)
{
-#ifdef __WORK_STEALING__
- unsigned int my_work;
+ ccl_local unsigned int local_queue_atomics_bg;
+ ccl_local unsigned int local_queue_atomics_ao;
+ if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+ local_queue_atomics_bg = 0;
+ local_queue_atomics_ao = 0;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ char enqueue_flag = 0;
+ char enqueue_flag_AO_SHADOW_RAY_CAST = 0;
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ ray_index = get_ray_index(kg, ray_index,
+ QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_size,
+ 0);
+
+#ifdef __COMPUTE_DEVICE_GPU__
+ /* If we are executing on a GPU device, we exit all threads that are not
+ * required.
+ *
+ * If we are executing on a CPU device, then we need to keep all threads
+ * active since we have barrier() calls later in the kernel. CPU devices
+ * expect all threads to execute the barrier statement.
+ */
+ if(ray_index == QUEUE_EMPTY_SLOT) {
+ return;
+ }
+#endif /* __COMPUTE_DEVICE_GPU__ */
+
+#ifndef __COMPUTE_DEVICE_GPU__
+ if(ray_index != QUEUE_EMPTY_SLOT) {
+#endif
+
+ int stride = kernel_split_params.stride;
+
+ unsigned int work_index;
unsigned int pixel_x;
unsigned int pixel_y;
-#endif
+
unsigned int tile_x;
unsigned int tile_y;
int my_sample_tile;
@@ -108,31 +121,26 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
ccl_global PathState *state = 0x0;
float3 throughput;
+ ccl_global char *ray_state = kernel_split_state.ray_state;
+ ShaderData *sd = kernel_split_state.sd;
+ ccl_global float *per_sample_output_buffers = kernel_split_state.per_sample_output_buffers;
+
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
- throughput = throughput_coop[ray_index];
- state = &PathState_coop[ray_index];
- rng = &rng_coop[ray_index];
-#ifdef __WORK_STEALING__
- my_work = work_array[ray_index];
- sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
- get_pixel_tile_position(&pixel_x, &pixel_y,
+ throughput = kernel_split_state.throughput[ray_index];
+ state = &kernel_split_state.path_state[ray_index];
+ rng = &kernel_split_state.rng[ray_index];
+
+ work_index = kernel_split_state.work_array[ray_index];
+ sample = get_work_sample(kg, work_index, ray_index) + kernel_split_params.start_sample;
+ get_work_pixel_tile_position(kg, &pixel_x, &pixel_y,
&tile_x, &tile_y,
- my_work,
- sw, sh, sx, sy,
- parallel_samples,
+ work_index,
ray_index);
my_sample_tile = 0;
-#else /* __WORK_STEALING__ */
- sample = work_array[ray_index];
- /* Buffer's stride is "stride"; Find x and y using ray_index. */
- int tile_index = ray_index / parallel_samples;
- tile_x = tile_index % sw;
- tile_y = tile_index / sw;
- my_sample_tile = ray_index - (tile_index * parallel_samples);
-#endif /* __WORK_STEALING__ */
+
per_sample_output_buffers +=
- (((tile_x + (tile_y * stride)) * parallel_samples) + my_sample_tile) *
+ ((tile_x + (tile_y * stride)) + my_sample_tile) *
kernel_data.film.pass_stride;
/* holdout */
@@ -150,18 +158,18 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
holdout_weight = shader_holdout_eval(kg, sd);
}
/* any throughput is ok, should all be identical here */
- L_transparent_coop[ray_index] += average(holdout_weight*throughput);
+ kernel_split_state.L_transparent[ray_index] += average(holdout_weight*throughput);
}
if(ccl_fetch(sd, object_flag) & SD_OBJECT_HOLDOUT_MASK) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
- *enqueue_flag = 1;
+ enqueue_flag = 1;
}
}
#endif /* __HOLDOUT__ */
}
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
- PathRadiance *L = &PathRadiance_coop[ray_index];
+ PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
/* Holdout mask objects do not write data passes. */
kernel_write_data_passes(kg,
per_sample_output_buffers,
@@ -188,7 +196,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
float3 emission = indirect_primitive_emission(
kg,
sd,
- Intersection_coop[ray_index].t,
+ kernel_split_state.isect[ray_index].t,
state->flag,
state->ray_pdf);
path_radiance_accum_emission(L, throughput, emission, state->bounce);
@@ -203,7 +211,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
if(probability == 0.0f) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
- *enqueue_flag = 1;
+ enqueue_flag = 1;
}
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
@@ -211,10 +219,10 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
float terminate = path_state_rng_1D_for_decision(kg, rng, state, PRNG_TERMINATE);
if(terminate >= probability) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
- *enqueue_flag = 1;
+ enqueue_flag = 1;
}
else {
- throughput_coop[ray_index] = throughput/probability;
+ kernel_split_state.throughput[ray_index] = throughput/probability;
}
}
}
@@ -232,8 +240,8 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
float ao_factor = kernel_data.background.ao_factor;
float3 ao_N;
- AOBSDF_coop[ray_index] = shader_bsdf_ao(kg, sd, ao_factor, &ao_N);
- AOAlpha_coop[ray_index] = shader_bsdf_alpha(kg, sd);
+ kernel_split_state.ao_bsdf[ray_index] = shader_bsdf_ao(kg, sd, ao_factor, &ao_N);
+ kernel_split_state.ao_alpha[ray_index] = shader_bsdf_alpha(kg, sd);
float3 ao_D;
float ao_pdf;
@@ -249,12 +257,39 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
#endif
_ray.dP = ccl_fetch(sd, dP);
_ray.dD = differential3_zero();
- AOLightRay_coop[ray_index] = _ray;
+ kernel_split_state.ao_light_ray[ray_index] = _ray;
ADD_RAY_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO);
- *enqueue_flag_AO_SHADOW_RAY_CAST = 1;
+ enqueue_flag_AO_SHADOW_RAY_CAST = 1;
}
}
}
#endif /* __AO__ */
+
+#ifndef __COMPUTE_DEVICE_GPU__
+ }
+#endif
+
+ /* Enqueue RAY_UPDATE_BUFFER rays. */
+ enqueue_ray_index_local(ray_index,
+ QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
+ enqueue_flag,
+ kernel_split_params.queue_size,
+ &local_queue_atomics_bg,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_index);
+
+#ifdef __AO__
+ /* Enqueue to-shadow-ray-cast rays. */
+ enqueue_ray_index_local(ray_index,
+ QUEUE_SHADOW_RAY_CAST_AO_RAYS,
+ enqueue_flag_AO_SHADOW_RAY_CAST,
+ kernel_split_params.queue_size,
+ &local_queue_atomics_ao,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_index);
+#endif
}
+
+CCL_NAMESPACE_END
+
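
The path-termination branch above is standard Russian roulette: a path continues with probability `probability`, and a survivor's throughput is divided by that probability so the estimator stays unbiased. A serial C sketch, with rand01() standing in for path_state_rng_1D_for_decision:

    #include <stdio.h>
    #include <stdlib.h>

    static float rand01(void) { return (float)rand() / (float)RAND_MAX; }

    int main(void)
    {
    	float throughput = 0.5f;
    	float probability = 0.8f;    /* continuation probability */
    	float terminate = rand01();  /* the PRNG_TERMINATE sample */

    	if(probability == 0.0f || terminate >= probability) {
    		printf("terminate path\n");  /* RAY_UPDATE_BUFFER in the kernel */
    	}
    	else {
    		throughput /= probability;   /* compensate to stay unbiased */
    		printf("continue, throughput = %f\n", throughput);
    	}
    	return 0;
    }
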
diff --git a/intern/cycles/kernel/split/kernel_lamp_emission.h b/intern/cycles/kernel/split/kernel_lamp_emission.h
index 3bd0e361078..a9820af3001 100644
--- a/intern/cycles/kernel/split/kernel_lamp_emission.h
+++ b/intern/cycles/kernel/split/kernel_lamp_emission.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_lamp_emission
* This is the 3rd kernel in the ray-tracing logic. This is the second of the
@@ -36,28 +36,39 @@
* sw -------------------------------------------------| |
* sh -------------------------------------------------| |
*/
-ccl_device void kernel_lamp_emission(
- KernelGlobals *kg,
- ccl_global float3 *throughput_coop, /* Required for lamp emission */
- PathRadiance *PathRadiance_coop, /* Required for lamp emission */
- ccl_global Ray *Ray_coop, /* Required for lamp emission */
- ccl_global PathState *PathState_coop, /* Required for lamp emission */
- Intersection *Intersection_coop, /* Required for lamp emission */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- int sw, int sh,
- ccl_global char *use_queues_flag, /* Used to decide if this kernel should use
- * queues to fetch ray index
- */
- int ray_index)
+ccl_device void kernel_lamp_emission(KernelGlobals *kg)
{
- if(IS_STATE(ray_state, ray_index, RAY_ACTIVE) ||
- IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND))
+ /* We will empty this queue in this kernel. */
+ if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
+ kernel_split_params.queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS] = 0;
+ }
+ /* Fetch use_queues_flag. */
+ ccl_local char local_use_queues_flag;
+ if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+ local_use_queues_flag = *kernel_split_params.use_queues_flag;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ if(local_use_queues_flag) {
+ ray_index = get_ray_index(kg, ray_index,
+ QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_size,
+ 1);
+ if(ray_index == QUEUE_EMPTY_SLOT) {
+ return;
+ }
+ }
+
+ if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE) ||
+ IS_STATE(kernel_split_state.ray_state, ray_index, RAY_HIT_BACKGROUND))
{
- PathRadiance *L = &PathRadiance_coop[ray_index];
- ccl_global PathState *state = &PathState_coop[ray_index];
+ PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
+ ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
- float3 throughput = throughput_coop[ray_index];
- Ray ray = Ray_coop[ray_index];
+ float3 throughput = kernel_split_state.throughput[ray_index];
+ Ray ray = kernel_split_state.ray[ray_index];
#ifdef __LAMP_MIS__
if(kernel_data.integrator.use_lamp_mis && !(state->flag & PATH_RAY_CAMERA)) {
@@ -65,7 +76,7 @@ ccl_device void kernel_lamp_emission(
Ray light_ray;
light_ray.P = ray.P - state->ray_t*ray.D;
- state->ray_t += Intersection_coop[ray_index].t;
+ state->ray_t += kernel_split_state.isect[ray_index].t;
light_ray.D = ray.D;
light_ray.t = state->ray_t;
light_ray.time = ray.time;
@@ -74,10 +85,13 @@ ccl_device void kernel_lamp_emission(
/* intersect with lamp */
float3 emission;
- if(indirect_lamp_emission(kg, kg->sd_input, state, &light_ray, &emission)) {
+ if(indirect_lamp_emission(kg, kernel_split_state.sd_DL_shadow, state, &light_ray, &emission)) {
path_radiance_accum_emission(L, throughput, emission, state->bounce);
}
}
#endif /* __LAMP_MIS__ */
}
}
+
+CCL_NAMESPACE_END
+
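
The __LAMP_MIS__ block above reconstructs the indirect lamp ray by rewinding the current ray origin by the accumulated parametric distance ray_t, then growing ray_t by this bounce's intersection distance, so lamps are tested along the entire segment travelled so far. A one-dimensional C sketch of that bookkeeping (float3 reduced to a single component, numbers illustrative):

    #include <stdio.h>

    int main(void)
    {
    	float P = 4.0f, D = 1.0f;  /* current ray origin and direction */
    	float ray_t = 2.0f;        /* distance accumulated on earlier hops */
    	float isect_t = 3.0f;      /* this bounce's hit distance */

    	float light_P = P - ray_t * D;  /* rewind to the segment start */
    	ray_t += isect_t;               /* extend the tested segment */
    	printf("lamp ray: origin %.1f, length %.1f\n", light_P, ray_t);
    	return 0;
    }
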
diff --git a/intern/cycles/kernel/split/kernel_next_iteration_setup.h b/intern/cycles/kernel/split/kernel_next_iteration_setup.h
index 816f3a6fbff..0ff8286e59b 100644
--- a/intern/cycles/kernel/split/kernel_next_iteration_setup.h
+++ b/intern/cycles/kernel/split/kernel_next_iteration_setup.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_setup_next_iteration kernel.
* This is the tenth kernel in the ray tracing logic. This is the ninth
@@ -59,47 +59,76 @@
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE, RAY_REGENERATED and more RAY_UPDATE_BUFFER rays.
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE and more RAY_UPDATE_BUFFER rays
*/
-ccl_device char kernel_next_iteration_setup(
- KernelGlobals *kg,
- ShaderData *sd, /* Required for setting up ray for next iteration */
- ccl_global uint *rng_coop, /* Required for setting up ray for next iteration */
- ccl_global float3 *throughput_coop, /* Required for setting up ray for next iteration */
- PathRadiance *PathRadiance_coop, /* Required for setting up ray for next iteration */
- ccl_global Ray *Ray_coop, /* Required for setting up ray for next iteration */
- ccl_global PathState *PathState_coop, /* Required for setting up ray for next iteration */
- ccl_global Ray *LightRay_dl_coop, /* Required for radiance update - direct lighting */
- ccl_global int *ISLamp_coop, /* Required for radiance update - direct lighting */
- ccl_global BsdfEval *BSDFEval_coop, /* Required for radiance update - direct lighting */
- ccl_global Ray *LightRay_ao_coop, /* Required for radiance update - AO */
- ccl_global float3 *AOBSDF_coop, /* Required for radiance update - AO */
- ccl_global float3 *AOAlpha_coop, /* Required for radiance update - AO */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- ccl_global char *use_queues_flag, /* flag to decide if scene_intersect kernel should
- * use queues to fetch ray index */
- int ray_index)
+ccl_device void kernel_next_iteration_setup(KernelGlobals *kg)
{
+ ccl_local unsigned int local_queue_atomics;
+ if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+ local_queue_atomics = 0;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
+ /* If we are here, the scene-intersect kernel has already been
+ * executed at least once. From now on, the scene-intersect kernel
+ * may operate on queues to fetch the ray index.
+ */
+ *kernel_split_params.use_queues_flag = 1;
+
+ /* Reset the indices of the QUEUE_SHADOW_RAY_CAST_AO_RAYS and
+ * QUEUE_SHADOW_RAY_CAST_DL_RAYS queues, which were emptied during the
+ * previous kernel.
+ */
+ kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
+ kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
+ }
+
char enqueue_flag = 0;
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ ray_index = get_ray_index(kg, ray_index,
+ QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_size,
+ 0);
+
+#ifdef __COMPUTE_DEVICE_GPU__
+ /* If we are executing on a GPU device, we exit all threads that are not
+ * required.
+ *
+ * If we are executing on a CPU device, then we need to keep all threads
+ * active since we have barrier() calls later in the kernel. CPU devices
+ * expect all threads to execute the barrier statement.
+ */
+ if(ray_index == QUEUE_EMPTY_SLOT) {
+ return;
+ }
+#endif
+
+#ifndef __COMPUTE_DEVICE_GPU__
+ if(ray_index != QUEUE_EMPTY_SLOT) {
+#endif
/* Load ShaderData structure. */
PathRadiance *L = NULL;
ccl_global PathState *state = NULL;
+ ccl_global char *ray_state = kernel_split_state.ray_state;
/* Path radiance update for AO/Direct_lighting's shadow blocked. */
if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL) ||
IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO))
{
- state = &PathState_coop[ray_index];
- L = &PathRadiance_coop[ray_index];
- float3 _throughput = throughput_coop[ray_index];
+ state = &kernel_split_state.path_state[ray_index];
+ L = &kernel_split_state.path_radiance[ray_index];
+ float3 _throughput = kernel_split_state.throughput[ray_index];
if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO)) {
- float3 shadow = LightRay_ao_coop[ray_index].P;
- char update_path_radiance = LightRay_ao_coop[ray_index].t;
+ float3 shadow = kernel_split_state.ao_light_ray[ray_index].P;
+ // TODO(mai): investigate correctness here
+ char update_path_radiance = (char)kernel_split_state.ao_light_ray[ray_index].t;
if(update_path_radiance) {
path_radiance_accum_ao(L,
_throughput,
- AOAlpha_coop[ray_index],
- AOBSDF_coop[ray_index],
+ kernel_split_state.ao_alpha[ray_index],
+ kernel_split_state.ao_bsdf[ray_index],
shadow,
state->bounce);
}
@@ -107,35 +136,50 @@ ccl_device char kernel_next_iteration_setup(
}
if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL)) {
- float3 shadow = LightRay_dl_coop[ray_index].P;
- char update_path_radiance = LightRay_dl_coop[ray_index].t;
+ float3 shadow = kernel_split_state.light_ray[ray_index].P;
+ // TODO(mai): investigate correctness here
+ char update_path_radiance = (char)kernel_split_state.light_ray[ray_index].t;
if(update_path_radiance) {
- BsdfEval L_light = BSDFEval_coop[ray_index];
+ BsdfEval L_light = kernel_split_state.bsdf_eval[ray_index];
path_radiance_accum_light(L,
_throughput,
&L_light,
shadow,
1.0f,
state->bounce,
- ISLamp_coop[ray_index]);
+ kernel_split_state.is_lamp[ray_index]);
}
REMOVE_RAY_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL);
}
}
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
- ccl_global float3 *throughput = &throughput_coop[ray_index];
- ccl_global Ray *ray = &Ray_coop[ray_index];
- ccl_global RNG *rng = &rng_coop[ray_index];
- state = &PathState_coop[ray_index];
- L = &PathRadiance_coop[ray_index];
+ ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
+ ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
+ ccl_global RNG *rng = &kernel_split_state.rng[ray_index];
+ state = &kernel_split_state.path_state[ray_index];
+ L = &kernel_split_state.path_radiance[ray_index];
/* Compute direct lighting and next bounce. */
- if(!kernel_path_surface_bounce(kg, rng, sd, throughput, state, L, ray)) {
+ if(!kernel_path_surface_bounce(kg, rng, kernel_split_state.sd, throughput, state, L, ray)) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
enqueue_flag = 1;
}
}
- return enqueue_flag;
+#ifndef __COMPUTE_DEVICE_GPU__
+ }
+#endif
+
+ /* Enqueue RAY_UPDATE_BUFFER rays. */
+ enqueue_ray_index_local(ray_index,
+ QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
+ enqueue_flag,
+ kernel_split_params.queue_size,
+ &local_queue_atomics,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_index);
}
+
+CCL_NAMESPACE_END
+
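
The radiance updates above decode results that the shadow-blocked kernel passes back through the light ray itself: P carries the shadow attenuation and t doubles as an update flag, which is why the char cast earns a TODO. A small C sketch of the decoding (float3 reduced to one component, values illustrative):

    #include <stdio.h>

    struct DemoRay { float P; float t; };

    int main(void)
    {
    	/* As if written back by the shadow_blocked kernel. */
    	struct DemoRay light_ray = { 0.75f, 1.0f };

    	float shadow = light_ray.P;  /* attenuation, reusing the origin field */
    	char update_path_radiance = (char)light_ray.t;
    	if(update_path_radiance)
    		printf("accumulate light, shadow = %.2f\n", shadow);
    	return 0;
    }
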
diff --git a/intern/cycles/kernel/split/kernel_queue_enqueue.h b/intern/cycles/kernel/split/kernel_queue_enqueue.h
new file mode 100644
index 00000000000..66aad705bd4
--- /dev/null
+++ b/intern/cycles/kernel/split/kernel_queue_enqueue.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2011-2016 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CCL_NAMESPACE_BEGIN
+
+/*
+ * The kernel "kernel_queue_enqueue" enqueues rays of
+ * different ray states into their appropriate queues:
+ * 1. Rays that the "kernel_scene_intersect" kernel has determined to hit
+ *    the background are enqueued in QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
+ * 2. Rays that are actively participating in path-iteration are enqueued
+ *    into QUEUE_ACTIVE_AND_REGENERATED_RAYS.
+ *
+ * The input and output of the kernel is as follows,
+ *
+ * ray_state -------------------------------------------|--- kernel_queue_enqueue --|--- Queue_data (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS)
+ * Queue_index(QUEUE_ACTIVE_AND_REGENERATED_RAYS) ------| |--- Queue_index (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS)
+ * Queue_index(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS) ---| |
+ * queuesize -------------------------------------------| |
+ *
+ * Note on Queues :
+ * State of queues during the first time this kernel is called :
+ * At entry,
+ * Both QUEUE_ACTIVE_AND_REGENERATED_RAYS and QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty.
+ * At exit,
+ * QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays
+ * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_HIT_BACKGROUND rays.
+ *
+ * State of queue during other times this kernel is called :
+ * At entry,
+ * QUEUE_ACTIVE_AND_REGENERATED_RAYS will be empty.
+ * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will contain RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
+ * At exit,
+ * QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays.
+ * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE, RAY_UPDATE_BUFFER, RAY_HIT_BACKGROUND rays.
+ */
+ccl_device void kernel_queue_enqueue(KernelGlobals *kg)
+{
+ /* We have only 2 cases (background hit / active). */
+ ccl_local unsigned int local_queue_atomics[2];
+
+ int lidx = ccl_local_id(1) * ccl_local_size(0) + ccl_local_id(0);
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+
+ if(lidx == 0) {
+ local_queue_atomics[0] = 0;
+ local_queue_atomics[1] = 0;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ int queue_number = -1;
+
+ if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_HIT_BACKGROUND)) {
+ queue_number = QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
+ }
+ else if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE)) {
+ queue_number = QUEUE_ACTIVE_AND_REGENERATED_RAYS;
+ }
+
+ unsigned int my_lqidx;
+ if(queue_number != -1) {
+ my_lqidx = get_local_queue_index(queue_number, local_queue_atomics);
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ if(lidx == 0) {
+ local_queue_atomics[QUEUE_ACTIVE_AND_REGENERATED_RAYS] =
+ get_global_per_queue_offset(QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ local_queue_atomics,
+ kernel_split_params.queue_index);
+ local_queue_atomics[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] =
+ get_global_per_queue_offset(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
+ local_queue_atomics,
+ kernel_split_params.queue_index);
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ unsigned int my_gqidx;
+ if(queue_number != -1) {
+ my_gqidx = get_global_queue_index(queue_number,
+ kernel_split_params.queue_size,
+ my_lqidx,
+ local_queue_atomics);
+ kernel_split_state.queue_data[my_gqidx] = ray_index;
+ }
+}
+
+CCL_NAMESPACE_END
+
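
The body above is a classic two-phase stream compaction: each thread first claims a slot within its work group via a local atomic, one thread per group then reserves a contiguous block from the global queue counter, and every thread finally writes its ray index to group_base + local_slot. A single-threaded C sketch with concrete numbers (atomics modeled as plain ints):

    #include <stdio.h>

    int main(void)
    {
    	int global_queue_index = 7;  /* rays queued by earlier work groups */
    	int local_count = 0;         /* the local atomic for one queue */

    	/* Phase 1: per-thread local slots (get_local_queue_index). */
    	int my_lqidx[3];
    	for(int i = 0; i < 3; i++)
    		my_lqidx[i] = local_count++;

    	/* Phase 2: one global reservation per group
    	 * (get_global_per_queue_offset). */
    	int group_base = global_queue_index;
    	global_queue_index += local_count;

    	/* Phase 3: final global slots (get_global_queue_index). */
    	for(int i = 0; i < 3; i++)
    		printf("ray %d -> queue slot %d\n", i, group_base + my_lqidx[i]);
    	return 0;
    }

Slots 7, 8 and 9 come out contiguous, which is what lets the later kernels read the queue densely.
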
diff --git a/intern/cycles/kernel/split/kernel_scene_intersect.h b/intern/cycles/kernel/split/kernel_scene_intersect.h
index 2388580051f..a7e0c7692a2 100644
--- a/intern/cycles/kernel/split/kernel_scene_intersect.h
+++ b/intern/cycles/kernel/split/kernel_scene_intersect.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_scene_intersect kernel.
* This is the second kernel in the ray tracing logic. This is the first
@@ -61,34 +61,41 @@
* QUEUE_HITBF_BUFF_UPDATE_TOREGEN_RAYS - no change
*/
-ccl_device void kernel_scene_intersect(
- KernelGlobals *kg,
- ccl_global uint *rng_coop,
- ccl_global Ray *Ray_coop, /* Required for scene_intersect */
- ccl_global PathState *PathState_coop, /* Required for scene_intersect */
- Intersection *Intersection_coop, /* Required for scene_intersect */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- int sw, int sh,
- ccl_global char *use_queues_flag, /* used to decide if this kernel should use
- * queues to fetch ray index */
-#ifdef __KERNEL_DEBUG__
- DebugData *debugdata_coop,
-#endif
- int ray_index)
+ccl_device void kernel_scene_intersect(KernelGlobals *kg)
{
+ /* Fetch use_queues_flag */
+ ccl_local char local_use_queues_flag;
+ if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+ local_use_queues_flag = *kernel_split_params.use_queues_flag;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ if(local_use_queues_flag) {
+ ray_index = get_ray_index(kg, ray_index,
+ QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_size,
+ 0);
+
+ if(ray_index == QUEUE_EMPTY_SLOT) {
+ return;
+ }
+ }
+
/* All regenerated rays become active here */
- if(IS_STATE(ray_state, ray_index, RAY_REGENERATED))
- ASSIGN_RAY_STATE(ray_state, ray_index, RAY_ACTIVE);
+ if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_REGENERATED))
+ ASSIGN_RAY_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE);
- if(!IS_STATE(ray_state, ray_index, RAY_ACTIVE))
+ if(!IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE))
return;
#ifdef __KERNEL_DEBUG__
- DebugData *debug_data = &debugdata_coop[ray_index];
+ DebugData *debug_data = &kernel_split_state.debug_data[ray_index];
#endif
- Intersection *isect = &Intersection_coop[ray_index];
- PathState state = PathState_coop[ray_index];
- Ray ray = Ray_coop[ray_index];
+ Intersection *isect = &kernel_split_state.isect[ray_index];
+ PathState state = kernel_split_state.path_state[ray_index];
+ Ray ray = kernel_split_state.ray[ray_index];
/* intersect scene */
uint visibility = path_state_ray_visibility(kg, &state);
@@ -96,7 +103,7 @@ ccl_device void kernel_scene_intersect(
#ifdef __HAIR__
float difl = 0.0f, extmax = 0.0f;
uint lcg_state = 0;
- RNG rng = rng_coop[ray_index];
+ RNG rng = kernel_split_state.rng[ray_index];
if(kernel_data.bvh.have_curves) {
if((kernel_data.cam.resolution == 1) && (state.flag & PATH_RAY_CAMERA)) {
@@ -128,6 +135,9 @@ ccl_device void kernel_scene_intersect(
* These rays undergo special processing in the
* background_bufferUpdate kernel.
*/
- ASSIGN_RAY_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND);
+ ASSIGN_RAY_STATE(kernel_split_state.ray_state, ray_index, RAY_HIT_BACKGROUND);
}
}
+
+CCL_NAMESPACE_END
+
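The index fetch at the top of this kernel is the pattern nearly every split kernel now shares: on the first pass each thread maps 1:1 to a pixel, while on later passes the thread index is an offset into a compacted queue of still-active rays, and threads past the queue's end bail out. Roughly, with illustrative names standing in for get_ray_index:

#define QUEUE_EMPTY_SLOT (-1)

int fetch_ray_index(int thread_index, int use_queues,
                    const int *queue_data, int queue_entries)
{
	if(!use_queues) {
		return thread_index;              /* first pass: thread == pixel */
	}
	if(thread_index >= queue_entries) {
		return QUEUE_EMPTY_SLOT;          /* past the end of the queue */
	}
	return queue_data[thread_index];          /* compacted active-ray index */
}

Caching *use_queues_flag in local memory first means only one thread per work-group has to read the flag from global memory.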
diff --git a/intern/cycles/kernel/split/kernel_shader_eval.h b/intern/cycles/kernel/split/kernel_shader_eval.h
index cef64bf5f36..8328f60af39 100644
--- a/intern/cycles/kernel/split/kernel_shader_eval.h
+++ b/intern/cycles/kernel/split/kernel_shader_eval.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_shader_eval kernel
* This kernel is the 5th kernel in the ray tracing logic. This is
@@ -44,27 +44,51 @@
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE and RAY_REGENERATED rays
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE rays
*/
-ccl_device void kernel_shader_eval(
- KernelGlobals *kg,
- ShaderData *sd, /* Output ShaderData structure to be filled */
- ccl_global uint *rng_coop, /* Required for rbsdf calculation */
- ccl_global Ray *Ray_coop, /* Required for setting up shader from ray */
- ccl_global PathState *PathState_coop, /* Required for all functions in this kernel */
- Intersection *Intersection_coop, /* Required for setting up shader from ray */
- ccl_global char *ray_state, /* Denotes the state of each ray */
- int ray_index)
+
+ccl_device void kernel_shader_eval(KernelGlobals *kg)
{
- if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
- Intersection *isect = &Intersection_coop[ray_index];
- ccl_global uint *rng = &rng_coop[ray_index];
- ccl_global PathState *state = &PathState_coop[ray_index];
- Ray ray = Ray_coop[ray_index];
+	/* Enqueue RAY_TO_REGENERATE rays into QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. */
+ ccl_local unsigned int local_queue_atomics;
+ if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+ local_queue_atomics = 0;
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+ int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ ray_index = get_ray_index(kg, ray_index,
+ QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_size,
+ 0);
+
+ if(ray_index == QUEUE_EMPTY_SLOT) {
+ return;
+ }
+
+ char enqueue_flag = (IS_STATE(kernel_split_state.ray_state, ray_index, RAY_TO_REGENERATE)) ? 1 : 0;
+ enqueue_ray_index_local(ray_index,
+ QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
+ enqueue_flag,
+ kernel_split_params.queue_size,
+ &local_queue_atomics,
+ kernel_split_state.queue_data,
+ kernel_split_params.queue_index);
+
+ /* Continue on with shader evaluation. */
+ if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE)) {
+ Intersection *isect = &kernel_split_state.isect[ray_index];
+ ccl_global uint *rng = &kernel_split_state.rng[ray_index];
+ ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
+ Ray ray = kernel_split_state.ray[ray_index];
shader_setup_from_ray(kg,
- sd,
+ kernel_split_state.sd,
isect,
&ray);
float rbsdf = path_state_rng_1D_for_decision(kg, rng, state, PRNG_BSDF);
- shader_eval_surface(kg, sd, rng, state, rbsdf, state->flag, SHADER_CONTEXT_MAIN);
+ shader_eval_surface(kg, kernel_split_state.sd, rng, state, rbsdf, state->flag, SHADER_CONTEXT_MAIN);
}
}
+
+CCL_NAMESPACE_END
+
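All of this is driven by the per-ray ray_state byte: kernels test an exclusive state (active, regenerated, to-regenerate, hit-background, ...) and reassign it as the wavefront advances. A simplified sketch of that bookkeeping, assuming plain byte states with hypothetical values; the real macros (IS_STATE, IS_FLAG, ASSIGN_RAY_STATE) also pack flag bits alongside the exclusive state:

enum {
	RAY_ACTIVE        = 0,
	RAY_REGENERATED   = 1,
	RAY_TO_REGENERATE = 2,
};

static int is_state(const char *ray_state, int ray_index, char state)
{
	return ray_state[ray_index] == state;
}

static void assign_ray_state(char *ray_state, int ray_index, char state)
{
	ray_state[ray_index] = state;
}

/* Mirrors the enqueue_flag computation above. */
static char regenerate_flag(const char *ray_state, int ray_index)
{
	return is_state(ray_state, ray_index, RAY_TO_REGENERATE) ? 1 : 0;
}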
diff --git a/intern/cycles/kernel/split/kernel_shadow_blocked.h b/intern/cycles/kernel/split/kernel_shadow_blocked.h
index 6153af47f96..f862c37ea64 100644
--- a/intern/cycles/kernel/split/kernel_shadow_blocked.h
+++ b/intern/cycles/kernel/split/kernel_shadow_blocked.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "kernel_split_common.h"
+CCL_NAMESPACE_BEGIN
/* Note on kernel_shadow_blocked kernel.
* This is the ninth kernel in the ray tracing logic. This is the eighth
@@ -45,24 +45,47 @@
* and RAY_SHADOW_RAY_CAST_DL respectively, during kernel entry.
* QUEUE_SHADOW_RAY_CAST_AO_RAYS and QUEUE_SHADOW_RAY_CAST_DL_RAYS will be empty at kernel exit.
*/
-ccl_device void kernel_shadow_blocked(
- KernelGlobals *kg,
- ccl_global PathState *PathState_coop, /* Required for shadow blocked */
- ccl_global Ray *LightRay_dl_coop, /* Required for direct lighting's shadow blocked */
- ccl_global Ray *LightRay_ao_coop, /* Required for AO's shadow blocked */
- ccl_global char *ray_state,
- char shadow_blocked_type,
- int ray_index)
+ccl_device void kernel_shadow_blocked(KernelGlobals *kg)
{
+	int lidx = ccl_local_id(1) * ccl_local_size(0) + ccl_local_id(0);
+
+ ccl_local unsigned int ao_queue_length;
+ ccl_local unsigned int dl_queue_length;
+ if(lidx == 0) {
+ ao_queue_length = kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS];
+ dl_queue_length = kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS];
+ }
+ ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+	/* Flag determining whether the current ray processes an AO or a DL shadow ray. */
+ char shadow_blocked_type = -1;
+
+ int ray_index = QUEUE_EMPTY_SLOT;
+ int thread_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+ if(thread_index < ao_queue_length + dl_queue_length) {
+ if(thread_index < ao_queue_length) {
+ ray_index = get_ray_index(kg, thread_index, QUEUE_SHADOW_RAY_CAST_AO_RAYS,
+ kernel_split_state.queue_data, kernel_split_params.queue_size, 1);
+ shadow_blocked_type = RAY_SHADOW_RAY_CAST_AO;
+ } else {
+ ray_index = get_ray_index(kg, thread_index - ao_queue_length, QUEUE_SHADOW_RAY_CAST_DL_RAYS,
+ kernel_split_state.queue_data, kernel_split_params.queue_size, 1);
+ shadow_blocked_type = RAY_SHADOW_RAY_CAST_DL;
+ }
+ }
+
+ if(ray_index == QUEUE_EMPTY_SLOT)
+ return;
+
/* Flag determining if we need to update L. */
char update_path_radiance = 0;
- if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL) ||
- IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO))
+ if(IS_FLAG(kernel_split_state.ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL) ||
+ IS_FLAG(kernel_split_state.ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO))
{
- ccl_global PathState *state = &PathState_coop[ray_index];
- ccl_global Ray *light_ray_dl_global = &LightRay_dl_coop[ray_index];
- ccl_global Ray *light_ray_ao_global = &LightRay_ao_coop[ray_index];
+ ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
+ ccl_global Ray *light_ray_dl_global = &kernel_split_state.light_ray[ray_index];
+ ccl_global Ray *light_ray_ao_global = &kernel_split_state.ao_light_ray[ray_index];
ccl_global Ray *light_ray_global =
shadow_blocked_type == RAY_SHADOW_RAY_CAST_AO
@@ -71,7 +94,7 @@ ccl_device void kernel_shadow_blocked(
float3 shadow;
update_path_radiance = !(shadow_blocked(kg,
- kg->sd_input,
+ kernel_split_state.sd_DL_shadow,
state,
light_ray_global,
&shadow));
@@ -83,3 +106,6 @@ ccl_device void kernel_shadow_blocked(
light_ray_global->t = update_path_radiance;
}
}
+
+CCL_NAMESPACE_END
+
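Folding the AO and DL shadow rays into a single dispatch is a plain index split: threads [0, ao_len) consume the AO queue and threads [ao_len, ao_len + dl_len) consume the DL queue. A host-side C sketch of the mapping, names illustrative:

#include <stdio.h>

#define QUEUE_EMPTY_SLOT (-1)

/* Map a flat thread index onto two consecutive queues; *is_ao reports
 * which queue the index came from. */
int map_two_queues(int thread_index,
                   const int *ao_queue, int ao_len,
                   const int *dl_queue, int dl_len,
                   int *is_ao)
{
	if(thread_index < ao_len) {
		*is_ao = 1;
		return ao_queue[thread_index];
	}
	if(thread_index < ao_len + dl_len) {
		*is_ao = 0;
		return dl_queue[thread_index - ao_len];
	}
	return QUEUE_EMPTY_SLOT;
}

int main(void)
{
	int ao_queue[] = {4, 7};
	int dl_queue[] = {1, 2, 9};
	for(int t = 0; t < 6; t++) {
		int is_ao = 0;
		int ray = map_two_queues(t, ao_queue, 2, dl_queue, 3, &is_ao);
		printf("thread %d -> ray %d (%s)\n", t, ray,
		       ray == QUEUE_EMPTY_SLOT ? "empty" : (is_ao ? "AO" : "DL"));
	}
	return 0;
}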
diff --git a/intern/cycles/kernel/split/kernel_split_common.h b/intern/cycles/kernel/split/kernel_split_common.h
index 2135ee22b2e..c3963667aea 100644
--- a/intern/cycles/kernel/split/kernel_split_common.h
+++ b/intern/cycles/kernel/split/kernel_split_common.h
@@ -17,9 +17,11 @@
#ifndef __KERNEL_SPLIT_H__
#define __KERNEL_SPLIT_H__
-#include "kernel_compat_opencl.h"
#include "kernel_math.h"
#include "kernel_types.h"
+
+#include "kernel_split_data.h"
+
#include "kernel_globals.h"
#include "kernel_image_opencl.h"
diff --git a/intern/cycles/kernel/split/kernel_split_data.h b/intern/cycles/kernel/split/kernel_split_data.h
new file mode 100644
index 00000000000..18f062ef682
--- /dev/null
+++ b/intern/cycles/kernel/split/kernel_split_data.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2011-2016 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __KERNEL_SPLIT_DATA_H__
+#define __KERNEL_SPLIT_DATA_H__
+
+CCL_NAMESPACE_BEGIN
+
+/* Parameters used by the split kernels; a single struct avoids passing each of these to every kernel. */
+
+typedef struct SplitParams {
+ int x;
+ int y;
+ int w;
+ int h;
+
+ int offset;
+ int stride;
+
+ ccl_global uint *rng_state;
+
+ int start_sample;
+ int end_sample;
+
+ ccl_global unsigned int *work_pools;
+ unsigned int num_samples;
+
+ ccl_global int *queue_index;
+ int queue_size;
+ ccl_global char *use_queues_flag;
+
+ ccl_global float *buffer;
+} SplitParams;
+
+/* Global memory variables [porting]; this memory is used for
+ * co-operation between different kernels: data written by one
+ * kernel is available to other kernels via this global memory.
+ */
+
+/* SPLIT_DATA_ENTRY(type, name, num) */
+
+#if defined(WITH_CYCLES_DEBUG) || defined(__KERNEL_DEBUG__)
+/* DebugData memory */
+# define SPLIT_DATA_DEBUG_ENTRIES \
+ SPLIT_DATA_ENTRY(DebugData, debug_data, 1)
+#else
+# define SPLIT_DATA_DEBUG_ENTRIES
+#endif
+
+#define SPLIT_DATA_ENTRIES \
+ SPLIT_DATA_ENTRY(ccl_global RNG, rng, 1) \
+ SPLIT_DATA_ENTRY(ccl_global float3, throughput, 1) \
+ SPLIT_DATA_ENTRY(ccl_global float, L_transparent, 1) \
+ SPLIT_DATA_ENTRY(PathRadiance, path_radiance, 1) \
+ SPLIT_DATA_ENTRY(ccl_global Ray, ray, 1) \
+ SPLIT_DATA_ENTRY(ccl_global PathState, path_state, 1) \
+ SPLIT_DATA_ENTRY(Intersection, isect, 1) \
+ SPLIT_DATA_ENTRY(ccl_global float3, ao_alpha, 1) \
+ SPLIT_DATA_ENTRY(ccl_global float3, ao_bsdf, 1) \
+ SPLIT_DATA_ENTRY(ccl_global Ray, ao_light_ray, 1) \
+ SPLIT_DATA_ENTRY(ccl_global BsdfEval, bsdf_eval, 1) \
+ SPLIT_DATA_ENTRY(ccl_global int, is_lamp, 1) \
+ SPLIT_DATA_ENTRY(ccl_global Ray, light_ray, 1) \
+ SPLIT_DATA_ENTRY(Intersection, isect_shadow, 2) \
+ SPLIT_DATA_ENTRY(ccl_global int, queue_data, (NUM_QUEUES*2)) /* TODO(mai): this is too large? */ \
+ SPLIT_DATA_ENTRY(ccl_global uint, work_array, 1) \
+ SPLIT_DATA_DEBUG_ENTRIES \
+
+/* struct that holds pointers to data in the shared state buffer */
+typedef struct SplitData {
+#define SPLIT_DATA_ENTRY(type, name, num) type *name;
+ SPLIT_DATA_ENTRIES
+#undef SPLIT_DATA_ENTRY
+
+	/* Size calculation for these is non-trivial, so they are left out of SPLIT_DATA_ENTRIES and handled separately. */
+ ShaderData *sd;
+ ShaderData *sd_DL_shadow;
+ ccl_global float *per_sample_output_buffers;
+
+	/* This is actually stored in a separate buffer from the rest of the split state data
+	 * (so it can be read back by the host easily), but it is used the same way as the
+	 * other data, so it is kept in this struct as well.
+	 */
+ ccl_global char *ray_state;
+} SplitData;
+
+#define SIZEOF_SD(max_closure) (sizeof(ShaderData) - (sizeof(ShaderClosure) * (MAX_CLOSURE - (max_closure))))
+
+ccl_device_inline size_t split_data_buffer_size(size_t num_elements,
+ size_t max_closure,
+ size_t per_thread_output_buffer_size)
+{
+ size_t size = 0;
+#define SPLIT_DATA_ENTRY(type, name, num) + align_up(num_elements * num * sizeof(type), 16)
+ size = size SPLIT_DATA_ENTRIES;
+#undef SPLIT_DATA_ENTRY
+
+ /* TODO(sergey): This will actually over-allocate if
+ * particular kernel does not support multiclosure.
+ */
+ size += align_up(num_elements * SIZEOF_SD(max_closure), 16); /* sd */
+ size += align_up(2 * num_elements * SIZEOF_SD(max_closure), 16); /* sd_DL_shadow */
+ size += align_up(num_elements * per_thread_output_buffer_size, 16); /* per_sample_output_buffers */
+
+ return size;
+}
+
+ccl_device_inline void split_data_init(ccl_global SplitData *split_data,
+ size_t num_elements,
+ ccl_global void *data,
+ ccl_global char *ray_state)
+{
+ ccl_global char *p = (ccl_global char*)data;
+
+#define SPLIT_DATA_ENTRY(type, name, num) \
+ split_data->name = (type*)p; p += align_up(num_elements * num * sizeof(type), 16);
+ SPLIT_DATA_ENTRIES
+#undef SPLIT_DATA_ENTRY
+
+ split_data->sd = (ShaderData*)p;
+ p += align_up(num_elements * SIZEOF_SD(MAX_CLOSURE), 16);
+
+ split_data->sd_DL_shadow = (ShaderData*)p;
+ p += align_up(2 * num_elements * SIZEOF_SD(MAX_CLOSURE), 16);
+
+ split_data->per_sample_output_buffers = (ccl_global float*)p;
+	/* Last entry, so no need to advance p further:
+	 * p += align_up(num_elements * per_thread_output_buffer_size, 16); */
+
+ split_data->ray_state = ray_state;
+}
+
+#define kernel_split_state (kg->split_data)
+#define kernel_split_params (kg->split_param_data)
+
+CCL_NAMESPACE_END
+
+#endif /* __KERNEL_SPLIT_DATA_H__ */
+
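The SPLIT_DATA_ENTRY table above is a classic X-macro doing triple duty: one expansion declares the struct members, a second sums the aligned sizes, and a third carves typed pointers out of a single state allocation, which is what lets the split kernels share one buffer instead of dozens of separately allocated arrays. A self-contained C sketch of the technique with two toy entries (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

static size_t align_up(size_t offset, size_t alignment)
{
	return (offset + alignment - 1) & ~(alignment - 1);
}

/* ENTRY(type, name, num) -- one row per per-ray array in the state buffer. */
#define ENTRIES \
	ENTRY(float, throughput, 1) \
	ENTRY(int,   queue_data, 2)

/* Expansion 1: struct of pointers into the shared buffer. */
typedef struct State {
#define ENTRY(type, name, num) type *name;
	ENTRIES
#undef ENTRY
} State;

/* Expansion 2: total buffer size for num_elements rays. */
static size_t state_buffer_size(size_t num_elements)
{
	size_t size = 0;
#define ENTRY(type, name, num) + align_up(num_elements * (num) * sizeof(type), 16)
	size = size ENTRIES;
#undef ENTRY
	return size;
}

/* Expansion 3: carve typed, 16-byte-aligned pointers out of one allocation. */
static void state_init(State *state, size_t num_elements, void *data)
{
	char *p = (char*)data;
#define ENTRY(type, name, num) \
	state->name = (type*)p; p += align_up(num_elements * (num) * sizeof(type), 16);
	ENTRIES
#undef ENTRY
}

int main(void)
{
	size_t n = 100;
	void *buffer = malloc(state_buffer_size(n));
	State state;
	state_init(&state, n, buffer);
	state.throughput[0] = 1.0f;
	state.queue_data[2 * n - 1] = 42;
	printf("buffer size: %zu bytes\n", state_buffer_size(n));
	free(buffer);
	return 0;
}

The trick keeps the size and layout functions in lockstep automatically: adding a row to the table updates the struct, split_data_buffer_size() and split_data_init() at once.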
diff --git a/intern/cycles/kernel/split/kernel_sum_all_radiance.h b/intern/cycles/kernel/split/kernel_sum_all_radiance.h
index a21e9b6a0b1..fdceae2dafb 100644
--- a/intern/cycles/kernel/split/kernel_sum_all_radiance.h
+++ b/intern/cycles/kernel/split/kernel_sum_all_radiance.h
@@ -14,46 +14,44 @@
* limitations under the License.
*/
-#include "../kernel_compat_opencl.h"
-#include "../kernel_math.h"
-#include "../kernel_types.h"
-#include "../kernel_globals.h"
+CCL_NAMESPACE_BEGIN
 /* Since we process multiple samples in parallel, the output radiance of different
 * samples is stored in different locations; this kernel combines the radiance
 * contributed by all samples and stores it in the RenderTile's output buffer.
 */
-ccl_device void kernel_sum_all_radiance(
- ccl_constant KernelData *data, /* To get pass_stride to offet into buffer */
- ccl_global float *buffer, /* Output buffer of RenderTile */
- ccl_global float *per_sample_output_buffer, /* Radiance contributed by all samples */
- int parallel_samples, int sw, int sh, int stride,
- int buffer_offset_x,
- int buffer_offset_y,
- int buffer_stride,
- int start_sample)
+
+ccl_device void kernel_sum_all_radiance(KernelGlobals *kg)
{
- int x = get_global_id(0);
- int y = get_global_id(1);
+ int x = ccl_global_id(0);
+ int y = ccl_global_id(1);
+
+ ccl_global float *buffer = kernel_split_params.buffer;
+ int sw = kernel_split_params.w;
+ int sh = kernel_split_params.h;
+ int stride = kernel_split_params.stride;
+ int start_sample = kernel_split_params.start_sample;
if(x < sw && y < sh) {
- buffer += ((buffer_offset_x + x) + (buffer_offset_y + y) * buffer_stride) * (data->film.pass_stride);
- per_sample_output_buffer += ((x + y * stride) * parallel_samples) * (data->film.pass_stride);
+ ccl_global float *per_sample_output_buffer = kernel_split_state.per_sample_output_buffers;
+ per_sample_output_buffer += (x + y * stride) * (kernel_data.film.pass_stride);
+
+ x += kernel_split_params.x;
+ y += kernel_split_params.y;
- int sample_stride = (data->film.pass_stride);
+ buffer += (kernel_split_params.offset + x + y*stride) * (kernel_data.film.pass_stride);
- int sample_iterator = 0;
int pass_stride_iterator = 0;
- int num_floats = data->film.pass_stride;
-
- for(sample_iterator = 0; sample_iterator < parallel_samples; sample_iterator++) {
- for(pass_stride_iterator = 0; pass_stride_iterator < num_floats; pass_stride_iterator++) {
- *(buffer + pass_stride_iterator) =
- (start_sample == 0 && sample_iterator == 0)
- ? *(per_sample_output_buffer + pass_stride_iterator)
- : *(buffer + pass_stride_iterator) + *(per_sample_output_buffer + pass_stride_iterator);
- }
- per_sample_output_buffer += sample_stride;
+ int num_floats = kernel_data.film.pass_stride;
+
+ for(pass_stride_iterator = 0; pass_stride_iterator < num_floats; pass_stride_iterator++) {
+ *(buffer + pass_stride_iterator) =
+ (start_sample == 0)
+ ? *(per_sample_output_buffer + pass_stride_iterator)
+ : *(buffer + pass_stride_iterator) + *(per_sample_output_buffer + pass_stride_iterator);
}
}
}
+
+CCL_NAMESPACE_END
+
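The accumulation rule in the rewritten loop is the usual progressive update: the first sample batch overwrites the tile buffer and every later batch adds into it. In isolation, with illustrative names:

/* Accumulate one batch's worth of floats into the tile buffer.
 * Mirrors the inner loop above. */
static void sum_radiance(float *buffer,
                         const float *per_sample_output_buffer,
                         int num_floats, int start_sample)
{
	for(int i = 0; i < num_floats; i++) {
		buffer[i] = (start_sample == 0)
		        ? per_sample_output_buffer[i]
		        : buffer[i] + per_sample_output_buffer[i];
	}
}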
diff --git a/intern/cycles/util/util_types.h b/intern/cycles/util/util_types.h
index a000fae4bd6..751d875fb22 100644
--- a/intern/cycles/util/util_types.h
+++ b/intern/cycles/util/util_types.h
@@ -397,11 +397,6 @@ ccl_device_inline float4 make_float4(float x, float y, float z, float w)
return a;
}
-ccl_device_inline int align_up(int offset, int alignment)
-{
- return (offset + alignment - 1) & ~(alignment - 1);
-}
-
ccl_device_inline int3 make_int3(int i)
{
#ifdef __KERNEL_SSE__
@@ -476,6 +471,21 @@ ccl_device_inline int4 make_int4(const float3& f)
#endif
+ccl_device_inline int align_up(int offset, int alignment)
+{
+ return (offset + alignment - 1) & ~(alignment - 1);
+}
+
+ccl_device_inline int round_up(int x, int multiple)
+{
+ return ((x + multiple - 1) / multiple) * multiple;
+}
+
+ccl_device_inline int round_down(int x, int multiple)
+{
+ return (x / multiple) * multiple;
+}
+
/* Interpolation types for textures
* cuda also use texture space to store other objects */
enum InterpolationType {
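One caveat worth noting on the relocated helpers: align_up relies on alignment being a power of two (the mask ~(alignment - 1) only clears low bits in that case), whereas round_up and round_down divide and so work for any positive multiple. A quick standalone check:

#include <assert.h>
#include <stdio.h>

static int align_up(int offset, int alignment)   /* power-of-two alignments only */
{
	return (offset + alignment - 1) & ~(alignment - 1);
}

static int round_up(int x, int multiple)         /* any positive multiple */
{
	return ((x + multiple - 1) / multiple) * multiple;
}

static int round_down(int x, int multiple)
{
	return (x / multiple) * multiple;
}

int main(void)
{
	assert(align_up(17, 16) == 32);
	assert(round_up(17, 16) == 32);
	assert(round_up(17, 12) == 24);   /* align_up(17, 12) gives 20: the bit trick needs a power of two */
	assert(round_down(17, 12) == 12);
	printf("ok\n");
	return 0;
}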