git.blender.org/blender.git
Diffstat (limited to 'intern/cycles/device')
-rw-r--r--  intern/cycles/device/device.cpp            85
-rw-r--r--  intern/cycles/device/device.h              82
-rw-r--r--  intern/cycles/device/device_cpu.cpp        25
-rw-r--r--  intern/cycles/device/device_cuda.cpp      105
-rw-r--r--  intern/cycles/device/device_multi.cpp      11
-rw-r--r--  intern/cycles/device/device_network.cpp    21
-rw-r--r--  intern/cycles/device/device_opencl.cpp   2913
-rw-r--r--  intern/cycles/device/device_task.cpp        2
-rw-r--r--  intern/cycles/device/device_task.h         11
9 files changed, 2868 insertions(+), 387 deletions(-)
diff --git a/intern/cycles/device/device.cpp b/intern/cycles/device/device.cpp
index 3a33b8fb68b..5cad8e1b49c 100644
--- a/intern/cycles/device/device.cpp
+++ b/intern/cycles/device/device.cpp
@@ -33,6 +33,13 @@ CCL_NAMESPACE_BEGIN
/* Device */
+Device::~Device()
+{
+ if(!background && vertex_buffer != 0) {
+ glDeleteBuffers(1, &vertex_buffer);
+ }
+}
+
void Device::pixels_alloc(device_memory& mem)
{
mem_alloc(mem, MEM_READ_WRITE);
@@ -51,7 +58,7 @@ void Device::pixels_free(device_memory& mem)
mem_free(mem);
}
-void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int dy, int width, int height, bool transparent,
+void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
const DeviceDrawParams &draw_params)
{
pixels_copy_from(rgba, y, w, h);
@@ -67,6 +74,9 @@ void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int dy, int w
/* for multi devices, this assumes the inefficient method that we allocate
* all pixels on the device even though we only render to a subset */
GLhalf *data_pointer = (GLhalf*)rgba.data_pointer;
+ float vbuffer[16], *basep;
+ float *vp = NULL;
+
data_pointer += 4*y*w;
/* draw half float texture, GLSL shader for display transform assumed to be bound */
@@ -83,23 +93,63 @@ void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int dy, int w
draw_params.bind_display_space_shader_cb();
}
- glPushMatrix();
- glTranslatef(0.0f, (float)dy, 0.0f);
+ if(GLEW_VERSION_1_5) {
+ if(!vertex_buffer)
+ glGenBuffers(1, &vertex_buffer);
+
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
+ glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
- glBegin(GL_QUADS);
-
- glTexCoord2f(0.0f, 0.0f);
- glVertex2f(0.0f, 0.0f);
- glTexCoord2f(1.0f, 0.0f);
- glVertex2f((float)width, 0.0f);
- glTexCoord2f(1.0f, 1.0f);
- glVertex2f((float)width, (float)height);
- glTexCoord2f(0.0f, 1.0f);
- glVertex2f(0.0f, (float)height);
+ vp = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
- glEnd();
+ basep = NULL;
+ }
+ else {
+ basep = vbuffer;
+ vp = vbuffer;
+ }
- glPopMatrix();
+ if(vp) {
+ /* texture coordinate - vertex pair */
+ vp[0] = 0.0f;
+ vp[1] = 0.0f;
+ vp[2] = dx;
+ vp[3] = dy;
+
+ vp[4] = 1.0f;
+ vp[5] = 0.0f;
+ vp[6] = (float)width + dx;
+ vp[7] = dy;
+
+ vp[8] = 1.0f;
+ vp[9] = 1.0f;
+ vp[10] = (float)width + dx;
+ vp[11] = (float)height + dy;
+
+ vp[12] = 0.0f;
+ vp[13] = 1.0f;
+ vp[14] = dx;
+ vp[15] = (float)height + dy;
+
+ if(vertex_buffer)
+ glUnmapBuffer(GL_ARRAY_BUFFER);
+ }
+
+ glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), basep);
+ glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), ((char *)basep) + 2 * sizeof(float));
+
+ glEnableClientState(GL_VERTEX_ARRAY);
+ glEnableClientState(GL_TEXTURE_COORD_ARRAY);
+
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+
+ glDisableClientState(GL_TEXTURE_COORD_ARRAY);
+ glDisableClientState(GL_VERTEX_ARRAY);
+
+ if(vertex_buffer) {
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ }
if(draw_params.unbind_display_space_shader_cb) {
draw_params.unbind_display_space_shader_cb();
@@ -113,7 +163,7 @@ void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int dy, int w
/* fallback for old graphics cards that don't support GLSL, half float,
* and non-power-of-two textures */
glPixelZoom((float)width/(float)w, (float)height/(float)h);
- glRasterPos2f(0, dy);
+ glRasterPos2f(dx, dy);
uint8_t *pixels = (uint8_t*)rgba.data_pointer;
@@ -277,13 +327,10 @@ string Device::device_capabilities()
#endif
#ifdef WITH_OPENCL
- /* TODO(sergey): Needs proper usable implementation. */
- /*
if(device_opencl_init()) {
capabilities += "\nOpenCL device capabilities:\n";
capabilities += device_opencl_capabilities();
}
- */
#endif
return capabilities;
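
The draw_pixels() rewrite above replaces the immediate-mode quad (glBegin/glTexCoord2f/glVertex2f) with an interleaved vertex buffer drawn as a triangle fan, falling back to a client-side array when GL 1.5 buffer objects are unavailable. A minimal sketch of the same 16-float layout, four {u, v, x, y} corners with a 4 * sizeof(float) stride (fill_quad() is a hypothetical helper, not part of this patch):

/* Sketch: build the interleaved texcoord/vertex quad used by draw_pixels().
 * Each corner stores {u, v, x, y}; u_max/v_max are 1.0 in the generic path
 * and w/pmem.w, h/pmem.h in the CUDA path. */
static void fill_quad(float vp[16], float dx, float dy,
                      float width, float height,
                      float u_max, float v_max)
{
	const float corners[4][4] = {
		{0.0f,  0.0f,  dx,         dy},           /* bottom left */
		{u_max, 0.0f,  dx + width, dy},           /* bottom right */
		{u_max, v_max, dx + width, dy + height},  /* top right */
		{0.0f,  v_max, dx,         dy + height},  /* top left */
	};
	for(int i = 0; i < 4; i++)
		for(int j = 0; j < 4; j++)
			vp[i * 4 + j] = corners[i][j];
}

With this layout glTexCoordPointer() reads from offset 0 and glVertexPointer() from offset 2 * sizeof(float), matching the pointer setup in the hunk; calling glBufferData() with NULL orphans the previous contents, so a buffer still queued for rendering does not stall the subsequent glMapBuffer().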
diff --git a/intern/cycles/device/device.h b/intern/cycles/device/device.h
index 7c17f7f4112..6b4a190bbf0 100644
--- a/intern/cycles/device/device.h
+++ b/intern/cycles/device/device.h
@@ -55,6 +55,7 @@ public:
bool advanced_shading;
bool pack_images;
bool extended_images; /* flag for GPU and Multi device */
+ bool use_split_kernel; /* Denotes if the device is going to run Cycles using the split kernel. */
vector<DeviceInfo> multi_devices;
DeviceInfo()
@@ -66,25 +67,76 @@ public:
advanced_shading = true;
pack_images = false;
extended_images = false;
+ use_split_kernel = false;
+ }
+};
+
+class DeviceRequestedFeatures {
+public:
+ /* Use experimental feature set. */
+ bool experimental;
+
+ /* Maximum number of closures in shader trees. */
+ int max_closure;
+
+ /* Selective nodes compilation. */
+
+ /* Identifier of a node group up to which all the nodes need to be
+ * compiled in. Nodes from higher group indices will be ignored.
+ */
+ int max_nodes_group;
+
+ /* Features bitfield indicating which features from the requested group
+ * will be compiled in. Nodes which correspond to features that are not
+ * in this bitfield will be ignored even if they're in the requested group.
+ */
+ int nodes_features;
+
+ /* BVH/sampling kernel features. */
+ bool use_hair;
+ bool use_object_motion;
+ bool use_camera_motion;
+
+ DeviceRequestedFeatures()
+ {
+ /* TODO(sergey): Find more meaningful defaults. */
+ experimental = false;
+ max_closure = 0;
+ max_nodes_group = 0;
+ nodes_features = 0;
+ use_hair = false;
+ use_object_motion = false;
+ use_camera_motion = false;
+ }
+
+ bool modified(const DeviceRequestedFeatures& requested_features)
+ {
+ return !(experimental == requested_features.experimental &&
+ max_closure == requested_features.max_closure &&
+ max_nodes_group == requested_features.max_nodes_group &&
+ nodes_features == requested_features.nodes_features);
}
};
/* Device */
struct DeviceDrawParams {
- boost::function<void(void)> bind_display_space_shader_cb;
- boost::function<void(void)> unbind_display_space_shader_cb;
+ function<void(void)> bind_display_space_shader_cb;
+ function<void(void)> unbind_display_space_shader_cb;
};
class Device {
protected:
- Device(DeviceInfo& info_, Stats &stats_, bool background) : background(background), info(info_), stats(stats_) {}
+ Device(DeviceInfo& info_, Stats &stats_, bool background) : background(background), vertex_buffer(0), info(info_), stats(stats_) {}
bool background;
string error_msg;
+ /* used for real time display */
+ unsigned int vertex_buffer;
+
public:
- virtual ~Device() {}
+ virtual ~Device();
/* info */
DeviceInfo info;
@@ -106,9 +158,15 @@ public:
virtual void const_copy_to(const char *name, void *host, size_t size) = 0;
/* texture memory */
- virtual void tex_alloc(const char *name, device_memory& mem,
- InterpolationType interpolation = INTERPOLATION_NONE, bool periodic = false) {};
- virtual void tex_free(device_memory& mem) {};
+ virtual void tex_alloc(const char * /*name*/,
+ device_memory& /*mem*/,
+ InterpolationType interpolation = INTERPOLATION_NONE,
+ bool periodic = false)
+ {
+ (void)interpolation; /* Ignored. */
+ (void)periodic; /* Ignored. */
+ };
+ virtual void tex_free(device_memory& /*mem*/) {};
/* pixel memory */
virtual void pixels_alloc(device_memory& mem);
@@ -119,7 +177,9 @@ public:
virtual void *osl_memory() { return NULL; }
/* load/compile kernels, must be called before adding tasks */
- virtual bool load_kernels(bool experimental) { return true; }
+ virtual bool load_kernels(
+ const DeviceRequestedFeatures& /*requested_features*/)
+ { return true; }
/* tasks */
virtual int get_split_task_count(DeviceTask& task) = 0;
@@ -129,7 +189,7 @@ public:
/* opengl drawing */
virtual void draw_pixels(device_memory& mem, int y, int w, int h,
- int dy, int width, int height, bool transparent,
+ int dx, int dy, int width, int height, bool transparent,
const DeviceDrawParams &draw_params);
#ifdef WITH_NETWORK
@@ -138,8 +198,8 @@ public:
#endif
/* multi device */
- virtual void map_tile(Device *sub_device, RenderTile& tile) {}
- virtual int device_number(Device *sub_device) { return 0; }
+ virtual void map_tile(Device * /*sub_device*/, RenderTile& /*tile*/) {}
+ virtual int device_number(Device * /*sub_device*/) { return 0; }
/* static */
static Device *create(DeviceInfo& info, Stats &stats, bool background = true);
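
The DeviceRequestedFeatures block introduced above lets callers describe the kernel feature set they need and, via modified(), detect when a kernel reload is actually required. A sketch of the intended call pattern, assuming a hypothetical cached copy loaded_features of the feature set the kernels were last built with (the values below are illustrative, not from the patch):

/* Sketch only: 'loaded_features' is a hypothetical cached copy kept by
 * whatever owns the Device, e.g. a render session. */
DeviceRequestedFeatures requested;
requested.experimental = false;
requested.max_closure = 64;      /* illustrative value */
requested.max_nodes_group = 1;   /* illustrative value */

if(loaded_features.modified(requested)) {
	if(!device->load_kernels(requested))
		fprintf(stderr, "Failed to load render kernels\n");
	else
		loaded_features = requested;
}

Note that modified() compares only the compile-time node/closure fields; the use_hair and motion flags are not part of the comparison.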
diff --git a/intern/cycles/device/device_cpu.cpp b/intern/cycles/device/device_cpu.cpp
index 9abcf9167d5..013f656e31c 100644
--- a/intern/cycles/device/device_cpu.cpp
+++ b/intern/cycles/device/device_cpu.cpp
@@ -19,6 +19,15 @@
/* So ImathMath is included before our kernel_cpu_compat. */
#ifdef WITH_OSL
+# if defined(_MSC_VER)
+/* Prevent OSL from polluting the context with weird macros from windows.h.
+ * TODO(sergey): Ideally it's only enough to have class/struct declarations in
+ * the header and skip header include here.
+ */
+# define NOGDI
+# define NOMINMAX
+# define WIN32_LEAN_AND_MEAN
+# endif
# include <OSL/oslexec.h>
#endif
@@ -38,6 +47,7 @@
#include "util_debug.h"
#include "util_foreach.h"
#include "util_function.h"
+#include "util_logging.h"
#include "util_opengl.h"
#include "util_progress.h"
#include "util_system.h"
@@ -75,19 +85,21 @@ public:
task_pool.stop();
}
- void mem_alloc(device_memory& mem, MemoryType type)
+ void mem_alloc(device_memory& mem, MemoryType /*type*/)
{
mem.device_pointer = mem.data_pointer;
mem.device_size = mem.memory_size();
stats.mem_alloc(mem.device_size);
}
- void mem_copy_to(device_memory& mem)
+ void mem_copy_to(device_memory& /*mem*/)
{
/* no-op */
}
- void mem_copy_from(device_memory& mem, int y, int w, int h, int elem)
+ void mem_copy_from(device_memory& /*mem*/,
+ int /*y*/, int /*w*/, int /*h*/,
+ int /*elem*/)
{
/* no-op */
}
@@ -111,8 +123,9 @@ public:
kernel_const_copy(&kernel_globals, name, host, size);
}
- void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
+ void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool /*periodic*/)
{
+ VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";
kernel_tex_copy(&kernel_globals, name, mem.data_pointer, mem.data_width, mem.data_height, mem.data_depth, interpolation);
mem.device_pointer = mem.data_pointer;
mem.device_size = mem.memory_size();
@@ -207,7 +220,7 @@ public:
int end_sample = tile.start_sample + tile.num_samples;
for(int sample = start_sample; sample < end_sample; sample++) {
- if (task.get_cancel() || task_pool.canceled()) {
+ if(task.get_cancel() || task_pool.canceled()) {
if(task.need_finish_queue == false)
break;
}
@@ -368,7 +381,7 @@ public:
int get_split_task_count(DeviceTask& task)
{
- if (task.type == DeviceTask::SHADER)
+ if(task.type == DeviceTask::SHADER)
return task.get_subtask_count(TaskScheduler::num_threads(), 256);
else
return task.get_subtask_count(TaskScheduler::num_threads());
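
The MSVC guard above pre-defines WIN32_LEAN_AND_MEAN, NOGDI and NOMINMAX before OSL drags in windows.h. The motivating failure is that windows.h otherwise defines min()/max() as macros, which breaks any later use of std::min()/std::max(). A minimal reproduction of the pattern, independent of Cycles:

/* The defines must come before the windows.h include. */
#define NOMINMAX             /* stop windows.h defining min()/max() macros */
#define WIN32_LEAN_AND_MEAN  /* skip rarely used APIs */
#define NOGDI                /* skip GDI declarations */
#include <windows.h>
#include <algorithm>

int smallest = std::min(1, 2);  /* compiles; without NOMINMAX the min/max
                                 * macro expansion makes this a syntax error */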
diff --git a/intern/cycles/device/device_cuda.cpp b/intern/cycles/device/device_cuda.cpp
index 79a1a2b7fe1..4a9c27f5429 100644
--- a/intern/cycles/device/device_cuda.cpp
+++ b/intern/cycles/device/device_cuda.cpp
@@ -185,7 +185,7 @@ public:
cuda_assert(cuCtxDestroy(cuContext));
}
- bool support_device(bool experimental)
+ bool support_device(bool /*experimental*/)
{
int major, minor;
cuDeviceComputeCapability(&major, &minor, cuDevId);
@@ -266,7 +266,7 @@ public:
printf("CUDA version %d.%d detected, build may succeed but only CUDA 6.5 is officially supported.\n", cuda_version/10, cuda_version%10);
/* compile */
- string kernel = path_join(kernel_path, "kernel.cu");
+ string kernel = path_join(kernel_path, path_join("kernels", path_join("cuda", "kernel.cu")));
string include = kernel_path;
const int machine = system_cpu_bits();
@@ -281,7 +281,7 @@ public:
nvcc, major, minor, machine, kernel.c_str(), cubin.c_str(), include.c_str(), cuda_version);
if(experimental)
- command += " -D__KERNEL_CUDA_EXPERIMENTAL__";
+ command += " -D__KERNEL_EXPERIMENTAL__";
if(getenv("CYCLES_CUDA_EXTRA_CFLAGS")) {
command += string(" ") + getenv("CYCLES_CUDA_EXTRA_CFLAGS");
@@ -309,18 +309,18 @@ public:
return cubin;
}
- bool load_kernels(bool experimental)
+ bool load_kernels(const DeviceRequestedFeatures& requested_features)
{
/* check if cuda init succeeded */
if(cuContext == 0)
return false;
/* check if GPU is supported */
- if(!support_device(experimental))
+ if(!support_device(requested_features.experimental))
return false;
/* get kernel */
- string cubin = compile_kernel(experimental);
+ string cubin = compile_kernel(requested_features.experimental);
if(cubin == "")
return false;
@@ -331,7 +331,7 @@ public:
string cubin_data;
CUresult result;
- if (path_read_text(cubin, cubin_data))
+ if(path_read_text(cubin, cubin_data))
result = cuModuleLoadData(&cuModule, cubin_data.c_str());
else
result = CUDA_ERROR_FILE_NOT_FOUND;
@@ -344,7 +344,7 @@ public:
return (result == CUDA_SUCCESS);
}
- void mem_alloc(device_memory& mem, MemoryType type)
+ void mem_alloc(device_memory& mem, MemoryType /*type*/)
{
cuda_push_context();
CUdeviceptr device_pointer;
@@ -419,6 +419,7 @@ public:
void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
{
/* todo: support 3D textures, only CPU for now */
+ VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";
/* determine format */
CUarray_format_enum format;
@@ -483,7 +484,7 @@ public:
if(interpolation == INTERPOLATION_CLOSEST) {
cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_POINT));
}
- else if (interpolation == INTERPOLATION_LINEAR) {
+ else if(interpolation == INTERPOLATION_LINEAR) {
cuda_assert(cuTexRefSetFilterMode(texref, CU_TR_FILTER_MODE_LINEAR));
}
else {/* CUBIC and SMART are unsupported for CUDA */
@@ -879,11 +880,12 @@ public:
}
}
- void draw_pixels(device_memory& mem, int y, int w, int h, int dy, int width, int height, bool transparent,
+ void draw_pixels(device_memory& mem, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
const DeviceDrawParams &draw_params)
{
if(!background) {
PixelMem pmem = pixel_mem_map[mem.device_pointer];
+ float *vpointer;
cuda_push_context();
@@ -917,23 +919,52 @@ public:
draw_params.bind_display_space_shader_cb();
}
- glPushMatrix();
- glTranslatef(0.0f, (float)dy, 0.0f);
-
- glBegin(GL_QUADS);
-
- glTexCoord2f(0.0f, 0.0f);
- glVertex2f(0.0f, 0.0f);
- glTexCoord2f((float)w/(float)pmem.w, 0.0f);
- glVertex2f((float)width, 0.0f);
- glTexCoord2f((float)w/(float)pmem.w, (float)h/(float)pmem.h);
- glVertex2f((float)width, (float)height);
- glTexCoord2f(0.0f, (float)h/(float)pmem.h);
- glVertex2f(0.0f, (float)height);
+ if(!vertex_buffer)
+ glGenBuffers(1, &vertex_buffer);
+
+ glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
+ /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */
+ glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW);
+
+ vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
+
+ if(vpointer) {
+ /* texture coordinate - vertex pair */
+ vpointer[0] = 0.0f;
+ vpointer[1] = 0.0f;
+ vpointer[2] = dx;
+ vpointer[3] = dy;
+
+ vpointer[4] = (float)w/(float)pmem.w;
+ vpointer[5] = 0.0f;
+ vpointer[6] = (float)width + dx;
+ vpointer[7] = dy;
+
+ vpointer[8] = (float)w/(float)pmem.w;
+ vpointer[9] = (float)h/(float)pmem.h;
+ vpointer[10] = (float)width + dx;
+ vpointer[11] = (float)height + dy;
+
+ vpointer[12] = 0.0f;
+ vpointer[13] = (float)h/(float)pmem.h;
+ vpointer[14] = dx;
+ vpointer[15] = (float)height + dy;
+
+ glUnmapBuffer(GL_ARRAY_BUFFER);
+ }
+
+ glTexCoordPointer(2, GL_FLOAT, 4 * sizeof(float), 0);
+ glVertexPointer(2, GL_FLOAT, 4 * sizeof(float), (char *)NULL + 2 * sizeof(float));
+
+ glEnableClientState(GL_VERTEX_ARRAY);
+ glEnableClientState(GL_TEXTURE_COORD_ARRAY);
+
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
- glEnd();
+ glDisableClientState(GL_TEXTURE_COORD_ARRAY);
+ glDisableClientState(GL_VERTEX_ARRAY);
- glPopMatrix();
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
if(draw_params.unbind_display_space_shader_cb) {
draw_params.unbind_display_space_shader_cb();
@@ -950,7 +981,7 @@ public:
return;
}
- Device::draw_pixels(mem, y, w, h, dy, width, height, transparent, draw_params);
+ Device::draw_pixels(mem, y, w, h, dx, dy, width, height, transparent, draw_params);
}
void thread_run(DeviceTask *task)
@@ -966,7 +997,7 @@ public:
int end_sample = tile.start_sample + tile.num_samples;
for(int sample = start_sample; sample < end_sample; sample++) {
- if (task->get_cancel()) {
+ if(task->get_cancel()) {
if(task->need_finish_queue == false)
break;
}
@@ -999,7 +1030,7 @@ public:
}
};
- int get_split_task_count(DeviceTask& task)
+ int get_split_task_count(DeviceTask& /*task*/)
{
return 1;
}
@@ -1035,12 +1066,12 @@ bool device_cuda_init(void)
static bool initialized = false;
static bool result = false;
- if (initialized)
+ if(initialized)
return result;
initialized = true;
int cuew_result = cuewInit();
- if (cuew_result == CUEW_SUCCESS) {
+ if(cuew_result == CUEW_SUCCESS) {
VLOG(1) << "CUEW initialization succeeded";
if(CUDADevice::have_precompiled_kernels()) {
VLOG(1) << "Found precompiled kernels";
@@ -1091,14 +1122,20 @@ void device_cuda_info(vector<DeviceInfo>& devices)
}
vector<DeviceInfo> display_devices;
-
+
for(int num = 0; num < count; num++) {
char name[256];
int attr;
-
+
if(cuDeviceGetName(name, 256, num) != CUDA_SUCCESS)
continue;
+ int major, minor;
+ cuDeviceComputeCapability(&major, &minor, num);
+ if(major < 2) {
+ continue;
+ }
+
DeviceInfo info;
info.type = DEVICE_CUDA;
@@ -1106,8 +1143,6 @@ void device_cuda_info(vector<DeviceInfo>& devices)
info.id = string_printf("CUDA_%d", num);
info.num = num;
- int major, minor;
- cuDeviceComputeCapability(&major, &minor, num);
info.advanced_shading = (major >= 2);
info.extended_images = (major >= 3);
info.pack_images = false;
@@ -1132,7 +1167,7 @@ string device_cuda_capabilities(void)
if(result != CUDA_ERROR_NO_DEVICE) {
return string("Error initializing CUDA: ") + cuewErrorString(result);
}
- return "No CUDA device found";
+ return "No CUDA device found\n";
}
int count;
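
Two behavioral notes on the CUDA hunks above: the experimental define is renamed from __KERNEL_CUDA_EXPERIMENTAL__ to the device-agnostic __KERNEL_EXPERIMENTAL__ (the OpenCL build_options_from_requested_features() later in this diff emits the same flag), and device enumeration now skips sm_1x cards outright instead of merely flagging them as lacking advanced shading. Kernel-side code guarding experimental features would therefore check the new name; a sketch:

#ifdef __KERNEL_EXPERIMENTAL__
/* code compiled only into experimental feature-set builds */
#endif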
diff --git a/intern/cycles/device/device_multi.cpp b/intern/cycles/device/device_multi.cpp
index 9aac86daa1d..c61e550151f 100644
--- a/intern/cycles/device/device_multi.cpp
+++ b/intern/cycles/device/device_multi.cpp
@@ -25,6 +25,7 @@
#include "util_foreach.h"
#include "util_list.h"
+#include "util_logging.h"
#include "util_map.h"
#include "util_time.h"
@@ -88,10 +89,10 @@ public:
return error_msg;
}
- bool load_kernels(bool experimental)
+ bool load_kernels(const DeviceRequestedFeatures& requested_features)
{
foreach(SubDevice& sub, devices)
- if(!sub.device->load_kernels(experimental))
+ if(!sub.device->load_kernels(requested_features))
return false;
return true;
@@ -170,6 +171,8 @@ public:
void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
{
+ VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";
+
foreach(SubDevice& sub, devices) {
mem.device_pointer = 0;
sub.device->tex_alloc(name, mem, interpolation, periodic);
@@ -233,7 +236,7 @@ public:
mem.device_pointer = tmp;
}
- void draw_pixels(device_memory& rgba, int y, int w, int h, int dy, int width, int height, bool transparent,
+ void draw_pixels(device_memory& rgba, int y, int w, int h, int dx, int dy, int width, int height, bool transparent,
const DeviceDrawParams &draw_params)
{
device_ptr tmp = rgba.device_pointer;
@@ -248,7 +251,7 @@ public:
/* adjust math for w/width */
rgba.device_pointer = sub.ptr_map[tmp];
- sub.device->draw_pixels(rgba, sy, w, sh, sdy, width, sheight, transparent, draw_params);
+ sub.device->draw_pixels(rgba, sy, w, sh, dx, sdy, width, sheight, transparent, draw_params);
i++;
}
diff --git a/intern/cycles/device/device_network.cpp b/intern/cycles/device/device_network.cpp
index 4733482bf4e..1d6066c94dc 100644
--- a/intern/cycles/device/device_network.cpp
+++ b/intern/cycles/device/device_network.cpp
@@ -19,6 +19,7 @@
#include "device_network.h"
#include "util_foreach.h"
+#include "util_logging.h"
#if defined(WITH_NETWORK)
@@ -164,6 +165,8 @@ public:
void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
{
+ VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";
+
thread_scoped_lock lock(rpc_lock);
mem.device_pointer = ++mem_counter;
@@ -194,7 +197,7 @@ public:
}
}
- bool load_kernels(bool experimental)
+ bool load_kernels(const DeviceRequestedFeatures& requested_features)
{
if(error_func.have_error())
return false;
@@ -202,7 +205,10 @@ public:
thread_scoped_lock lock(rpc_lock);
RPCSend snd(socket, &error_func, "load_kernels");
- snd.add(experimental);
+ snd.add(requested_features.experimental);
+ snd.add(requested_features.max_closure);
+ snd.add(requested_features.max_nodes_group);
+ snd.add(requested_features.nodes_features);
snd.write();
bool result;
@@ -269,7 +275,7 @@ public:
lock.unlock();
TileList::iterator it = tile_list_find(the_tiles, tile);
- if (it != the_tiles.end()) {
+ if(it != the_tiles.end()) {
tile.buffers = it->buffers;
the_tiles.erase(it);
}
@@ -605,11 +611,14 @@ protected:
device->tex_free(mem);
}
else if(rcv.name == "load_kernels") {
- bool experimental;
- rcv.read(experimental);
+ DeviceRequestedFeatures requested_features;
+ rcv.read(requested_features.experimental);
+ rcv.read(requested_features.max_closure);
+ rcv.read(requested_features.max_nodes_group);
+ rcv.read(requested_features.nodes_features);
bool result;
- result = device->load_kernels(experimental);
+ result = device->load_kernels(requested_features);
RPCSend snd(socket, &error_func, "load_kernels");
snd.add(result);
snd.write();
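
Because the RPC stream is schemaless, the server must read the feature fields back in exactly the order the client wrote them; the two hunks above are kept in lock-step, and any new field has to be appended on both sides at once. The pairing, reduced to its essentials (RPCSend/RPCReceive are the classes used throughout this file; locking and setup omitted):

/* client */
snd.add(requested_features.experimental);
snd.add(requested_features.max_closure);
snd.add(requested_features.max_nodes_group);
snd.add(requested_features.nodes_features);

/* server: same fields, same order */
rcv.read(requested_features.experimental);
rcv.read(requested_features.max_closure);
rcv.read(requested_features.max_nodes_group);
rcv.read(requested_features.nodes_features);

Note that the use_hair/use_object_motion/use_camera_motion flags are not serialized here, so a networked device always sees their default (false) values.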
diff --git a/intern/cycles/device/device_opencl.cpp b/intern/cycles/device/device_opencl.cpp
index a5bf35a63c8..7ab2b412f0d 100644
--- a/intern/cycles/device/device_opencl.cpp
+++ b/intern/cycles/device/device_opencl.cpp
@@ -28,6 +28,7 @@
#include "buffers.h"
#include "util_foreach.h"
+#include "util_logging.h"
#include "util_map.h"
#include "util_math.h"
#include "util_md5.h"
@@ -39,7 +40,39 @@ CCL_NAMESPACE_BEGIN
#define CL_MEM_PTR(p) ((cl_mem)(uintptr_t)(p))
-static cl_device_type opencl_device_type()
+/* Macro declarations used with split kernel */
+
+/* Macro to enable/disable work-stealing */
+#define __WORK_STEALING__
+
+#define SPLIT_KERNEL_LOCAL_SIZE_X 64
+#define SPLIT_KERNEL_LOCAL_SIZE_Y 1
+
+/* This value may be tuned according to the scene we are rendering.
+ *
+ * Modifying the PATH_ITER_INC_FACTOR value proportionally to the number of
+ * expected ray bounces will improve performance.
+ */
+#define PATH_ITER_INC_FACTOR 8
+
+/* We allocate global memory in chunks. We may not be able to allocate
+ * exactly CL_DEVICE_MAX_MEM_ALLOC_SIZE bytes in a chunk, since some bytes
+ * may be needed for aligning chunks of memory. This is the amount of
+ * memory that we dedicate for that purpose.
+ */
+#define DATA_ALLOCATION_MEM_FACTOR 5000000 /* 5MB */
+
+struct OpenCLPlatformDevice {
+ OpenCLPlatformDevice(cl_platform_id platform_id,
+ cl_device_id device_id)
+ : platform_id(platform_id), device_id(device_id) {}
+ cl_platform_id platform_id;
+ cl_device_id device_id;
+};
+
+namespace {
+
+cl_device_type opencl_device_type()
{
char *device = getenv("CYCLES_OPENCL_TEST");
@@ -59,12 +92,12 @@ static cl_device_type opencl_device_type()
return CL_DEVICE_TYPE_ALL;
}
-static bool opencl_kernel_use_debug()
+bool opencl_kernel_use_debug()
{
return (getenv("CYCLES_OPENCL_DEBUG") != NULL);
}
-static bool opencl_kernel_use_advanced_shading(const string& platform)
+bool opencl_kernel_use_advanced_shading(const string& platform)
{
/* keep this in sync with kernel_types.h! */
if(platform == "NVIDIA CUDA")
@@ -72,44 +105,182 @@ static bool opencl_kernel_use_advanced_shading(const string& platform)
else if(platform == "Apple")
return false;
else if(platform == "AMD Accelerated Parallel Processing")
- return false;
+ return true;
else if(platform == "Intel(R) OpenCL")
return true;
return false;
}
-static string opencl_kernel_build_options(const string& platform, const string *debug_src = NULL)
+bool opencl_kernel_use_split(const string& platform_name,
+ const cl_device_type device_type)
{
- string build_options = " -cl-fast-relaxed-math ";
-
- if(platform == "NVIDIA CUDA")
- build_options += "-D__KERNEL_OPENCL_NVIDIA__ -cl-nv-maxrregcount=32 -cl-nv-verbose ";
-
- else if(platform == "Apple")
- build_options += "-D__KERNEL_OPENCL_APPLE__ ";
+ if(getenv("CYCLES_OPENCL_SPLIT_KERNEL_TEST") != NULL) {
+ return true;
+ }
+ /* TODO(sergey): Replace string lookups with more enum-like API,
+ * similar to device/vendor checks blender's gpu.
+ */
+ if(platform_name == "AMD Accelerated Parallel Processing" &&
+ device_type == CL_DEVICE_TYPE_GPU)
+ {
+ return true;
+ }
+ return false;
+}
- else if(platform == "AMD Accelerated Parallel Processing")
- build_options += "-D__KERNEL_OPENCL_AMD__ ";
+bool opencl_device_supported(const string& platform_name,
+ const cl_device_id device_id)
+{
+ cl_device_type device_type;
+ clGetDeviceInfo(device_id,
+ CL_DEVICE_TYPE,
+ sizeof(cl_device_type),
+ &device_type,
+ NULL);
+ if(platform_name == "AMD Accelerated Parallel Processing" &&
+ device_type == CL_DEVICE_TYPE_GPU)
+ {
+ return true;
+ }
+ return false;
+}
- else if(platform == "Intel(R) OpenCL") {
- build_options += "-D__KERNEL_OPENCL_INTEL_CPU__";
+bool opencl_platform_version_check(cl_platform_id platform,
+ string *error = NULL)
+{
+ const int req_major = 1, req_minor = 1;
+ int major, minor;
+ char version[256];
+ clGetPlatformInfo(platform,
+ CL_PLATFORM_VERSION,
+ sizeof(version),
+ &version,
+ NULL);
+ if(sscanf(version, "OpenCL %d.%d", &major, &minor) < 2) {
+ if(error != NULL) {
+ *error = string_printf("OpenCL: failed to parse platform version string (%s).", version);
+ }
+ return false;
+ }
+ if(!((major == req_major && minor >= req_minor) || (major > req_major))) {
+ if(error != NULL) {
+ *error = string_printf("OpenCL: platform version 1.1 or later required, found %d.%d", major, minor);
+ }
+ return false;
+ }
+ if(error != NULL) {
+ *error = "";
+ }
+ return true;
+}
- /* options for gdb source level kernel debugging. this segfaults on linux currently */
- if(opencl_kernel_use_debug() && debug_src)
- build_options += "-g -s \"" + *debug_src + "\"";
+bool opencl_device_version_check(cl_device_id device,
+ string *error = NULL)
+{
+ const int req_major = 1, req_minor = 1;
+ int major, minor;
+ char version[256];
+ clGetDeviceInfo(device,
+ CL_DEVICE_OPENCL_C_VERSION,
+ sizeof(version),
+ &version,
+ NULL);
+ if(sscanf(version, "OpenCL C %d.%d", &major, &minor) < 2) {
+ if(error != NULL) {
+ *error = string_printf("OpenCL: failed to parse OpenCL C version string (%s).", version);
+ }
+ return false;
+ }
+ if(!((major == req_major && minor >= req_minor) || (major > req_major))) {
+ if(error != NULL) {
+ *error = string_printf("OpenCL: C version 1.1 or later required, found %d.%d", major, minor);
+ }
+ return false;
+ }
+ if(error != NULL) {
+ *error = "";
}
+ return true;
+}
- if(opencl_kernel_use_debug())
- build_options += "-D__KERNEL_OPENCL_DEBUG__ ";
+void opencl_get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices)
+{
+ const bool force_all_platforms =
+ (getenv("CYCLES_OPENCL_TEST") != NULL) ||
+ (getenv("CYCLES_OPENCL_SPLIT_KERNEL_TEST")) != NULL;
+ const cl_device_type device_type = opencl_device_type();
-#ifdef WITH_CYCLES_DEBUG
- build_options += "-D__KERNEL_DEBUG__ ";
-#endif
+ vector<cl_device_id> device_ids;
+ cl_uint num_devices = 0;
+ vector<cl_platform_id> platform_ids;
+ cl_uint num_platforms = 0;
+
+ /* Number of devices added to the device info list. */
+ cl_uint num_added_devices = 0;
- return build_options;
+ /* Get devices. */
+ if(clGetPlatformIDs(0, NULL, &num_platforms) != CL_SUCCESS ||
+ num_platforms == 0)
+ {
+ return;
+ }
+ platform_ids.resize(num_platforms);
+ if(clGetPlatformIDs(num_platforms, &platform_ids[0], NULL) != CL_SUCCESS) {
+ return;
+ }
+ /* Devices are numbered consecutively across platforms. */
+ int num_base = 0;
+ for(int platform = 0;
+ platform < num_platforms;
+ platform++, num_base += num_added_devices)
+ {
+ cl_platform_id platform_id = platform_ids[platform];
+ num_devices = num_added_devices = 0;
+ if(clGetDeviceIDs(platform_id,
+ device_type,
+ 0,
+ NULL,
+ &num_devices) != CL_SUCCESS || num_devices == 0)
+ {
+ continue;
+ }
+ device_ids.resize(num_devices);
+ if(clGetDeviceIDs(platform_id,
+ device_type,
+ num_devices,
+ &device_ids[0],
+ NULL) != CL_SUCCESS)
+ {
+ continue;
+ }
+ if(!opencl_platform_version_check(platform_ids[platform])) {
+ continue;
+ }
+ char pname[256];
+ clGetPlatformInfo(platform_id,
+ CL_PLATFORM_NAME,
+ sizeof(pname),
+ &pname,
+ NULL);
+ string platform_name = pname;
+ for(int num = 0; num < num_devices; num++) {
+ cl_device_id device_id = device_ids[num];
+ if(!opencl_device_version_check(device_id)) {
+ continue;
+ }
+ if(force_all_platforms ||
+ opencl_device_supported(platform_name, device_id))
+ {
+ usable_devices->push_back(OpenCLPlatformDevice(platform_id,
+ device_id));
+ }
+ }
+ }
}
+} /* namespace */
+
/* thread safe cache for contexts and programs */
class OpenCLCache
{
@@ -117,14 +288,18 @@ class OpenCLCache
{
thread_mutex *mutex;
cl_context context;
- cl_program program;
+ /* cl_program for shader, bake, film_convert kernels (used in OpenCLDeviceBase) */
+ cl_program ocl_dev_base_program;
+ /* cl_program for megakernel (used in OpenCLDeviceMegaKernel) */
+ cl_program ocl_dev_megakernel_program;
- Slot() : mutex(NULL), context(NULL), program(NULL) {}
+ Slot() : mutex(NULL), context(NULL), ocl_dev_base_program(NULL), ocl_dev_megakernel_program(NULL) {}
Slot(const Slot &rhs)
: mutex(rhs.mutex)
, context(rhs.context)
- , program(rhs.program)
+ , ocl_dev_base_program(rhs.ocl_dev_base_program)
+ , ocl_dev_megakernel_program(rhs.ocl_dev_megakernel_program)
{
/* copy can only happen in map insert, assert that */
assert(mutex == NULL);
@@ -235,6 +410,12 @@ class OpenCLCache
}
public:
+
+ enum ProgramName {
+ OCL_DEV_BASE_PROGRAM,
+ OCL_DEV_MEGAKERNEL_PROGRAM,
+ };
+
/* see get_something comment */
static cl_context get_context(cl_platform_id platform, cl_device_id device,
thread_scoped_lock &slot_locker)
@@ -253,10 +434,21 @@ public:
}
/* see get_something comment */
- static cl_program get_program(cl_platform_id platform, cl_device_id device,
+ static cl_program get_program(cl_platform_id platform, cl_device_id device, ProgramName program_name,
thread_scoped_lock &slot_locker)
{
- cl_program program = get_something<cl_program>(platform, device, &Slot::program, slot_locker);
+ cl_program program = NULL;
+
+ if(program_name == OCL_DEV_BASE_PROGRAM) {
+ /* Get program related to OpenCLDeviceBase */
+ program = get_something<cl_program>(platform, device, &Slot::ocl_dev_base_program, slot_locker);
+ }
+ else if(program_name == OCL_DEV_MEGAKERNEL_PROGRAM) {
+ /* Get program related to megakernel */
+ program = get_something<cl_program>(platform, device, &Slot::ocl_dev_megakernel_program, slot_locker);
+ }
+ else {
+ assert(!"Invalid program name");
+ }
if(!program)
return NULL;
@@ -283,10 +475,18 @@ public:
}
/* see store_something comment */
- static void store_program(cl_platform_id platform, cl_device_id device, cl_program program,
+ static void store_program(cl_platform_id platform, cl_device_id device, cl_program program, ProgramName program_name,
thread_scoped_lock &slot_locker)
{
- store_something<cl_program>(platform, device, program, &Slot::program, slot_locker);
+ if(program_name == OCL_DEV_BASE_PROGRAM) {
+ store_something<cl_program>(platform, device, program, &Slot::ocl_dev_base_program, slot_locker);
+ }
+ else if(program_name == OCL_DEV_MEGAKERNEL_PROGRAM) {
+ store_something<cl_program>(platform, device, program, &Slot::ocl_dev_megakernel_program, slot_locker);
+ }
+ else {
+ assert(!"Invalid program name");
+ return;
+ }
/* increment reference count in OpenCL.
* The caller is going to release the object when done with it. */
@@ -303,8 +503,10 @@ public:
thread_scoped_lock cache_lock(self.cache_lock);
foreach(CacheMap::value_type &item, self.cache) {
- if(item.second.program != NULL)
- clReleaseProgram(item.second.program);
+ if(item.second.ocl_dev_base_program != NULL)
+ clReleaseProgram(item.second.ocl_dev_base_program);
+ if(item.second.ocl_dev_megakernel_program != NULL)
+ clReleaseProgram(item.second.ocl_dev_megakernel_program);
if(item.second.context != NULL)
clReleaseContext(item.second.context);
}
@@ -313,7 +515,7 @@ public:
}
};
-class OpenCLDevice : public Device
+class OpenCLDeviceBase : public Device
{
public:
DedicatedTaskPool task_pool;
@@ -322,7 +524,6 @@ public:
cl_platform_id cpPlatform;
cl_device_id cdDevice;
cl_program cpProgram;
- cl_kernel ckPathTraceKernel;
cl_kernel ckFilmConvertByteKernel;
cl_kernel ckFilmConvertHalfFloatKernel;
cl_kernel ckShaderKernel;
@@ -384,7 +585,7 @@ public:
}
}
- OpenCLDevice(DeviceInfo& info, Stats &stats, bool background_)
+ OpenCLDeviceBase(DeviceInfo& info, Stats &stats, bool background_)
: Device(info, stats, background_)
{
cpPlatform = NULL;
@@ -392,7 +593,6 @@ public:
cxContext = NULL;
cqCommandQueue = NULL;
cpProgram = NULL;
- ckPathTraceKernel = NULL;
ckFilmConvertByteKernel = NULL;
ckFilmConvertHalfFloatKernel = NULL;
ckShaderKernel = NULL;
@@ -400,71 +600,19 @@ public:
null_mem = 0;
device_initialized = false;
- /* setup platform */
- cl_uint num_platforms;
-
- ciErr = clGetPlatformIDs(0, NULL, &num_platforms);
- if(opencl_error(ciErr))
- return;
-
- if(num_platforms == 0) {
- opencl_error("OpenCL: no platforms found.");
- return;
- }
-
- vector<cl_platform_id> platforms(num_platforms, NULL);
-
- ciErr = clGetPlatformIDs(num_platforms, &platforms[0], NULL);
- if(opencl_error(ciErr)) {
- fprintf(stderr, "clGetPlatformIDs failed \n");
- return;
- }
-
- int num_base = 0;
- int total_devices = 0;
-
- for (int platform = 0; platform < num_platforms; platform++) {
- cl_uint num_devices;
-
- if(opencl_error(clGetDeviceIDs(platforms[platform], opencl_device_type(), 0, NULL, &num_devices)))
- return;
-
- total_devices += num_devices;
-
- if(info.num - num_base >= num_devices) {
- /* num doesn't refer to a device in this platform */
- num_base += num_devices;
- continue;
- }
-
- /* device is in this platform */
- cpPlatform = platforms[platform];
-
- /* get devices */
- vector<cl_device_id> device_ids(num_devices, NULL);
-
- if(opencl_error(clGetDeviceIDs(cpPlatform, opencl_device_type(), num_devices, &device_ids[0], NULL))) {
- fprintf(stderr, "clGetDeviceIDs failed \n");
- return;
- }
-
- cdDevice = device_ids[info.num - num_base];
-
- char name[256];
- clGetPlatformInfo(cpPlatform, CL_PLATFORM_NAME, sizeof(name), &name, NULL);
- platform_name = name;
-
- break;
- }
-
- if(total_devices == 0) {
+ vector<OpenCLPlatformDevice> usable_devices;
+ opencl_get_usable_devices(&usable_devices);
+ if(usable_devices.size() == 0) {
opencl_error("OpenCL: no devices found.");
return;
}
- else if(!cdDevice) {
- opencl_error("OpenCL: specified device not found.");
- return;
- }
+ assert(info.num < usable_devices.size());
+ OpenCLPlatformDevice& platform_device = usable_devices[info.num];
+ cpPlatform = platform_device.platform_id;
+ cdDevice = platform_device.device_id;
+ char name[256];
+ clGetPlatformInfo(cpPlatform, CL_PLATFORM_NAME, sizeof(name), &name, NULL);
+ platform_name = name;
{
/* try to use cached context */
@@ -500,12 +648,12 @@ public:
if(opencl_error(ciErr))
return;
- fprintf(stderr,"Device init succes\n");
+ fprintf(stderr, "Device init success\n");
device_initialized = true;
}
static void CL_CALLBACK context_notify_callback(const char *err_info,
- const void *private_info, size_t cb, void *user_data)
+ const void * /*private_info*/, size_t /*cb*/, void *user_data)
{
char name[256];
clGetDeviceInfo((cl_device_id)user_data, CL_DEVICE_NAME, sizeof(name), &name, NULL);
@@ -515,38 +663,23 @@ public:
bool opencl_version_check()
{
- char version[256];
-
- int major, minor, req_major = 1, req_minor = 1;
-
- clGetPlatformInfo(cpPlatform, CL_PLATFORM_VERSION, sizeof(version), &version, NULL);
-
- if(sscanf(version, "OpenCL %d.%d", &major, &minor) < 2) {
- opencl_error(string_printf("OpenCL: failed to parse platform version string (%s).", version));
- return false;
- }
-
- if(!((major == req_major && minor >= req_minor) || (major > req_major))) {
- opencl_error(string_printf("OpenCL: platform version 1.1 or later required, found %d.%d", major, minor));
- return false;
- }
-
- clGetDeviceInfo(cdDevice, CL_DEVICE_OPENCL_C_VERSION, sizeof(version), &version, NULL);
-
- if(sscanf(version, "OpenCL C %d.%d", &major, &minor) < 2) {
- opencl_error(string_printf("OpenCL: failed to parse OpenCL C version string (%s).", version));
+ string error;
+ if(!opencl_platform_version_check(cpPlatform, &error)) {
+ opencl_error(error);
return false;
}
-
- if(!((major == req_major && minor >= req_minor) || (major > req_major))) {
- opencl_error(string_printf("OpenCL: C version 1.1 or later required, found %d.%d", major, minor));
+ if(!opencl_device_version_check(cdDevice, &error)) {
+ opencl_error(error);
return false;
}
-
return true;
}
- bool load_binary(const string& kernel_path, const string& clbin, const string *debug_src = NULL)
+ bool load_binary(const string& /*kernel_path*/,
+ const string& clbin,
+ string custom_kernel_build_options,
+ cl_program *program,
+ const string *debug_src = NULL)
{
/* read binary into memory */
vector<uint8_t> binary;
@@ -561,7 +694,7 @@ public:
size_t size = binary.size();
const uint8_t *bytes = &binary[0];
- cpProgram = clCreateProgramWithBinary(cxContext, 1, &cdDevice,
+ *program = clCreateProgramWithBinary(cxContext, 1, &cdDevice,
&size, &bytes, &status, &ciErr);
if(opencl_error(status) || opencl_error(ciErr)) {
@@ -569,16 +702,16 @@ public:
return false;
}
- if(!build_kernel(kernel_path, debug_src))
+ if(!build_kernel(program, custom_kernel_build_options, debug_src))
return false;
return true;
}
- bool save_binary(const string& clbin)
+ bool save_binary(cl_program *program, const string& clbin)
{
size_t size = 0;
- clGetProgramInfo(cpProgram, CL_PROGRAM_BINARY_SIZES, sizeof(size_t), &size, NULL);
+ clGetProgramInfo(*program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t), &size, NULL);
if(!size)
return false;
@@ -586,7 +719,7 @@ public:
vector<uint8_t> binary(size);
uint8_t *bytes = &binary[0];
- clGetProgramInfo(cpProgram, CL_PROGRAM_BINARIES, sizeof(uint8_t*), &bytes, NULL);
+ clGetProgramInfo(*program, CL_PROGRAM_BINARIES, sizeof(uint8_t*), &bytes, NULL);
if(!path_write_binary(clbin, binary)) {
opencl_error(string_printf("OpenCL failed to write cached binary %s.", clbin.c_str()));
@@ -596,24 +729,30 @@ public:
return true;
}
- bool build_kernel(const string& kernel_path, const string *debug_src = NULL)
+ bool build_kernel(cl_program *kernel_program,
+ string custom_kernel_build_options,
+ const string *debug_src = NULL)
{
- string build_options = opencl_kernel_build_options(platform_name, debug_src);
-
- ciErr = clBuildProgram(cpProgram, 0, NULL, build_options.c_str(), NULL, NULL);
+ string build_options;
+ build_options = kernel_build_options(debug_src) + custom_kernel_build_options;
+
+ ciErr = clBuildProgram(*kernel_program, 0, NULL, build_options.c_str(), NULL, NULL);
/* show warnings even if build is successful */
size_t ret_val_size = 0;
- clGetProgramBuildInfo(cpProgram, cdDevice, CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
+ clGetProgramBuildInfo(*kernel_program, cdDevice, CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
if(ret_val_size > 1) {
- vector<char> build_log(ret_val_size+1);
- clGetProgramBuildInfo(cpProgram, cdDevice, CL_PROGRAM_BUILD_LOG, ret_val_size, &build_log[0], NULL);
+ vector<char> build_log(ret_val_size + 1);
+ clGetProgramBuildInfo(*kernel_program, cdDevice, CL_PROGRAM_BUILD_LOG, ret_val_size, &build_log[0], NULL);
build_log[ret_val_size] = '\0';
- fprintf(stderr, "OpenCL kernel build output:\n");
- fprintf(stderr, "%s\n", &build_log[0]);
+ /* Skip meaningless empty output from the NVidia compiler. */
+ if(!(ret_val_size == 2 && build_log[0] == '\n')) {
+ fprintf(stderr, "OpenCL kernel build output:\n");
+ fprintf(stderr, "%s\n", &build_log[0]);
+ }
}
if(ciErr != CL_SUCCESS) {
@@ -624,12 +763,15 @@ public:
return true;
}
- bool compile_kernel(const string& kernel_path, const string& kernel_md5, const string *debug_src = NULL)
+ bool compile_kernel(const string& kernel_path,
+ string source,
+ string custom_kernel_build_options,
+ cl_program *kernel_program,
+ const string *debug_src = NULL)
{
/* we compile kernels consisting of many files. unfortunately opencl
* kernel caches do not seem to recognize changes in included files.
* so we force recompile on changes by adding the md5 hash of all files */
- string source = "#include \"kernel.cl\" // " + kernel_md5 + "\n";
source = path_source_replace_includes(source, kernel_path);
if(debug_src)
@@ -638,15 +780,19 @@ public:
size_t source_len = source.size();
const char *source_str = source.c_str();
- cpProgram = clCreateProgramWithSource(cxContext, 1, &source_str, &source_len, &ciErr);
+ *kernel_program = clCreateProgramWithSource(cxContext, 1, &source_str, &source_len, &ciErr);
if(opencl_error(ciErr))
return false;
double starttime = time_dt();
printf("Compiling OpenCL kernel ...\n");
+ /* TODO(sergey): Report which kernel is being compiled
+ * as well (megakernel or which of the split kernels, etc.).
+ */
+ printf("Build flags: %s\n", custom_kernel_build_options.c_str());
- if(!build_kernel(kernel_path, debug_src))
+ if(!build_kernel(kernel_program, custom_kernel_build_options, debug_src))
return false;
printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);
@@ -654,7 +800,7 @@ public:
return true;
}
- string device_md5_hash()
+ string device_md5_hash(string kernel_custom_build_options = "")
{
MD5Hash md5;
char version[256], driver[256], name[256], vendor[256];
@@ -669,13 +815,14 @@ public:
md5.append((uint8_t*)name, strlen(name));
md5.append((uint8_t*)driver, strlen(driver));
- string options = opencl_kernel_build_options(platform_name);
+ string options = kernel_build_options();
+ options += kernel_custom_build_options;
md5.append((uint8_t*)options.c_str(), options.size());
return md5.get_hex();
}
- bool load_kernels(bool experimental)
+ bool load_kernels(const DeviceRequestedFeatures& /*requested_features*/)
{
/* verify if device was initialized */
if(!device_initialized) {
@@ -685,7 +832,7 @@ public:
/* try to use cached kernel */
thread_scoped_lock cache_locker;
- cpProgram = OpenCLCache::get_program(cpPlatform, cdDevice, cache_locker);
+ cpProgram = OpenCLCache::get_program(cpPlatform, cdDevice, OpenCLCache::OCL_DEV_BASE_PROGRAM, cache_locker);
if(!cpProgram) {
/* verify we have right opencl version */
@@ -711,28 +858,27 @@ public:
}
/* if exists already, try use it */
- if(path_exists(clbin) && load_binary(kernel_path, clbin, debug_src)) {
+ if(path_exists(clbin) && load_binary(kernel_path, clbin, "", &cpProgram)) {
/* kernel loaded from binary */
}
else {
+
+ string init_kernel_source = "#include \"kernels/opencl/kernel.cl\" // " + kernel_md5 + "\n";
+
/* if does not exist or loading binary failed, compile kernel */
- if(!compile_kernel(kernel_path, kernel_md5, debug_src))
+ if(!compile_kernel(kernel_path, init_kernel_source, "", &cpProgram, debug_src))
return false;
/* save binary for reuse */
- if(!save_binary(clbin))
+ if(!save_binary(&cpProgram, clbin))
return false;
}
/* cache the program */
- OpenCLCache::store_program(cpPlatform, cdDevice, cpProgram, cache_locker);
+ OpenCLCache::store_program(cpPlatform, cdDevice, cpProgram, OpenCLCache::OCL_DEV_BASE_PROGRAM, cache_locker);
}
/* find kernels */
- ckPathTraceKernel = clCreateKernel(cpProgram, "kernel_ocl_path_trace", &ciErr);
- if(opencl_error(ciErr))
- return false;
-
ckFilmConvertByteKernel = clCreateKernel(cpProgram, "kernel_ocl_convert_to_byte", &ciErr);
if(opencl_error(ciErr))
return false;
@@ -752,7 +898,7 @@ public:
return true;
}
- ~OpenCLDevice()
+ ~OpenCLDeviceBase()
{
task_pool.stop();
@@ -765,12 +911,14 @@ public:
delete mt->second;
}
- if(ckPathTraceKernel)
- clReleaseKernel(ckPathTraceKernel);
if(ckFilmConvertByteKernel)
clReleaseKernel(ckFilmConvertByteKernel);
if(ckFilmConvertHalfFloatKernel)
clReleaseKernel(ckFilmConvertHalfFloatKernel);
+ if(ckShaderKernel)
+ clReleaseKernel(ckShaderKernel);
+ if(ckBakeKernel)
+ clReleaseKernel(ckBakeKernel);
if(cpProgram)
clReleaseProgram(cpProgram);
if(cqCommandQueue)
@@ -854,8 +1002,12 @@ public:
mem_copy_to(*i->second);
}
- void tex_alloc(const char *name, device_memory& mem, InterpolationType interpolation, bool periodic)
+ void tex_alloc(const char *name,
+ device_memory& mem,
+ InterpolationType /*interpolation*/,
+ bool /*periodic*/)
{
+ VLOG(1) << "Texture allocate: " << name << ", " << mem.memory_size() << " bytes.";
mem_alloc(mem, MEM_READ_ONLY);
mem_copy_to(mem);
assert(mem_map.find(name) == mem_map.end());
@@ -908,42 +1060,6 @@ public:
opencl_assert(clFlush(cqCommandQueue));
}
- void path_trace(RenderTile& rtile, int sample)
- {
- /* cast arguments to cl types */
- cl_mem d_data = CL_MEM_PTR(const_mem_map["__data"]->device_pointer);
- cl_mem d_buffer = CL_MEM_PTR(rtile.buffer);
- cl_mem d_rng_state = CL_MEM_PTR(rtile.rng_state);
- cl_int d_x = rtile.x;
- cl_int d_y = rtile.y;
- cl_int d_w = rtile.w;
- cl_int d_h = rtile.h;
- cl_int d_sample = sample;
- cl_int d_offset = rtile.offset;
- cl_int d_stride = rtile.stride;
-
- /* sample arguments */
- cl_uint narg = 0;
-
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_data), (void*)&d_data));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_buffer), (void*)&d_buffer));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_rng_state), (void*)&d_rng_state));
-
-#define KERNEL_TEX(type, ttype, name) \
- set_kernel_arg_mem(ckPathTraceKernel, &narg, #name);
-#include "kernel_textures.h"
-
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_sample), (void*)&d_sample));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_x), (void*)&d_x));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_y), (void*)&d_y));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_w), (void*)&d_w));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_h), (void*)&d_h));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_offset), (void*)&d_offset));
- opencl_assert(clSetKernelArg(ckPathTraceKernel, narg++, sizeof(d_stride), (void*)&d_stride));
-
- enqueue_kernel(ckPathTraceKernel, d_w, d_h);
- }
-
void set_kernel_arg_mem(cl_kernel kernel, cl_uint *narg, const char *name)
{
cl_mem ptr;
@@ -974,29 +1090,30 @@ public:
cl_int d_offset = task.offset;
cl_int d_stride = task.stride;
- /* sample arguments */
- cl_uint narg = 0;
-
cl_kernel ckFilmConvertKernel = (rgba_byte)? ckFilmConvertByteKernel: ckFilmConvertHalfFloatKernel;
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_data), (void*)&d_data));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_rgba), (void*)&d_rgba));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_buffer), (void*)&d_buffer));
+ cl_uint start_arg_index =
+ kernel_set_args(ckFilmConvertKernel,
+ 0,
+ d_data,
+ d_rgba,
+ d_buffer);
#define KERNEL_TEX(type, ttype, name) \
- set_kernel_arg_mem(ckFilmConvertKernel, &narg, #name);
+ set_kernel_arg_mem(ckFilmConvertKernel, &start_arg_index, #name);
#include "kernel_textures.h"
-
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_sample_scale), (void*)&d_sample_scale));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_x), (void*)&d_x));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_y), (void*)&d_y));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_w), (void*)&d_w));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_h), (void*)&d_h));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_offset), (void*)&d_offset));
- opencl_assert(clSetKernelArg(ckFilmConvertKernel, narg++, sizeof(d_stride), (void*)&d_stride));
-
-
+#undef KERNEL_TEX
+
+ start_arg_index += kernel_set_args(ckFilmConvertKernel,
+ start_arg_index,
+ d_sample_scale,
+ d_x,
+ d_y,
+ d_w,
+ d_h,
+ d_offset,
+ d_stride);
enqueue_kernel(ckFilmConvertKernel, d_w, d_h);
}
@@ -1012,9 +1129,6 @@ public:
cl_int d_shader_w = task.shader_w;
cl_int d_offset = task.offset;
- /* sample arguments */
- cl_uint narg = 0;
-
cl_kernel kernel;
if(task.shader_eval_type >= SHADER_EVAL_BAKE)
@@ -1029,19 +1143,25 @@ public:
cl_int d_sample = sample;
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_data), (void*)&d_data));
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_input), (void*)&d_input));
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_output), (void*)&d_output));
+ cl_uint start_arg_index =
+ kernel_set_args(kernel,
+ 0,
+ d_data,
+ d_input,
+ d_output);
#define KERNEL_TEX(type, ttype, name) \
- set_kernel_arg_mem(kernel, &narg, #name);
+ set_kernel_arg_mem(kernel, &start_arg_index, #name);
#include "kernel_textures.h"
+#undef KERNEL_TEX
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_shader_eval_type), (void*)&d_shader_eval_type));
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_shader_x), (void*)&d_shader_x));
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_shader_w), (void*)&d_shader_w));
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_offset), (void*)&d_offset));
- opencl_assert(clSetKernelArg(kernel, narg++, sizeof(d_sample), (void*)&d_sample));
+ start_arg_index += kernel_set_args(kernel,
+ start_arg_index,
+ d_shader_eval_type,
+ d_shader_x,
+ d_shader_w,
+ d_offset,
+ d_sample);
enqueue_kernel(kernel, task.shader_w, 1);
@@ -1049,6 +1169,380 @@ public:
}
}
+ class OpenCLDeviceTask : public DeviceTask {
+ public:
+ OpenCLDeviceTask(OpenCLDeviceBase *device, DeviceTask& task)
+ : DeviceTask(task)
+ {
+ run = function_bind(&OpenCLDeviceBase::thread_run,
+ device,
+ this);
+ }
+ };
+
+ int get_split_task_count(DeviceTask& /*task*/)
+ {
+ return 1;
+ }
+
+ void task_add(DeviceTask& task)
+ {
+ task_pool.push(new OpenCLDeviceTask(this, task));
+ }
+
+ void task_wait()
+ {
+ task_pool.wait();
+ }
+
+ void task_cancel()
+ {
+ task_pool.cancel();
+ }
+
+ virtual void thread_run(DeviceTask * /*task*/) = 0;
+
+protected:
+
+ string kernel_build_options(const string *debug_src = NULL)
+ {
+ string build_options = " -cl-fast-relaxed-math ";
+
+ if(platform_name == "NVIDIA CUDA") {
+ build_options += "-D__KERNEL_OPENCL_NVIDIA__ "
+ "-cl-nv-maxrregcount=32 "
+ "-cl-nv-verbose ";
+
+ uint compute_capability_major, compute_capability_minor;
+ clGetDeviceInfo(cdDevice, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV,
+ sizeof(cl_uint), &compute_capability_major, NULL);
+ clGetDeviceInfo(cdDevice, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV,
+ sizeof(cl_uint), &compute_capability_minor, NULL);
+
+ build_options += string_printf("-D__COMPUTE_CAPABILITY__=%u ",
+ compute_capability_major * 100 +
+ compute_capability_minor * 10);
+ }
+
+ else if(platform_name == "Apple")
+ build_options += "-D__KERNEL_OPENCL_APPLE__ ";
+
+ else if(platform_name == "AMD Accelerated Parallel Processing")
+ build_options += "-D__KERNEL_OPENCL_AMD__ ";
+
+ else if(platform_name == "Intel(R) OpenCL") {
+ build_options += "-D__KERNEL_OPENCL_INTEL_CPU__ ";
+
+ /* Options for gdb source level kernel debugging.
+ * this segfaults on linux currently.
+ */
+ if(opencl_kernel_use_debug() && debug_src)
+ build_options += "-g -s \"" + *debug_src + "\" ";
+ }
+
+ if(opencl_kernel_use_debug())
+ build_options += "-D__KERNEL_OPENCL_DEBUG__ ";
+
+#ifdef WITH_CYCLES_DEBUG
+ build_options += "-D__KERNEL_DEBUG__ ";
+#endif
+
+ return build_options;
+ }
+
+ class ArgumentWrapper {
+ public:
+ ArgumentWrapper() : size(0), pointer(NULL) {}
+ template <typename T>
+ ArgumentWrapper(T& argument) : size(sizeof(argument)),
+ pointer(&argument) { }
+ size_t size;
+ void *pointer;
+ };
+
+ /* TODO(sergey): In the future we can use variadic templates, once
+ * C++0x is allowed. That should allow us to clean this up a bit.
+ */
+ int kernel_set_args(cl_kernel kernel,
+ int start_argument_index,
+ const ArgumentWrapper& arg1 = ArgumentWrapper(),
+ const ArgumentWrapper& arg2 = ArgumentWrapper(),
+ const ArgumentWrapper& arg3 = ArgumentWrapper(),
+ const ArgumentWrapper& arg4 = ArgumentWrapper(),
+ const ArgumentWrapper& arg5 = ArgumentWrapper(),
+ const ArgumentWrapper& arg6 = ArgumentWrapper(),
+ const ArgumentWrapper& arg7 = ArgumentWrapper(),
+ const ArgumentWrapper& arg8 = ArgumentWrapper(),
+ const ArgumentWrapper& arg9 = ArgumentWrapper(),
+ const ArgumentWrapper& arg10 = ArgumentWrapper(),
+ const ArgumentWrapper& arg11 = ArgumentWrapper(),
+ const ArgumentWrapper& arg12 = ArgumentWrapper(),
+ const ArgumentWrapper& arg13 = ArgumentWrapper(),
+ const ArgumentWrapper& arg14 = ArgumentWrapper(),
+ const ArgumentWrapper& arg15 = ArgumentWrapper(),
+ const ArgumentWrapper& arg16 = ArgumentWrapper(),
+ const ArgumentWrapper& arg17 = ArgumentWrapper(),
+ const ArgumentWrapper& arg18 = ArgumentWrapper(),
+ const ArgumentWrapper& arg19 = ArgumentWrapper(),
+ const ArgumentWrapper& arg20 = ArgumentWrapper(),
+ const ArgumentWrapper& arg21 = ArgumentWrapper(),
+ const ArgumentWrapper& arg22 = ArgumentWrapper(),
+ const ArgumentWrapper& arg23 = ArgumentWrapper(),
+ const ArgumentWrapper& arg24 = ArgumentWrapper(),
+ const ArgumentWrapper& arg25 = ArgumentWrapper(),
+ const ArgumentWrapper& arg26 = ArgumentWrapper(),
+ const ArgumentWrapper& arg27 = ArgumentWrapper(),
+ const ArgumentWrapper& arg28 = ArgumentWrapper(),
+ const ArgumentWrapper& arg29 = ArgumentWrapper(),
+ const ArgumentWrapper& arg30 = ArgumentWrapper(),
+ const ArgumentWrapper& arg31 = ArgumentWrapper(),
+ const ArgumentWrapper& arg32 = ArgumentWrapper(),
+ const ArgumentWrapper& arg33 = ArgumentWrapper())
+ {
+ int current_arg_index = 0;
+#define FAKE_VARARG_HANDLE_ARG(arg) \
+ do { \
+ if(arg.pointer != NULL) { \
+ opencl_assert(clSetKernelArg( \
+ kernel, \
+ start_argument_index + current_arg_index, \
+ arg.size, arg.pointer)); \
+ ++current_arg_index; \
+ } \
+ else { \
+ return current_arg_index; \
+ } \
+ } while(false)
+ FAKE_VARARG_HANDLE_ARG(arg1);
+ FAKE_VARARG_HANDLE_ARG(arg2);
+ FAKE_VARARG_HANDLE_ARG(arg3);
+ FAKE_VARARG_HANDLE_ARG(arg4);
+ FAKE_VARARG_HANDLE_ARG(arg5);
+ FAKE_VARARG_HANDLE_ARG(arg6);
+ FAKE_VARARG_HANDLE_ARG(arg7);
+ FAKE_VARARG_HANDLE_ARG(arg8);
+ FAKE_VARARG_HANDLE_ARG(arg9);
+ FAKE_VARARG_HANDLE_ARG(arg10);
+ FAKE_VARARG_HANDLE_ARG(arg11);
+ FAKE_VARARG_HANDLE_ARG(arg12);
+ FAKE_VARARG_HANDLE_ARG(arg13);
+ FAKE_VARARG_HANDLE_ARG(arg14);
+ FAKE_VARARG_HANDLE_ARG(arg15);
+ FAKE_VARARG_HANDLE_ARG(arg16);
+ FAKE_VARARG_HANDLE_ARG(arg17);
+ FAKE_VARARG_HANDLE_ARG(arg18);
+ FAKE_VARARG_HANDLE_ARG(arg19);
+ FAKE_VARARG_HANDLE_ARG(arg20);
+ FAKE_VARARG_HANDLE_ARG(arg21);
+ FAKE_VARARG_HANDLE_ARG(arg22);
+ FAKE_VARARG_HANDLE_ARG(arg23);
+ FAKE_VARARG_HANDLE_ARG(arg24);
+ FAKE_VARARG_HANDLE_ARG(arg25);
+ FAKE_VARARG_HANDLE_ARG(arg26);
+ FAKE_VARARG_HANDLE_ARG(arg27);
+ FAKE_VARARG_HANDLE_ARG(arg28);
+ FAKE_VARARG_HANDLE_ARG(arg29);
+ FAKE_VARARG_HANDLE_ARG(arg30);
+ FAKE_VARARG_HANDLE_ARG(arg31);
+ FAKE_VARARG_HANDLE_ARG(arg32);
+ FAKE_VARARG_HANDLE_ARG(arg33);
+#undef FAKE_VARARG_HANDLE_ARG
+ return current_arg_index;
+ }
+
+ inline void release_kernel_safe(cl_kernel kernel)
+ {
+ if(kernel) {
+ clReleaseKernel(kernel);
+ }
+ }
+
+ inline void release_mem_object_safe(cl_mem mem)
+ {
+ if(mem != NULL) {
+ clReleaseMemObject(mem);
+ }
+ }
+
+ inline void release_program_safe(cl_program program)
+ {
+ if(program) {
+ clReleaseProgram(program);
+ }
+ }
+
+ string build_options_from_requested_features(
+ const DeviceRequestedFeatures& requested_features)
+ {
+ string build_options = "";
+ if(requested_features.experimental) {
+ build_options += " -D__KERNEL_EXPERIMENTAL__";
+ }
+ build_options += " -D__NODES_MAX_GROUP__=" +
+ string_printf("%d", requested_features.max_nodes_group);
+ build_options += " -D__NODES_FEATURES__=" +
+ string_printf("%d", requested_features.nodes_features);
+ build_options += string_printf(" -D__MAX_CLOSURE__=%d",
+ requested_features.max_closure);
+ if(!requested_features.use_hair) {
+ build_options += " -D__NO_HAIR__";
+ }
+ if(!requested_features.use_object_motion) {
+ build_options += " -D__NO_OBJECT_MOTION__";
+ }
+ if(!requested_features.use_camera_motion) {
+ build_options += " -D__NO_CAMERA_MOTION__";
+ }
+ return build_options;
+ }
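+
+ /* For illustration (feature values hypothetical): experimental off,
+ * max_nodes_group 1, nodes_features 3, max_closure 64, hair disabled
+ * and both motion features enabled would produce:
+ *
+ * " -D__NODES_MAX_GROUP__=1 -D__NODES_FEATURES__=3"
+ * " -D__MAX_CLOSURE__=64 -D__NO_HAIR__"
+ */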
+};
+
+class OpenCLDeviceMegaKernel : public OpenCLDeviceBase
+{
+public:
+ cl_kernel ckPathTraceKernel;
+ cl_program path_trace_program;
+
+ OpenCLDeviceMegaKernel(DeviceInfo& info, Stats &stats, bool background_)
+ : OpenCLDeviceBase(info, stats, background_)
+ {
+ ckPathTraceKernel = NULL;
+ path_trace_program = NULL;
+ }
+
+ bool load_kernels(const DeviceRequestedFeatures& requested_features)
+ {
+ /* Get shader, bake and film convert kernels.
+ * This also verifies that OpenCL was actually initialized.
+ */
+ if(!OpenCLDeviceBase::load_kernels(requested_features)) {
+ return false;
+ }
+
+ /* Try to use cached kernel. */
+ thread_scoped_lock cache_locker;
+ path_trace_program = OpenCLCache::get_program(cpPlatform,
+ cdDevice,
+ OpenCLCache::OCL_DEV_MEGAKERNEL_PROGRAM,
+ cache_locker);
+
+ if(!path_trace_program) {
+ /* Verify we have the right OpenCL version. */
+ if(!opencl_version_check())
+ return false;
+
+ /* Calculate md5 hash to detect changes. */
+ string kernel_path = path_get("kernel");
+ string kernel_md5 = path_files_md5_hash(kernel_path);
+ string custom_kernel_build_options = "-D__COMPILE_ONLY_MEGAKERNEL__ ";
+ string device_md5 = device_md5_hash(custom_kernel_build_options);
+
+ /* Path to cached binary. */
+ string clbin = string_printf("cycles_kernel_%s_%s.clbin",
+ device_md5.c_str(),
+ kernel_md5.c_str());
+ clbin = path_user_get(path_join("cache", clbin));
+
+ /* Path to preprocessed source for debugging. */
+ string clsrc, *debug_src = NULL;
+ if(opencl_kernel_use_debug()) {
+ clsrc = string_printf("cycles_kernel_%s_%s.cl",
+ device_md5.c_str(),
+ kernel_md5.c_str());
+ clsrc = path_user_get(path_join("cache", clsrc));
+ debug_src = &clsrc;
+ }
+
+ /* If it already exists, try to use it. */
+ if(path_exists(clbin) && load_binary(kernel_path,
+ clbin,
+ custom_kernel_build_options,
+ &path_trace_program,
+ debug_src)) {
+ /* Kernel loaded from binary, nothing to do. */
+ }
+ else {
+ string init_kernel_source = "#include \"kernels/opencl/kernel.cl\" // " +
+ kernel_md5 + "\n";
+ /* If it does not exist, or loading the binary failed, compile the kernel. */
+ if(!compile_kernel(kernel_path,
+ init_kernel_source,
+ custom_kernel_build_options,
+ &path_trace_program,
+ debug_src))
+ {
+ return false;
+ }
+ /* Save binary for reuse. */
+ if(!save_binary(&path_trace_program, clbin)) {
+ return false;
+ }
+ }
+ /* Cache the program. */
+ OpenCLCache::store_program(cpPlatform,
+ cdDevice,
+ path_trace_program,
+ OpenCLCache::OCL_DEV_MEGAKERNEL_PROGRAM,
+ cache_locker);
+ }
+
+ /* Find kernels. */
+ ckPathTraceKernel = clCreateKernel(path_trace_program,
+ "kernel_ocl_path_trace",
+ &ciErr);
+ if(opencl_error(ciErr))
+ return false;
+ return true;
+ }
+
+ ~OpenCLDeviceMegaKernel()
+ {
+ task_pool.stop();
+ release_kernel_safe(ckPathTraceKernel);
+ release_program_safe(path_trace_program);
+ }
+
+ void path_trace(RenderTile& rtile, int sample)
+ {
+ /* Cast arguments to cl types. */
+ cl_mem d_data = CL_MEM_PTR(const_mem_map["__data"]->device_pointer);
+ cl_mem d_buffer = CL_MEM_PTR(rtile.buffer);
+ cl_mem d_rng_state = CL_MEM_PTR(rtile.rng_state);
+ cl_int d_x = rtile.x;
+ cl_int d_y = rtile.y;
+ cl_int d_w = rtile.w;
+ cl_int d_h = rtile.h;
+ cl_int d_offset = rtile.offset;
+ cl_int d_stride = rtile.stride;
+
+ /* Sample arguments. */
+ cl_int d_sample = sample;
+
+ cl_uint start_arg_index =
+ kernel_set_args(ckPathTraceKernel,
+ 0,
+ d_data,
+ d_buffer,
+ d_rng_state);
+
+#define KERNEL_TEX(type, ttype, name) \
+ set_kernel_arg_mem(ckPathTraceKernel, &start_arg_index, #name);
+#include "kernel_textures.h"
+#undef KERNEL_TEX
+
+ start_arg_index += kernel_set_args(ckPathTraceKernel,
+ start_arg_index,
+ d_sample,
+ d_x,
+ d_y,
+ d_w,
+ d_h,
+ d_offset,
+ d_stride);
+
+ enqueue_kernel(ckPathTraceKernel, d_w, d_h);
+ }
+
void thread_run(DeviceTask *task)
{
if(task->type == DeviceTask::FILM_CONVERT) {
@@ -1059,8 +1553,7 @@ public:
}
else if(task->type == DeviceTask::PATH_TRACE) {
RenderTile tile;
-
- /* keep rendering tiles until done */
+ /* Keep rendering tiles until done. */
while(task->acquire_tile(this, tile)) {
int start_sample = tile.start_sample;
int end_sample = tile.start_sample + tile.num_samples;
@@ -1078,127 +1571,1947 @@ public:
task->update_progress(&tile);
}
+ /* Complete kernel execution before releasing the tile. */
+ /* This helps in multi-device render;
+ * the device that reaches the critical-section function
+ * release_tile waits (stalling other devices from entering
+ * release_tile) for all kernels to complete. If device1 (a
+ * slow-render device) reached release_tile first, it would
+ * stall device2 (a fast-render device) from proceeding to render
+ * the next tile.
+ */
+ clFinish(cqCommandQueue);
+
task->release_tile(tile);
}
}
}
+};
- class OpenCLDeviceTask : public DeviceTask {
- public:
- OpenCLDeviceTask(OpenCLDevice *device, DeviceTask& task)
- : DeviceTask(task)
+/* TODO(sergey): This is to keep tile splitting on the OpenCL level
+ * working for now, since without this viewport render does not work
+ * as it should.
+ *
+ * Ideally it'll be done on a higher level, but we need to get ready
+ * for the merge rather soon, so let's keep the split logic private
+ * here in the file.
+ */
+class SplitRenderTile : public RenderTile {
+public:
+ SplitRenderTile()
+ : RenderTile(),
+ buffer_offset_x(0),
+ buffer_offset_y(0),
+ rng_state_offset_x(0),
+ rng_state_offset_y(0),
+ buffer_rng_state_stride(0) {}
+
+ explicit SplitRenderTile(RenderTile& tile)
+ : RenderTile(),
+ buffer_offset_x(0),
+ buffer_offset_y(0),
+ rng_state_offset_x(0),
+ rng_state_offset_y(0),
+ buffer_rng_state_stride(0)
+ {
+ x = tile.x;
+ y = tile.y;
+ w = tile.w;
+ h = tile.h;
+ start_sample = tile.start_sample;
+ num_samples = tile.num_samples;
+ sample = tile.sample;
+ resolution = tile.resolution;
+ offset = tile.offset;
+ stride = tile.stride;
+ buffer = tile.buffer;
+ rng_state = tile.rng_state;
+ buffers = tile.buffers;
+ }
+
+ /* The split kernel is constrained by device global memory, so it
+ * cannot render big tile sizes in one go. If the user sets a big
+ * tile size (big being relative to the available device global
+ * memory), we split the tile further and then call path_trace on
+ * each of those split tiles. The following variables assist in
+ * achieving that purpose.
+ */
+ int buffer_offset_x;
+ int buffer_offset_y;
+ int rng_state_offset_x;
+ int rng_state_offset_y;
+ int buffer_rng_state_stride;
+};
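+
+/* Worked example (hypothetical numbers): a 512x512 tile split into
+ * 256x256 sub-tiles produces four SplitRenderTiles. For the sub-tile at
+ * grid position (1, 1) one would expect, relative to the parent tile:
+ *
+ * sub.buffer_offset_x = parent_offset_x + 256;
+ * sub.buffer_offset_y = parent_offset_y + 256;
+ * sub.rng_state_offset_x = parent_offset_x + 256;
+ * sub.rng_state_offset_y = parent_offset_y + 256;
+ * sub.buffer_rng_state_stride = parent.stride;
+ *
+ * while sub.buffer and sub.rng_state still alias the parent tile's
+ * memory. See split_tiles() below for the actual computation.
+ */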
+
+/* OpenCLDeviceSplitKernel's declaration/definition. */
+class OpenCLDeviceSplitKernel : public OpenCLDeviceBase
+{
+public:
+ /* Kernel declaration. */
+ cl_kernel ckPathTraceKernel_data_init;
+ cl_kernel ckPathTraceKernel_scene_intersect;
+ cl_kernel ckPathTraceKernel_lamp_emission;
+ cl_kernel ckPathTraceKernel_queue_enqueue;
+ cl_kernel ckPathTraceKernel_background_buffer_update;
+ cl_kernel ckPathTraceKernel_shader_eval;
+ cl_kernel ckPathTraceKernel_holdout_emission_blurring_pathtermination_ao;
+ cl_kernel ckPathTraceKernel_direct_lighting;
+ cl_kernel ckPathTraceKernel_shadow_blocked;
+ cl_kernel ckPathTraceKernel_next_iteration_setup;
+ cl_kernel ckPathTraceKernel_sum_all_radiance;
+
+ /* cl_program declaration. */
+ cl_program data_init_program;
+ cl_program scene_intersect_program;
+ cl_program lamp_emission_program;
+ cl_program queue_enqueue_program;
+ cl_program background_buffer_update_program;
+ cl_program shader_eval_program;
+ cl_program holdout_emission_blurring_pathtermination_ao_program;
+ cl_program direct_lighting_program;
+ cl_program shadow_blocked_program;
+ cl_program next_iteration_setup_program;
+ cl_program sum_all_radiance_program;
+
+ /* Global memory variables [porting]; this memory is used for
+ * co-operation between different kernels: data written by one
+ * kernel is available to another kernel via this global
+ * memory.
+ */
+ cl_mem rng_coop;
+ cl_mem throughput_coop;
+ cl_mem L_transparent_coop;
+ cl_mem PathRadiance_coop;
+ cl_mem Ray_coop;
+ cl_mem PathState_coop;
+ cl_mem Intersection_coop;
+ cl_mem kgbuffer; /* KernelGlobals buffer. */
+
+ /* Global buffers for ShaderData. */
+ cl_mem sd; /* ShaderData used in the main path-iteration loop. */
+ cl_mem sd_DL_shadow; /* ShaderData used in Direct Lighting and
+ * shadow_blocked kernel.
+ */
+
+ /* Global buffers of each member of ShaderData. */
+ cl_mem P_sd;
+ cl_mem P_sd_DL_shadow;
+ cl_mem N_sd;
+ cl_mem N_sd_DL_shadow;
+ cl_mem Ng_sd;
+ cl_mem Ng_sd_DL_shadow;
+ cl_mem I_sd;
+ cl_mem I_sd_DL_shadow;
+ cl_mem shader_sd;
+ cl_mem shader_sd_DL_shadow;
+ cl_mem flag_sd;
+ cl_mem flag_sd_DL_shadow;
+ cl_mem prim_sd;
+ cl_mem prim_sd_DL_shadow;
+ cl_mem type_sd;
+ cl_mem type_sd_DL_shadow;
+ cl_mem u_sd;
+ cl_mem u_sd_DL_shadow;
+ cl_mem v_sd;
+ cl_mem v_sd_DL_shadow;
+ cl_mem object_sd;
+ cl_mem object_sd_DL_shadow;
+ cl_mem time_sd;
+ cl_mem time_sd_DL_shadow;
+ cl_mem ray_length_sd;
+ cl_mem ray_length_sd_DL_shadow;
+ cl_mem ray_depth_sd;
+ cl_mem ray_depth_sd_DL_shadow;
+ cl_mem transparent_depth_sd;
+ cl_mem transparent_depth_sd_DL_shadow;
+
+ /* Ray differentials. */
+ cl_mem dP_sd, dI_sd;
+ cl_mem dP_sd_DL_shadow, dI_sd_DL_shadow;
+ cl_mem du_sd, dv_sd;
+ cl_mem du_sd_DL_shadow, dv_sd_DL_shadow;
+
+ /* Dp/Du */
+ cl_mem dPdu_sd, dPdv_sd;
+ cl_mem dPdu_sd_DL_shadow, dPdv_sd_DL_shadow;
+
+ /* Object motion. */
+ cl_mem ob_tfm_sd, ob_itfm_sd;
+ cl_mem ob_tfm_sd_DL_shadow, ob_itfm_sd_DL_shadow;
+
+ cl_mem closure_sd;
+ cl_mem closure_sd_DL_shadow;
+ cl_mem num_closure_sd;
+ cl_mem num_closure_sd_DL_shadow;
+ cl_mem randb_closure_sd;
+ cl_mem randb_closure_sd_DL_shadow;
+ cl_mem ray_P_sd;
+ cl_mem ray_P_sd_DL_shadow;
+ cl_mem ray_dP_sd;
+ cl_mem ray_dP_sd_DL_shadow;
+
+ /* Global memory required for shadow blocked and accum_radiance. */
+ cl_mem BSDFEval_coop;
+ cl_mem ISLamp_coop;
+ cl_mem LightRay_coop;
+ cl_mem AOAlpha_coop;
+ cl_mem AOBSDF_coop;
+ cl_mem AOLightRay_coop;
+ cl_mem Intersection_coop_AO;
+ cl_mem Intersection_coop_DL;
+
+#ifdef WITH_CYCLES_DEBUG
+ /* DebugData memory */
+ cl_mem debugdata_coop;
+#endif
+
+ /* Global state array that tracks ray state. */
+ cl_mem ray_state;
+
+ /* Per sample buffers. */
+ cl_mem per_sample_output_buffers;
+
+ /* Denotes which sample each ray is being processed for. */
+ cl_mem work_array;
+
+ /* Queue */
+ cl_mem Queue_data; /* Array of size queuesize * num_queues * sizeof(int). */
+ cl_mem Queue_index; /* Array of size num_queues * sizeof(int);
+ * Tracks the size of each queue.
+ */
+
+ /* Flag to make the scene_intersect and lamp_emission kernels use queues. */
+ cl_mem use_queues_flag;
+
+ /* Amount of memory in output buffer associated with one pixel/thread. */
+ size_t per_thread_output_buffer_size;
+
+ /* Total allocatable available device memory. */
+ size_t total_allocatable_memory;
+
+ /* Host-side version of ray_state; used in checking for host
+ * path-iteration termination.
+ */
+ char *hostRayStateArray;
+
+ /* Number of path-iterations to be done in one shot. */
+ unsigned int PathIteration_times;
+
+#ifdef __WORK_STEALING__
+ /* Work pool with respect to each work group. */
+ cl_mem work_pool_wgs;
+
+ /* Denotes the maximum work groups possible w.r.t. current tile size. */
+ unsigned int max_work_groups;
+#endif
+
+ /* max_closure value for which the kernels are currently loaded. */
+ int current_max_closure;
+
+ /* Marked True in constructor and marked false at the end of path_trace(). */
+ bool first_tile;
+
+ OpenCLDeviceSplitKernel(DeviceInfo& info, Stats &stats, bool background_)
+ : OpenCLDeviceBase(info, stats, background_)
+ {
+ background = background_;
+
+ /* Initialize kernels. */
+ ckPathTraceKernel_data_init = NULL;
+ ckPathTraceKernel_scene_intersect = NULL;
+ ckPathTraceKernel_lamp_emission = NULL;
+ ckPathTraceKernel_background_buffer_update = NULL;
+ ckPathTraceKernel_shader_eval = NULL;
+ ckPathTraceKernel_holdout_emission_blurring_pathtermination_ao = NULL;
+ ckPathTraceKernel_direct_lighting = NULL;
+ ckPathTraceKernel_shadow_blocked = NULL;
+ ckPathTraceKernel_next_iteration_setup = NULL;
+ ckPathTraceKernel_sum_all_radiance = NULL;
+ ckPathTraceKernel_queue_enqueue = NULL;
+
+ /* Initialize program. */
+ data_init_program = NULL;
+ scene_intersect_program = NULL;
+ lamp_emission_program = NULL;
+ queue_enqueue_program = NULL;
+ background_buffer_update_program = NULL;
+ shader_eval_program = NULL;
+ holdout_emission_blurring_pathtermination_ao_program = NULL;
+ direct_lighting_program = NULL;
+ shadow_blocked_program = NULL;
+ next_iteration_setup_program = NULL;
+ sum_all_radiance_program = NULL;
+
+ /* Initialize cl_mem variables. */
+ kgbuffer = NULL;
+ sd = NULL;
+ sd_DL_shadow = NULL;
+
+ P_sd = NULL;
+ P_sd_DL_shadow = NULL;
+ N_sd = NULL;
+ N_sd_DL_shadow = NULL;
+ Ng_sd = NULL;
+ Ng_sd_DL_shadow = NULL;
+ I_sd = NULL;
+ I_sd_DL_shadow = NULL;
+ shader_sd = NULL;
+ shader_sd_DL_shadow = NULL;
+ flag_sd = NULL;
+ flag_sd_DL_shadow = NULL;
+ prim_sd = NULL;
+ prim_sd_DL_shadow = NULL;
+ type_sd = NULL;
+ type_sd_DL_shadow = NULL;
+ u_sd = NULL;
+ u_sd_DL_shadow = NULL;
+ v_sd = NULL;
+ v_sd_DL_shadow = NULL;
+ object_sd = NULL;
+ object_sd_DL_shadow = NULL;
+ time_sd = NULL;
+ time_sd_DL_shadow = NULL;
+ ray_length_sd = NULL;
+ ray_length_sd_DL_shadow = NULL;
+ ray_depth_sd = NULL;
+ ray_depth_sd_DL_shadow = NULL;
+ transparent_depth_sd = NULL;
+ transparent_depth_sd_DL_shadow = NULL;
+
+ /* Ray differentials. */
+ dP_sd = NULL;
+ dI_sd = NULL;
+ dP_sd_DL_shadow = NULL;
+ dI_sd_DL_shadow = NULL;
+ du_sd = NULL;
+ dv_sd = NULL;
+ du_sd_DL_shadow = NULL;
+ dv_sd_DL_shadow = NULL;
+
+ /* Dp/Du */
+ dPdu_sd = NULL;
+ dPdv_sd = NULL;
+ dPdu_sd_DL_shadow = NULL;
+ dPdv_sd_DL_shadow = NULL;
+
+ /* Object motion. */
+ ob_tfm_sd = NULL;
+ ob_itfm_sd = NULL;
+ ob_tfm_sd_DL_shadow = NULL;
+ ob_itfm_sd_DL_shadow = NULL;
+
+ closure_sd = NULL;
+ closure_sd_DL_shadow = NULL;
+ num_closure_sd = NULL;
+ num_closure_sd_DL_shadow = NULL;
+ randb_closure_sd = NULL;
+ randb_closure_sd_DL_shadow = NULL;
+ ray_P_sd = NULL;
+ ray_P_sd_DL_shadow = NULL;
+ ray_dP_sd = NULL;
+ ray_dP_sd_DL_shadow = NULL;
+
+ rng_coop = NULL;
+ throughput_coop = NULL;
+ L_transparent_coop = NULL;
+ PathRadiance_coop = NULL;
+ Ray_coop = NULL;
+ PathState_coop = NULL;
+ Intersection_coop = NULL;
+ ray_state = NULL;
+
+ AOAlpha_coop = NULL;
+ AOBSDF_coop = NULL;
+ AOLightRay_coop = NULL;
+ BSDFEval_coop = NULL;
+ ISLamp_coop = NULL;
+ LightRay_coop = NULL;
+ Intersection_coop_AO = NULL;
+ Intersection_coop_DL = NULL;
+
+#ifdef WITH_CYCLES_DEBUG
+ debugdata_coop = NULL;
+#endif
+
+ work_array = NULL;
+
+ /* Queue. */
+ Queue_data = NULL;
+ Queue_index = NULL;
+ use_queues_flag = NULL;
+
+ per_sample_output_buffers = NULL;
+
+ per_thread_output_buffer_size = 0;
+ hostRayStateArray = NULL;
+ PathIteration_times = PATH_ITER_INC_FACTOR;
+#ifdef __WORK_STEALING__
+ work_pool_wgs = NULL;
+ max_work_groups = 0;
+#endif
+ current_max_closure = -1;
+ first_tile = true;
+
+ /* Get device's maximum memory that can be allocated. */
+ ciErr = clGetDeviceInfo(cdDevice,
+ CL_DEVICE_MAX_MEM_ALLOC_SIZE,
+ sizeof(size_t),
+ &total_allocatable_memory,
+ NULL);
+ assert(ciErr == CL_SUCCESS);
+ if(platform_name == "AMD Accelerated Parallel Processing") {
+ /* This value is tweak-able; AMD platform does not seem to
+ * give maximum performance when all of CL_DEVICE_MAX_MEM_ALLOC_SIZE
+ * is considered for further computation.
+ */
+ total_allocatable_memory /= 2;
+ }
+ }
+
+ /* TODO(sergey): Seems really close to load_kernel(),
+ * could it be de-duplicated?
+ */
+ bool load_split_kernel(string kernel_path,
+ string kernel_init_source,
+ string clbin,
+ string custom_kernel_build_options,
+ cl_program *program,
+ const string *debug_src = NULL)
+ {
+ if(!opencl_version_check())
+ return false;
+
+ clbin = path_user_get(path_join("cache", clbin));
+
+ /* If it already exists, try to use it. */
+ if(path_exists(clbin) && load_binary(kernel_path,
+ clbin,
+ custom_kernel_build_options,
+ program,
+ debug_src)) {
+ /* Kernel loaded from binary. */
+ }
+ else {
+ /* If it does not exist, or loading the binary failed, compile the kernel. */
+ if(!compile_kernel(kernel_path,
+ kernel_init_source,
+ custom_kernel_build_options,
+ program,
+ debug_src))
+ {
+ return false;
+ }
+ /* Save binary for reuse. */
+ if(!save_binary(program, clbin)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /* Split kernel utility functions. */
+ size_t get_tex_size(const char *tex_name)
+ {
+ cl_mem ptr;
+ size_t ret_size = 0;
+ MemMap::iterator i = mem_map.find(tex_name);
+ if(i != mem_map.end()) {
+ ptr = CL_MEM_PTR(i->second);
+ ciErr = clGetMemObjectInfo(ptr,
+ CL_MEM_SIZE,
+ sizeof(ret_size),
+ &ret_size,
+ NULL);
+ assert(ciErr == CL_SUCCESS);
+ }
+ return ret_size;
+ }
+
+ size_t get_shader_closure_size(int max_closure)
+ {
+ return (sizeof(ShaderClosure) * max_closure);
+ }
+
+ size_t get_shader_data_size(size_t shader_closure_size)
+ {
+ /* ShaderData size without accounting for ShaderClosure array. */
+ size_t shader_data_size =
+ sizeof(ShaderData) - (sizeof(ShaderClosure) * MAX_CLOSURE);
+ return (shader_data_size + shader_closure_size);
+ }
+
+ /* Returns size of KernelGlobals structure associated with OpenCL. */
+ size_t get_KernelGlobals_size()
+ {
+ /* Copy dummy KernelGlobals related to OpenCL from kernel_globals.h to
+ * fetch its size.
+ */
+ typedef struct KernelGlobals {
+ ccl_constant KernelData *data;
+#define KERNEL_TEX(type, ttype, name) \
+ ccl_global type *name;
+#include "kernel_textures.h"
+#undef KERNEL_TEX
+ } KernelGlobals;
+
+ return sizeof(KernelGlobals);
+ }
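+
+ /* Illustration: assuming ccl_constant/ccl_global reduce to plain
+ * pointers on the host, the dummy struct is one KernelData pointer
+ * plus one pointer per texture listed in kernel_textures.h, so the
+ * returned size is essentially (1 + num_textures) * sizeof(void *).
+ */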
+
+ /* Returns the size of the structure-of-arrays implementation of ShaderData. */
+ size_t get_shaderdata_soa_size()
+ {
+ size_t shader_soa_size = 0;
+
+#define SD_VAR(type, what) shader_soa_size += sizeof(void *);
+#define SD_CLOSURE_VAR(type, what, max_closure) shader_soa_size += sizeof(void *);
+ #include "kernel_shaderdata_vars.h"
+#undef SD_VAR
+#undef SD_CLOSURE_VAR
+
+ return shader_soa_size;
+ }
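+
+ /* Illustration (the actual entries live in kernel_shaderdata_vars.h):
+ * if that header declared, say,
+ *
+ * SD_VAR(float3, P)
+ * SD_CLOSURE_VAR(ShaderClosure, closure, MAX_CLOSURE)
+ *
+ * each entry would add sizeof(void *) above, since the SOA layout
+ * stores one global pointer per ShaderData member.
+ */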
+
+ bool load_kernels(const DeviceRequestedFeatures& requested_features)
+ {
+ /* Get shader, bake and film_convert kernels.
+ * This also verifies that OpenCL was actually initialized.
+ */
+ if(!OpenCLDeviceBase::load_kernels(requested_features)) {
+ return false;
+ }
+
+ string kernel_path = path_get("kernel");
+ string kernel_md5 = path_files_md5_hash(kernel_path);
+ string device_md5;
+ string kernel_init_source;
+ string clbin;
+ string clsrc, *debug_src = NULL;
+
+ string build_options = "-D__SPLIT_KERNEL__";
+#ifdef __WORK_STEALING__
+ build_options += " -D__WORK_STEALING__";
+#endif
+ build_options += build_options_from_requested_features(requested_features);
+
+ /* Set compute device build option. */
+ cl_device_type device_type;
+ ciErr = clGetDeviceInfo(cdDevice,
+ CL_DEVICE_TYPE,
+ sizeof(cl_device_type),
+ &device_type,
+ NULL);
+ assert(ciErr == CL_SUCCESS);
+ if(device_type == CL_DEVICE_TYPE_GPU) {
+ build_options += " -D__COMPUTE_DEVICE_GPU__";
+ }
+
+#define GLUE(a, b) a ## b
+#define LOAD_KERNEL(name) \
+ do { \
+ kernel_init_source = "#include \"kernels/opencl/kernel_" #name ".cl\" // " + \
+ kernel_md5 + "\n"; \
+ device_md5 = device_md5_hash(build_options); \
+ clbin = string_printf("cycles_kernel_%s_%s_" #name ".clbin", \
+ device_md5.c_str(), kernel_md5.c_str()); \
+ if(opencl_kernel_use_debug()) { \
+ clsrc = string_printf("cycles_kernel_%s_%s_" #name ".cl", \
+ device_md5.c_str(), kernel_md5.c_str()); \
+ clsrc = path_user_get(path_join("cache", clsrc)); \
+ debug_src = &clsrc; \
+ } \
+ if(!load_split_kernel(kernel_path, kernel_init_source, clbin, \
+ build_options, \
+ &GLUE(name, _program), \
+ debug_src)) \
+ { \
+ fprintf(stderr, "Faled to compile %s\n", #name); \
+ return false; \
+ } \
+ } while(false)
+
+ LOAD_KERNEL(data_init);
+ LOAD_KERNEL(scene_intersect);
+ LOAD_KERNEL(lamp_emission);
+ LOAD_KERNEL(queue_enqueue);
+ LOAD_KERNEL(background_buffer_update);
+ LOAD_KERNEL(shader_eval);
+ LOAD_KERNEL(holdout_emission_blurring_pathtermination_ao);
+ LOAD_KERNEL(direct_lighting);
+ LOAD_KERNEL(shadow_blocked);
+ LOAD_KERNEL(next_iteration_setup);
+ LOAD_KERNEL(sum_all_radiance);
+
+#undef LOAD_KERNEL
+
+#define FIND_KERNEL(name) \
+ do { \
+ GLUE(ckPathTraceKernel_, name) = \
+ clCreateKernel(GLUE(name, _program), \
+ "kernel_ocl_path_trace_" #name, &ciErr); \
+ if(opencl_error(ciErr)) { \
+ fprintf(stderr,"Missing kernel kernel_ocl_path_trace_%s\n", #name); \
+ return false; \
+ } \
+ } while(false)
+
+ FIND_KERNEL(data_init);
+ FIND_KERNEL(scene_intersect);
+ FIND_KERNEL(lamp_emission);
+ FIND_KERNEL(queue_enqueue);
+ FIND_KERNEL(background_buffer_update);
+ FIND_KERNEL(shader_eval);
+ FIND_KERNEL(holdout_emission_blurring_pathtermination_ao);
+ FIND_KERNEL(direct_lighting);
+ FIND_KERNEL(shadow_blocked);
+ FIND_KERNEL(next_iteration_setup);
+ FIND_KERNEL(sum_all_radiance);
+#undef FIND_KERNEL
+#undef GLUE
+
+ current_max_closure = requested_features.max_closure;
+
+ return true;
+ }
+
+ ~OpenCLDeviceSplitKernel()
+ {
+ task_pool.stop();
+
+ /* Release kernels */
+ release_kernel_safe(ckPathTraceKernel_data_init);
+ release_kernel_safe(ckPathTraceKernel_scene_intersect);
+ release_kernel_safe(ckPathTraceKernel_lamp_emission);
+ release_kernel_safe(ckPathTraceKernel_queue_enqueue);
+ release_kernel_safe(ckPathTraceKernel_background_buffer_update);
+ release_kernel_safe(ckPathTraceKernel_shader_eval);
+ release_kernel_safe(ckPathTraceKernel_holdout_emission_blurring_pathtermination_ao);
+ release_kernel_safe(ckPathTraceKernel_direct_lighting);
+ release_kernel_safe(ckPathTraceKernel_shadow_blocked);
+ release_kernel_safe(ckPathTraceKernel_next_iteration_setup);
+ release_kernel_safe(ckPathTraceKernel_sum_all_radiance);
+
+ /* Release global memory */
+ release_mem_object_safe(P_sd);
+ release_mem_object_safe(P_sd_DL_shadow);
+ release_mem_object_safe(N_sd);
+ release_mem_object_safe(N_sd_DL_shadow);
+ release_mem_object_safe(Ng_sd);
+ release_mem_object_safe(Ng_sd_DL_shadow);
+ release_mem_object_safe(I_sd);
+ release_mem_object_safe(I_sd_DL_shadow);
+ release_mem_object_safe(shader_sd);
+ release_mem_object_safe(shader_sd_DL_shadow);
+ release_mem_object_safe(flag_sd);
+ release_mem_object_safe(flag_sd_DL_shadow);
+ release_mem_object_safe(prim_sd);
+ release_mem_object_safe(prim_sd_DL_shadow);
+ release_mem_object_safe(type_sd);
+ release_mem_object_safe(type_sd_DL_shadow);
+ release_mem_object_safe(u_sd);
+ release_mem_object_safe(u_sd_DL_shadow);
+ release_mem_object_safe(v_sd);
+ release_mem_object_safe(v_sd_DL_shadow);
+ release_mem_object_safe(object_sd);
+ release_mem_object_safe(object_sd_DL_shadow);
+ release_mem_object_safe(time_sd);
+ release_mem_object_safe(time_sd_DL_shadow);
+ release_mem_object_safe(ray_length_sd);
+ release_mem_object_safe(ray_length_sd_DL_shadow);
+ release_mem_object_safe(ray_depth_sd);
+ release_mem_object_safe(ray_depth_sd_DL_shadow);
+ release_mem_object_safe(transparent_depth_sd);
+ release_mem_object_safe(transparent_depth_sd_DL_shadow);
+
+ /* Ray differentials. */
+ release_mem_object_safe(dP_sd);
+ release_mem_object_safe(dP_sd_DL_shadow);
+ release_mem_object_safe(dI_sd);
+ release_mem_object_safe(dI_sd_DL_shadow);
+ release_mem_object_safe(du_sd);
+ release_mem_object_safe(du_sd_DL_shadow);
+ release_mem_object_safe(dv_sd);
+ release_mem_object_safe(dv_sd_DL_shadow);
+
+ /* Dp/Du */
+ release_mem_object_safe(dPdu_sd);
+ release_mem_object_safe(dPdu_sd_DL_shadow);
+ release_mem_object_safe(dPdv_sd);
+ release_mem_object_safe(dPdv_sd_DL_shadow);
+
+ /* Object motion. */
+ release_mem_object_safe(ob_tfm_sd);
+ release_mem_object_safe(ob_itfm_sd);
+
+ release_mem_object_safe(ob_tfm_sd_DL_shadow);
+ release_mem_object_safe(ob_itfm_sd_DL_shadow);
+
+ release_mem_object_safe(closure_sd);
+ release_mem_object_safe(closure_sd_DL_shadow);
+ release_mem_object_safe(num_closure_sd);
+ release_mem_object_safe(num_closure_sd_DL_shadow);
+ release_mem_object_safe(randb_closure_sd);
+ release_mem_object_safe(randb_closure_sd_DL_shadow);
+ release_mem_object_safe(ray_P_sd);
+ release_mem_object_safe(ray_P_sd_DL_shadow);
+ release_mem_object_safe(ray_dP_sd);
+ release_mem_object_safe(ray_dP_sd_DL_shadow);
+ release_mem_object_safe(rng_coop);
+ release_mem_object_safe(throughput_coop);
+ release_mem_object_safe(L_transparent_coop);
+ release_mem_object_safe(PathRadiance_coop);
+ release_mem_object_safe(Ray_coop);
+ release_mem_object_safe(PathState_coop);
+ release_mem_object_safe(Intersection_coop);
+ release_mem_object_safe(kgbuffer);
+ release_mem_object_safe(sd);
+ release_mem_object_safe(sd_DL_shadow);
+ release_mem_object_safe(ray_state);
+ release_mem_object_safe(AOAlpha_coop);
+ release_mem_object_safe(AOBSDF_coop);
+ release_mem_object_safe(AOLightRay_coop);
+ release_mem_object_safe(BSDFEval_coop);
+ release_mem_object_safe(ISLamp_coop);
+ release_mem_object_safe(LightRay_coop);
+ release_mem_object_safe(Intersection_coop_AO);
+ release_mem_object_safe(Intersection_coop_DL);
+#ifdef WITH_CYCLES_DEBUG
+ release_mem_object_safe(debugdata_coop);
+#endif
+ release_mem_object_safe(use_queues_flag);
+ release_mem_object_safe(Queue_data);
+ release_mem_object_safe(Queue_index);
+ release_mem_object_safe(work_array);
+#ifdef __WORK_STEALING__
+ release_mem_object_safe(work_pool_wgs);
+#endif
+ release_mem_object_safe(per_sample_output_buffers);
+
+ /* Release programs */
+ release_program_safe(data_init_program);
+ release_program_safe(scene_intersect_program);
+ release_program_safe(lamp_emission_program);
+ release_program_safe(queue_enqueue_program);
+ release_program_safe(background_buffer_update_program);
+ release_program_safe(shader_eval_program);
+ release_program_safe(holdout_emission_blurring_pathtermination_ao_program);
+ release_program_safe(direct_lighting_program);
+ release_program_safe(shadow_blocked_program);
+ release_program_safe(next_iteration_setup_program);
+ release_program_safe(sum_all_radiance_program);
+
+ if(hostRayStateArray != NULL) {
+ free(hostRayStateArray);
+ }
+ }
+
+ void path_trace(SplitRenderTile& rtile, int2 max_render_feasible_tile_size)
+ {
+ /* Cast arguments to cl types. */
+ cl_mem d_data = CL_MEM_PTR(const_mem_map["__data"]->device_pointer);
+ cl_mem d_buffer = CL_MEM_PTR(rtile.buffer);
+ cl_mem d_rng_state = CL_MEM_PTR(rtile.rng_state);
+ cl_int d_x = rtile.x;
+ cl_int d_y = rtile.y;
+ cl_int d_w = rtile.w;
+ cl_int d_h = rtile.h;
+ cl_int d_offset = rtile.offset;
+ cl_int d_stride = rtile.stride;
+
+ /* Make sure the render feasible tile size that was set is a multiple
+ * of the local work size dimensions.
+ */
+ assert(max_render_feasible_tile_size.x % SPLIT_KERNEL_LOCAL_SIZE_X == 0);
+ assert(max_render_feasible_tile_size.y % SPLIT_KERNEL_LOCAL_SIZE_Y == 0);
+
+ size_t global_size[2];
+ size_t local_size[2] = {SPLIT_KERNEL_LOCAL_SIZE_X,
+ SPLIT_KERNEL_LOCAL_SIZE_Y};
+
+ /* Set the range of samples to be processed for every ray in
+ * path-regeneration logic.
+ */
+ cl_int start_sample = rtile.start_sample;
+ cl_int end_sample = rtile.start_sample + rtile.num_samples;
+ cl_int num_samples = rtile.num_samples;
+
+#ifdef __WORK_STEALING__
+ global_size[0] = (((d_w - 1) / local_size[0]) + 1) * local_size[0];
+ global_size[1] = (((d_h - 1) / local_size[1]) + 1) * local_size[1];
+ unsigned int num_parallel_samples = 1;
+#else
+ global_size[1] = (((d_h - 1) / local_size[1]) + 1) * local_size[1];
+ unsigned int num_threads = max_render_feasible_tile_size.x *
+ max_render_feasible_tile_size.y;
+ unsigned int num_tile_columns_possible = num_threads / global_size[1];
+ /* Estimate the number of samples that can be
+ * processed in parallel.
+ */
+ unsigned int num_parallel_samples = min(num_tile_columns_possible / d_w,
+ rtile.num_samples);
+ /* Wavefront size in AMD is 64.
+ * TODO(sergey): What about other platforms?
+ */
+ if(num_parallel_samples >= 64) {
+ /* TODO(sergey): Could use generic round-up here. */
+ num_parallel_samples = (num_parallel_samples / 64) * 64;
+ }
+ assert(num_parallel_samples != 0);
+
+ global_size[0] = d_w * num_parallel_samples;
+#endif /* __WORK_STEALING__ */
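+
+ /* The expressions above all use the same round-up-to-multiple pattern
+ * noted in the TODO; a generic helper would be (a sketch, not part of
+ * this patch):
+ *
+ * size_t round_up(size_t x, size_t multiple)
+ * {
+ * return ((x + multiple - 1) / multiple) * multiple;
+ * }
+ *
+ * e.g. round_up(37, 16) == 48, so a 37-pixel-wide tile is covered by
+ * 48 work-items in that dimension.
+ */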
+
+ assert(global_size[0] * global_size[1] <=
+ max_render_feasible_tile_size.x * max_render_feasible_tile_size.y);
+
+ /* Allocate all required global memory once. */
+ if(first_tile) {
+ size_t num_global_elements = max_render_feasible_tile_size.x *
+ max_render_feasible_tile_size.y;
+ /* TODO(sergey): This will actually over-allocate if
+ * particular kernel does not support multiclosure.
+ */
+ size_t ShaderClosure_size = get_shader_closure_size(current_max_closure);
+
+#ifdef __WORK_STEALING__
+ /* Calculate max groups */
+ size_t max_global_size[2];
+ size_t tile_x = max_render_feasible_tile_size.x;
+ size_t tile_y = max_render_feasible_tile_size.y;
+ max_global_size[0] = (((tile_x - 1) / local_size[0]) + 1) * local_size[0];
+ max_global_size[1] = (((tile_y - 1) / local_size[1]) + 1) * local_size[1];
+ max_work_groups = (max_global_size[0] * max_global_size[1]) /
+ (local_size[0] * local_size[1]);
+ /* Allocate work_pool_wgs memory. */
+ work_pool_wgs = mem_alloc(max_work_groups * sizeof(unsigned int));
+#endif /* __WORK_STEALING__ */
+
+ /* Allocate queue_index memory only once. */
+ Queue_index = mem_alloc(NUM_QUEUES * sizeof(int));
+ use_queues_flag = mem_alloc(sizeof(char));
+ kgbuffer = mem_alloc(get_KernelGlobals_size());
+
+ /* Create global buffers for ShaderData. */
+ sd = mem_alloc(get_shaderdata_soa_size());
+ sd_DL_shadow = mem_alloc(get_shaderdata_soa_size());
+ P_sd = mem_alloc(num_global_elements * sizeof(float3));
+ P_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float3));
+ N_sd = mem_alloc(num_global_elements * sizeof(float3));
+ N_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float3));
+ Ng_sd = mem_alloc(num_global_elements * sizeof(float3));
+ Ng_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float3));
+ I_sd = mem_alloc(num_global_elements * sizeof(float3));
+ I_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float3));
+ shader_sd = mem_alloc(num_global_elements * sizeof(int));
+ shader_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+ flag_sd = mem_alloc(num_global_elements * sizeof(int));
+ flag_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+ prim_sd = mem_alloc(num_global_elements * sizeof(int));
+ prim_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+ type_sd = mem_alloc(num_global_elements * sizeof(int));
+ type_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+ u_sd = mem_alloc(num_global_elements * sizeof(float));
+ u_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float));
+ v_sd = mem_alloc(num_global_elements * sizeof(float));
+ v_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float));
+ object_sd = mem_alloc(num_global_elements * sizeof(int));
+ object_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+ time_sd = mem_alloc(num_global_elements * sizeof(float));
+ time_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float));
+ ray_length_sd = mem_alloc(num_global_elements * sizeof(float));
+ ray_length_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float));
+ ray_depth_sd = mem_alloc(num_global_elements * sizeof(int));
+ ray_depth_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+ transparent_depth_sd = mem_alloc(num_global_elements * sizeof(int));
+ transparent_depth_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+
+ /* Ray differentials. */
+ dP_sd = mem_alloc(num_global_elements * sizeof(differential3));
+ dP_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(differential3));
+ dI_sd = mem_alloc(num_global_elements * sizeof(differential3));
+ dI_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(differential3));
+ du_sd = mem_alloc(num_global_elements * sizeof(differential));
+ du_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(differential));
+ dv_sd = mem_alloc(num_global_elements * sizeof(differential));
+ dv_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(differential));
+
+ /* Dp/Du */
+ dPdu_sd = mem_alloc(num_global_elements * sizeof(float3));
+ dPdu_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float3));
+ dPdv_sd = mem_alloc(num_global_elements * sizeof(float3));
+ dPdv_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float3));
+
+ /* Object motion. */
+ ob_tfm_sd = mem_alloc(num_global_elements * sizeof(Transform));
+ ob_tfm_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(Transform));
+ ob_itfm_sd = mem_alloc(num_global_elements * sizeof(Transform));
+ ob_itfm_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(Transform));
+
+ closure_sd = mem_alloc(num_global_elements * ShaderClosure_size);
+ closure_sd_DL_shadow = mem_alloc(num_global_elements * 2 * ShaderClosure_size);
+ num_closure_sd = mem_alloc(num_global_elements * sizeof(int));
+ num_closure_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(int));
+ randb_closure_sd = mem_alloc(num_global_elements * sizeof(float));
+ randb_closure_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float));
+ ray_P_sd = mem_alloc(num_global_elements * sizeof(float3));
+ ray_P_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(float3));
+ ray_dP_sd = mem_alloc(num_global_elements * sizeof(differential3));
+ ray_dP_sd_DL_shadow = mem_alloc(num_global_elements * 2 * sizeof(differential3));
+
+ /* Creation of global memory buffers which are shared among
+ * the kernels.
+ */
+ rng_coop = mem_alloc(num_global_elements * sizeof(RNG));
+ throughput_coop = mem_alloc(num_global_elements * sizeof(float3));
+ L_transparent_coop = mem_alloc(num_global_elements * sizeof(float));
+ PathRadiance_coop = mem_alloc(num_global_elements * sizeof(PathRadiance));
+ Ray_coop = mem_alloc(num_global_elements * sizeof(Ray));
+ PathState_coop = mem_alloc(num_global_elements * sizeof(PathState));
+ Intersection_coop = mem_alloc(num_global_elements * sizeof(Intersection));
+ AOAlpha_coop = mem_alloc(num_global_elements * sizeof(float3));
+ AOBSDF_coop = mem_alloc(num_global_elements * sizeof(float3));
+ AOLightRay_coop = mem_alloc(num_global_elements * sizeof(Ray));
+ BSDFEval_coop = mem_alloc(num_global_elements * sizeof(BsdfEval));
+ ISLamp_coop = mem_alloc(num_global_elements * sizeof(int));
+ LightRay_coop = mem_alloc(num_global_elements * sizeof(Ray));
+ Intersection_coop_AO = mem_alloc(num_global_elements * sizeof(Intersection));
+ Intersection_coop_DL = mem_alloc(num_global_elements * sizeof(Intersection));
+
+#ifdef WITH_CYCLES_DEBUG
+ debugdata_coop = mem_alloc(num_global_elements * sizeof(DebugData));
+#endif
+
+ ray_state = mem_alloc(num_global_elements * sizeof(char));
+
+ hostRayStateArray = (char *)calloc(num_global_elements, sizeof(char));
+ assert(hostRayStateArray != NULL && "Can't create hostRayStateArray memory");
+
+ Queue_data = mem_alloc(num_global_elements * (NUM_QUEUES * sizeof(int)+sizeof(int)));
+ work_array = mem_alloc(num_global_elements * sizeof(unsigned int));
+ per_sample_output_buffers = mem_alloc(num_global_elements *
+ per_thread_output_buffer_size);
+ }
+
+ cl_int dQueue_size = global_size[0] * global_size[1];
+ cl_int total_num_rays = global_size[0] * global_size[1];
+
+ cl_uint start_arg_index =
+ kernel_set_args(ckPathTraceKernel_data_init,
+ 0,
+ kgbuffer,
+ sd,
+ sd_DL_shadow,
+ P_sd,
+ P_sd_DL_shadow,
+ N_sd,
+ N_sd_DL_shadow,
+ Ng_sd,
+ Ng_sd_DL_shadow,
+ I_sd,
+ I_sd_DL_shadow,
+ shader_sd,
+ shader_sd_DL_shadow,
+ flag_sd,
+ flag_sd_DL_shadow,
+ prim_sd,
+ prim_sd_DL_shadow,
+ type_sd,
+ type_sd_DL_shadow,
+ u_sd,
+ u_sd_DL_shadow,
+ v_sd,
+ v_sd_DL_shadow,
+ object_sd,
+ object_sd_DL_shadow,
+ time_sd,
+ time_sd_DL_shadow,
+ ray_length_sd,
+ ray_length_sd_DL_shadow,
+ ray_depth_sd,
+ ray_depth_sd_DL_shadow,
+ transparent_depth_sd,
+ transparent_depth_sd_DL_shadow);
+
+ /* Ray differentials. */
+ start_arg_index +=
+ kernel_set_args(ckPathTraceKernel_data_init,
+ start_arg_index,
+ dP_sd,
+ dP_sd_DL_shadow,
+ dI_sd,
+ dI_sd_DL_shadow,
+ du_sd,
+ du_sd_DL_shadow,
+ dv_sd,
+ dv_sd_DL_shadow);
+
+ /* Dp/Du */
+ start_arg_index +=
+ kernel_set_args(ckPathTraceKernel_data_init,
+ start_arg_index,
+ dPdu_sd,
+ dPdu_sd_DL_shadow,
+ dPdv_sd,
+ dPdv_sd_DL_shadow);
+
+ /* Object motion. */
+ start_arg_index +=
+ kernel_set_args(ckPathTraceKernel_data_init,
+ start_arg_index,
+ ob_tfm_sd,
+ ob_tfm_sd_DL_shadow,
+ ob_itfm_sd,
+ ob_itfm_sd_DL_shadow);
+
+ start_arg_index +=
+ kernel_set_args(ckPathTraceKernel_data_init,
+ start_arg_index,
+ closure_sd,
+ closure_sd_DL_shadow,
+ num_closure_sd,
+ num_closure_sd_DL_shadow,
+ randb_closure_sd,
+ randb_closure_sd_DL_shadow,
+ ray_P_sd,
+ ray_P_sd_DL_shadow,
+ ray_dP_sd,
+ ray_dP_sd_DL_shadow,
+ d_data,
+ per_sample_output_buffers,
+ d_rng_state,
+ rng_coop,
+ throughput_coop,
+ L_transparent_coop,
+ PathRadiance_coop,
+ Ray_coop,
+ PathState_coop,
+ ray_state);
+
+/* TODO(sergey): Avoid map lookup here. */
+#define KERNEL_TEX(type, ttype, name) \
+ set_kernel_arg_mem(ckPathTraceKernel_data_init, &start_arg_index, #name);
+#include "kernel_textures.h"
+#undef KERNEL_TEX
+
+ start_arg_index +=
+ kernel_set_args(ckPathTraceKernel_data_init,
+ start_arg_index,
+ start_sample,
+ d_x,
+ d_y,
+ d_w,
+ d_h,
+ d_offset,
+ d_stride,
+ rtile.rng_state_offset_x,
+ rtile.rng_state_offset_y,
+ rtile.buffer_rng_state_stride,
+ Queue_data,
+ Queue_index,
+ dQueue_size,
+ use_queues_flag,
+ work_array,
+#ifdef __WORK_STEALING__
+ work_pool_wgs,
+ num_samples,
+#endif
+#ifdef WITH_CYCLES_DEBUG
+ debugdata_coop,
+#endif
+ num_parallel_samples);
+
+ kernel_set_args(ckPathTraceKernel_scene_intersect,
+ 0,
+ kgbuffer,
+ d_data,
+ rng_coop,
+ Ray_coop,
+ PathState_coop,
+ Intersection_coop,
+ ray_state,
+ d_w,
+ d_h,
+ Queue_data,
+ Queue_index,
+ dQueue_size,
+ use_queues_flag,
+#ifdef WITH_CYCLES_DEBUG
+ debugdata_coop,
+#endif
+ num_parallel_samples);
+
+ kernel_set_args(ckPathTraceKernel_lamp_emission,
+ 0,
+ kgbuffer,
+ d_data,
+ sd,
+ throughput_coop,
+ PathRadiance_coop,
+ Ray_coop,
+ PathState_coop,
+ Intersection_coop,
+ ray_state,
+ d_w,
+ d_h,
+ Queue_data,
+ Queue_index,
+ dQueue_size,
+ use_queues_flag,
+ num_parallel_samples);
+
+ kernel_set_args(ckPathTraceKernel_queue_enqueue,
+ 0,
+ Queue_data,
+ Queue_index,
+ ray_state,
+ dQueue_size);
+
+ kernel_set_args(ckPathTraceKernel_background_buffer_update,
+ 0,
+ kgbuffer,
+ d_data,
+ sd,
+ per_sample_output_buffers,
+ d_rng_state,
+ rng_coop,
+ throughput_coop,
+ PathRadiance_coop,
+ Ray_coop,
+ PathState_coop,
+ L_transparent_coop,
+ ray_state,
+ d_w,
+ d_h,
+ d_x,
+ d_y,
+ d_stride,
+ rtile.rng_state_offset_x,
+ rtile.rng_state_offset_y,
+ rtile.buffer_rng_state_stride,
+ work_array,
+ Queue_data,
+ Queue_index,
+ dQueue_size,
+ end_sample,
+ start_sample,
+#ifdef __WORK_STEALING__
+ work_pool_wgs,
+ num_samples,
+#endif
+#ifdef WITH_CYCLES_DEBUG
+ debugdata_coop,
+#endif
+ num_parallel_samples);
+
+ kernel_set_args(ckPathTraceKernel_shader_eval,
+ 0,
+ kgbuffer,
+ d_data,
+ sd,
+ rng_coop,
+ Ray_coop,
+ PathState_coop,
+ Intersection_coop,
+ ray_state,
+ Queue_data,
+ Queue_index,
+ dQueue_size);
+
+ kernel_set_args(ckPathTraceKernel_holdout_emission_blurring_pathtermination_ao,
+ 0,
+ kgbuffer,
+ d_data,
+ sd,
+ per_sample_output_buffers,
+ rng_coop,
+ throughput_coop,
+ L_transparent_coop,
+ PathRadiance_coop,
+ PathState_coop,
+ Intersection_coop,
+ AOAlpha_coop,
+ AOBSDF_coop,
+ AOLightRay_coop,
+ d_w,
+ d_h,
+ d_x,
+ d_y,
+ d_stride,
+ ray_state,
+ work_array,
+ Queue_data,
+ Queue_index,
+ dQueue_size,
+#ifdef __WORK_STEALING__
+ start_sample,
+#endif
+ num_parallel_samples);
+
+ kernel_set_args(ckPathTraceKernel_direct_lighting,
+ 0,
+ kgbuffer,
+ d_data,
+ sd,
+ sd_DL_shadow,
+ rng_coop,
+ PathState_coop,
+ ISLamp_coop,
+ LightRay_coop,
+ BSDFEval_coop,
+ ray_state,
+ Queue_data,
+ Queue_index,
+ dQueue_size);
+
+ kernel_set_args(ckPathTraceKernel_shadow_blocked,
+ 0,
+ kgbuffer,
+ d_data,
+ sd_DL_shadow,
+ PathState_coop,
+ LightRay_coop,
+ AOLightRay_coop,
+ Intersection_coop_AO,
+ Intersection_coop_DL,
+ ray_state,
+ Queue_data,
+ Queue_index,
+ dQueue_size,
+ total_num_rays);
+
+ kernel_set_args(ckPathTraceKernel_next_iteration_setup,
+ 0,
+ kgbuffer,
+ d_data,
+ sd,
+ rng_coop,
+ throughput_coop,
+ PathRadiance_coop,
+ Ray_coop,
+ PathState_coop,
+ LightRay_coop,
+ ISLamp_coop,
+ BSDFEval_coop,
+ AOLightRay_coop,
+ AOBSDF_coop,
+ AOAlpha_coop,
+ ray_state,
+ Queue_data,
+ Queue_index,
+ dQueue_size,
+ use_queues_flag);
+
+ kernel_set_args(ckPathTraceKernel_sum_all_radiance,
+ 0,
+ d_data,
+ d_buffer,
+ per_sample_output_buffers,
+ num_parallel_samples,
+ d_w,
+ d_h,
+ d_stride,
+ rtile.buffer_offset_x,
+ rtile.buffer_offset_y,
+ rtile.buffer_rng_state_stride,
+ start_sample);
+
+ /* Macro for enqueuing split kernels. */
+#define GLUE(a, b) a ## b
+#define ENQUEUE_SPLIT_KERNEL(kernelName, globalSize, localSize) \
+ opencl_assert(clEnqueueNDRangeKernel(cqCommandQueue, \
+ GLUE(ckPathTraceKernel_, \
+ kernelName), \
+ 2, \
+ NULL, \
+ globalSize, \
+ localSize, \
+ 0, \
+ NULL, \
+ NULL))
+
+ /* Enqueue ckPathTraceKernel_data_init kernel. */
+ ENQUEUE_SPLIT_KERNEL(data_init, global_size, local_size);
+ bool activeRaysAvailable = true;
+
+ /* Record the number of times host intervention has been made. */
+ unsigned int numHostIntervention = 0;
+ unsigned int numNextPathIterTimes = PathIteration_times;
+ while(activeRaysAvailable) {
+ /* Twice the global work size of the other kernels for
+ * ckPathTraceKernel_shadow_blocked. */
+ size_t global_size_shadow_blocked[2];
+ global_size_shadow_blocked[0] = global_size[0] * 2;
+ global_size_shadow_blocked[1] = global_size[1];
+
+ /* Do path-iteration on the host: enqueue the path-iteration kernels. */
+ for(int PathIter = 0; PathIter < PathIteration_times; PathIter++) {
+ ENQUEUE_SPLIT_KERNEL(scene_intersect, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(lamp_emission, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(queue_enqueue, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(background_buffer_update, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(shader_eval, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(direct_lighting, global_size, local_size);
+ ENQUEUE_SPLIT_KERNEL(shadow_blocked, global_size_shadow_blocked, local_size);
+ ENQUEUE_SPLIT_KERNEL(next_iteration_setup, global_size, local_size);
+ }
+
+ /* Read ray state into host memory to decide if we should exit
+ * path-iteration on the host.
+ */
+ ciErr = clEnqueueReadBuffer(cqCommandQueue,
+ ray_state,
+ CL_TRUE,
+ 0,
+ global_size[0] * global_size[1] * sizeof(char),
+ hostRayStateArray,
+ 0,
+ NULL,
+ NULL);
+ assert(ciErr == CL_SUCCESS);
+
+ activeRaysAvailable = false;
+
+ for(int rayStateIter = 0;
+ rayStateIter < global_size[0] * global_size[1];
+ ++rayStateIter)
+ {
+ if(int8_t(hostRayStateArray[rayStateIter]) != RAY_INACTIVE) {
+ /* Not all rays are RAY_INACTIVE. */
+ activeRaysAvailable = true;
+ break;
+ }
+ }
+
+ if(activeRaysAvailable) {
+ numHostIntervention++;
+ PathIteration_times = PATH_ITER_INC_FACTOR;
+ /* Host intervention was done before all rays became RAY_INACTIVE;
+ * do more initial iterations for the next tile.
+ */
+ numNextPathIterTimes += PATH_ITER_INC_FACTOR;
+ }
+ }
+
+ /* Execute the sum_all_radiance kernel to accumulate radiance
+ * calculated in per_sample_output_buffers into the RenderTile's
+ * output buffer.
+ */
+ size_t sum_all_radiance_local_size[2] = {16, 16};
+ size_t sum_all_radiance_global_size[2];
+ sum_all_radiance_global_size[0] =
+ (((d_w - 1) / sum_all_radiance_local_size[0]) + 1) *
+ sum_all_radiance_local_size[0];
+ sum_all_radiance_global_size[1] =
+ (((d_h - 1) / sum_all_radiance_local_size[1]) + 1) *
+ sum_all_radiance_local_size[1];
+ ENQUEUE_SPLIT_KERNEL(sum_all_radiance,
+ sum_all_radiance_global_size,
+ sum_all_radiance_local_size);
+
+#undef ENQUEUE_SPLIT_KERNEL
+#undef GLUE
+
+ if(numHostIntervention == 0) {
+ /* This means we are executing the kernel more than required;
+ * this must be avoided for the next sample/tile.
+ */
+ PathIteration_times = ((numNextPathIterTimes - PATH_ITER_INC_FACTOR) <= 0) ?
+ PATH_ITER_INC_FACTOR : numNextPathIterTimes - PATH_ITER_INC_FACTOR;
+ }
+ else {
+ /* The number of path-iterations done for this tile is set as the
+ * initial path-iteration count for the next tile.
+ */
+ PathIteration_times = numNextPathIterTimes;
+ }
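+
+ /* Worked example (assuming PATH_ITER_INC_FACTOR == 8, a hypothetical
+ * value): if a tile needed two host interventions, PathIteration_times
+ * grows to 8 + 8 + 8 = 24 for the next tile; if that tile then needs
+ * none, it shrinks back to 24 - 8 = 16, converging towards the
+ * smallest iteration count that avoids extra ray-state readbacks.
+ */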
+
+ first_tile = false;
+ }
+
+ /* Calculates the amount of memory that always has to be
+ * allocated in order for the split kernel to function.
+ * This memory is tile/scene-property invariant (meaning,
+ * the value returned by this function does not depend
+ * on the user-set tile size or scene properties).
+ */
+ size_t get_invariable_mem_allocated()
+ {
+ size_t total_invariable_mem_allocated = 0;
+ size_t KernelGlobals_size = 0;
+ size_t ShaderData_SOA_size = 0;
+
+ KernelGlobals_size = get_KernelGlobals_size();
+ ShaderData_SOA_size = get_shaderdata_soa_size();
+
+ total_invariable_mem_allocated += KernelGlobals_size; /* KernelGlobals size */
+ total_invariable_mem_allocated += NUM_QUEUES * sizeof(unsigned int); /* Queue index size */
+ total_invariable_mem_allocated += sizeof(char); /* use_queues_flag size */
+ total_invariable_mem_allocated += ShaderData_SOA_size; /* sd size */
+ total_invariable_mem_allocated += ShaderData_SOA_size; /* sd_DL_shadow size */
+
+ return total_invariable_mem_allocated;
+ }
+
+ /* Calculate the memory that has to be (or has been) allocated for
+ * the split kernel to function.
+ */
+ size_t get_tile_specific_mem_allocated(const int2 tile_size)
+ {
+ size_t tile_specific_mem_allocated = 0;
+
+ /* Get required tile info */
+ unsigned int user_set_tile_w = tile_size.x;
+ unsigned int user_set_tile_h = tile_size.y;
+
+#ifdef __WORK_STEALING__
+ /* Calculate memory to be allocated for work_pools in
+ * case of work_stealing.
+ */
+ size_t max_global_size[2];
+ size_t max_num_work_pools = 0;
+ max_global_size[0] =
+ (((user_set_tile_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_X;
+ max_global_size[1] =
+ (((user_set_tile_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_Y;
+ max_num_work_pools =
+ (max_global_size[0] * max_global_size[1]) /
+ (SPLIT_KERNEL_LOCAL_SIZE_X * SPLIT_KERNEL_LOCAL_SIZE_Y);
+ tile_specific_mem_allocated += max_num_work_pools * sizeof(unsigned int);
+#endif
+
+ tile_specific_mem_allocated +=
+ user_set_tile_w * user_set_tile_h * per_thread_output_buffer_size;
+ tile_specific_mem_allocated +=
+ user_set_tile_w * user_set_tile_h * sizeof(RNG);
+
+ return tile_specific_mem_allocated;
+ }
+
+ /* Calculates the texture memories and KernelData (d_data) memory
+ * that has been allocated.
+ */
+ size_t get_scene_specific_mem_allocated(cl_mem d_data)
+ {
+ size_t scene_specific_mem_allocated = 0;
+ /* Calculate texture memories. */
+#define KERNEL_TEX(type, ttype, name) \
+ scene_specific_mem_allocated += get_tex_size(#name);
+#include "kernel_textures.h"
+#undef KERNEL_TEX
+ size_t d_data_size;
+ ciErr = clGetMemObjectInfo(d_data,
+ CL_MEM_SIZE,
+ sizeof(d_data_size),
+ &d_data_size,
+ NULL);
+ assert(ciErr == CL_SUCCESS && "Can't get d_data mem object info");
+ scene_specific_mem_allocated += d_data_size;
+ return scene_specific_mem_allocated;
+ }
+
+ /* Calculate the memory required for one thread in split kernel. */
+ size_t get_per_thread_memory()
+ {
+ size_t shader_closure_size = 0;
+ size_t shaderdata_volume = 0;
+ shader_closure_size = get_shader_closure_size(current_max_closure);
+ /* TODO(sergey): This will actually over-allocate if
+ * particular kernel does not support multiclosure.
+ */
+ shaderdata_volume = get_shader_data_size(shader_closure_size);
+ size_t retval = sizeof(RNG)
+ + sizeof(float3) /* Throughput size */
+ + sizeof(float) /* L transparent size */
+ + sizeof(char) /* Ray state size */
+ + sizeof(unsigned int) /* Work element size */
+ + sizeof(int) /* ISLamp_size */
+ + sizeof(PathRadiance) + sizeof(Ray) + sizeof(PathState)
+ + sizeof(Intersection) /* Overall isect */
+ + sizeof(Intersection) /* Intersection_coop_AO */
+ + sizeof(Intersection) /* Intersection_coop_DL */
+ + shaderdata_volume /* Overall ShaderData */
+ + (shaderdata_volume * 2) /* ShaderData : DL and shadow */
+ + sizeof(Ray) + sizeof(BsdfEval)
+ + sizeof(float3) /* AOAlpha size */
+ + sizeof(float3) /* AOBSDF size */
+ + sizeof(Ray)
+ + (sizeof(int) * NUM_QUEUES)
+ + per_thread_output_buffer_size;
+ return retval;
+ }
+
+ /* Considers the total memory available in the device
+ * and returns the maximum global work size possible.
+ */
+ size_t get_feasible_global_work_size(int2 tile_size, cl_mem d_data)
+ {
+ /* Calculate invariably allocated memory. */
+ size_t invariable_mem_allocated = get_invariable_mem_allocated();
+ /* Calculate tile specific allocated memory. */
+ size_t tile_specific_mem_allocated =
+ get_tile_specific_mem_allocated(tile_size);
+ /* Calculate scene specific allocated memory. */
+ size_t scene_specific_mem_allocated =
+ get_scene_specific_mem_allocated(d_data);
+ /* Calculate total memory available for the threads in global work size. */
+ size_t available_memory = total_allocatable_memory
+ - invariable_mem_allocated
+ - tile_specific_mem_allocated
+ - scene_specific_mem_allocated
+ - DATA_ALLOCATION_MEM_FACTOR;
+ size_t per_thread_memory_required = get_per_thread_memory();
+ return (available_memory / per_thread_memory_required);
+ }
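+
+ /* Worked example (all numbers hypothetical): with 1 GB of allocatable
+ * memory, 40 MB invariable, 60 MB tile-specific, 300 MB scene-specific
+ * and the DATA_ALLOCATION_MEM_FACTOR headroom subtracted, roughly
+ * 600 MB remain; at ~2 KB per thread that allows a feasible global
+ * work size of about 300,000 threads.
+ */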
+
+ /* Checks if the device has enough memory to render the whole tile;
+ * if not, we should split the single tile into multiple smaller
+ * tiles and process them all.
+ */
+ bool need_to_split_tile(unsigned int d_w,
+ unsigned int d_h,
+ int2 max_render_feasible_tile_size)
+ {
+ size_t global_size_estimate[2];
+ /* TODO(sergey): Such round-ups are in quite a few places, need to
+ * replace them with a utility macro.
+ */
+ global_size_estimate[0] =
+ (((d_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_X;
+ global_size_estimate[1] =
+ (((d_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_Y;
+ if((global_size_estimate[0] * global_size_estimate[1]) >
+ (max_render_feasible_tile_size.x * max_render_feasible_tile_size.y))
{
- run = function_bind(&OpenCLDevice::thread_run, device, this);
+ return true;
}
- };
+ else {
+ return false;
+ }
+ }
- int get_split_task_count(DeviceTask& task)
+ /* Considers the scene properties and the global memory available
+ * in the device, and returns a rectangular tile dimension
+ * (approximately the maximum) that should render with the split
+ * kernel.
+ */
+ int2 get_max_render_feasible_tile_size(size_t feasible_global_work_size)
{
- return 1;
+ int2 max_render_feasible_tile_size;
+ int square_root_val = (int)sqrt(feasible_global_work_size);
+ max_render_feasible_tile_size.x = square_root_val;
+ max_render_feasible_tile_size.y = square_root_val;
+ /* Ceil round-off max_render_feasible_tile_size. */
+ int2 ceil_render_feasible_tile_size;
+ ceil_render_feasible_tile_size.x =
+ (((max_render_feasible_tile_size.x - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_X;
+ ceil_render_feasible_tile_size.y =
+ (((max_render_feasible_tile_size.y - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_Y;
+ if(ceil_render_feasible_tile_size.x * ceil_render_feasible_tile_size.y <=
+ feasible_global_work_size)
+ {
+ return ceil_render_feasible_tile_size;
+ }
+ /* Floor round-off max_render_feasible_tile_size. */
+ int2 floor_render_feasible_tile_size;
+ floor_render_feasible_tile_size.x =
+ (max_render_feasible_tile_size.x / SPLIT_KERNEL_LOCAL_SIZE_X) *
+ SPLIT_KERNEL_LOCAL_SIZE_X;
+ floor_render_feasible_tile_size.y =
+ (max_render_feasible_tile_size.y / SPLIT_KERNEL_LOCAL_SIZE_Y) *
+ SPLIT_KERNEL_LOCAL_SIZE_Y;
+ return floor_render_feasible_tile_size;
}
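+ /* Worked example (assuming a 16x16 local size and a feasible global
+ * work size of 300,000 threads, both hypothetical): sqrt gives
+ * 547x547; ceiling to 560x560 needs 313,600 threads (too many), so
+ * the floor round-off 544x544 = 295,936 threads is returned.
+ */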
- void task_add(DeviceTask& task)
+ /* Try splitting the current tile into multiple smaller
+ * almost-square tiles.
+ */
+ int2 get_split_tile_size(RenderTile rtile,
+ int2 max_render_feasible_tile_size)
{
- task_pool.push(new OpenCLDeviceTask(this, task));
+ int2 split_tile_size;
+ int num_global_threads = max_render_feasible_tile_size.x *
+ max_render_feasible_tile_size.y;
+ int d_w = rtile.w;
+ int d_h = rtile.h;
+ /* Ceil round off d_w and d_h */
+ d_w = (((d_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_X;
+ d_h = (((d_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_Y;
+ while(d_w * d_h > num_global_threads) {
+ /* Halve the longer dimension. */
+ if(d_w >= d_h) {
+ d_w = d_w / 2;
+ d_w = (((d_w - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_X;
+ }
+ else {
+ d_h = d_h / 2;
+ d_h = (((d_h - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_Y;
+ }
+ }
+ split_tile_size.x = d_w;
+ split_tile_size.y = d_h;
+ return split_tile_size;
}
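+ /* Worked example (hypothetical numbers, 16x16 local size): with
+ * 295,936 global threads available and a 1024x1024 user tile,
+ * 1024x1024 = 1,048,576 > 295,936, so halve the width to 512
+ * (512x1024 = 524,288, still too big), then the height to 512;
+ * 512x512 = 262,144 fits, so the split tile size is 512x512.
+ */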
- void task_wait()
+ /* Splits an existing tile into multiple tiles of size split_tile_size. */
+ vector<SplitRenderTile> split_tiles(RenderTile rtile, int2 split_tile_size)
{
- task_pool.wait();
+ vector<SplitRenderTile> to_path_trace_rtile;
+ int d_w = rtile.w;
+ int d_h = rtile.h;
+ int num_tiles_x = (((d_w - 1) / split_tile_size.x) + 1);
+ int num_tiles_y = (((d_h - 1) / split_tile_size.y) + 1);
+ /* Buffer and rng_state offset calc. */
+ size_t offset_index = rtile.offset + (rtile.x + rtile.y * rtile.stride);
+ size_t offset_x = offset_index % rtile.stride;
+ size_t offset_y = offset_index / rtile.stride;
+ /* Resize to_path_trace_rtile. */
+ to_path_trace_rtile.resize(num_tiles_x * num_tiles_y);
+ for(int tile_iter_y = 0; tile_iter_y < num_tiles_y; tile_iter_y++) {
+ for(int tile_iter_x = 0; tile_iter_x < num_tiles_x; tile_iter_x++) {
+ int rtile_index = tile_iter_y * num_tiles_x + tile_iter_x;
+ to_path_trace_rtile[rtile_index].rng_state_offset_x = offset_x + tile_iter_x * split_tile_size.x;
+ to_path_trace_rtile[rtile_index].rng_state_offset_y = offset_y + tile_iter_y * split_tile_size.y;
+ to_path_trace_rtile[rtile_index].buffer_offset_x = offset_x + tile_iter_x * split_tile_size.x;
+ to_path_trace_rtile[rtile_index].buffer_offset_y = offset_y + tile_iter_y * split_tile_size.y;
+ to_path_trace_rtile[rtile_index].start_sample = rtile.start_sample;
+ to_path_trace_rtile[rtile_index].num_samples = rtile.num_samples;
+ to_path_trace_rtile[rtile_index].sample = rtile.sample;
+ to_path_trace_rtile[rtile_index].resolution = rtile.resolution;
+ to_path_trace_rtile[rtile_index].offset = rtile.offset;
+ to_path_trace_rtile[rtile_index].buffers = rtile.buffers;
+ to_path_trace_rtile[rtile_index].buffer = rtile.buffer;
+ to_path_trace_rtile[rtile_index].rng_state = rtile.rng_state;
+ to_path_trace_rtile[rtile_index].x = rtile.x + (tile_iter_x * split_tile_size.x);
+ to_path_trace_rtile[rtile_index].y = rtile.y + (tile_iter_y * split_tile_size.y);
+ to_path_trace_rtile[rtile_index].buffer_rng_state_stride = rtile.stride;
+ /* Fill width and height of the new render tile. */
+ to_path_trace_rtile[rtile_index].w = (tile_iter_x == (num_tiles_x - 1)) ?
+ (d_w - (tile_iter_x * split_tile_size.x)) /* Border tile */
+ : split_tile_size.x;
+ to_path_trace_rtile[rtile_index].h = (tile_iter_y == (num_tiles_y - 1)) ?
+ (d_h - (tile_iter_y * split_tile_size.y)) /* Border tile */
+ : split_tile_size.y;
+ to_path_trace_rtile[rtile_index].stride = to_path_trace_rtile[rtile_index].w;
+ }
+ }
+ return to_path_trace_rtile;
}
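+ /* For example (hypothetical numbers): a 600x600 tile split with a
+ * 512x512 split_tile_size yields a 2x2 grid; the right and bottom
+ * border tiles are only 88 pixels wide/high (600 - 512), and each
+ * sub-tile's stride is set to its own width.
+ */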
- void task_cancel()
+ void thread_run(DeviceTask *task)
{
- task_pool.cancel();
+ if(task->type == DeviceTask::FILM_CONVERT) {
+ film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
+ }
+ else if(task->type == DeviceTask::SHADER) {
+ shader(*task);
+ }
+ else if(task->type == DeviceTask::PATH_TRACE) {
+ RenderTile tile;
+ bool initialize_data_and_check_render_feasibility = false;
+ bool need_to_split_tiles_further = false;
+ int2 max_render_feasible_tile_size;
+ size_t feasible_global_work_size;
+ const int2 tile_size = task->requested_tile_size;
+ /* Keep rendering tiles until done. */
+ while(task->acquire_tile(this, tile)) {
+ if(!initialize_data_and_check_render_feasibility) {
+ /* Initialize data. */
+ /* Calculate per_thread_output_buffer_size. */
+ size_t output_buffer_size = 0;
+ ciErr = clGetMemObjectInfo((cl_mem)tile.buffer,
+ CL_MEM_SIZE,
+ sizeof(output_buffer_size),
+ &output_buffer_size,
+ NULL);
+ assert(ciErr == CL_SUCCESS && "Can't get tile.buffer mem object info");
+ /* This value is different when running on AMD and NV. */
+ if(background) {
+ /* In offline render the number of buffer elements
+ * associated with tile.buffer is the current tile size.
+ */
+ per_thread_output_buffer_size =
+ output_buffer_size / (tile.w * tile.h);
+ }
+ else {
+ /* In interactive rendering, unlike offline render, the number
+ * of buffer elements associated with tile.buffer is the entire
+ * viewport size.
+ */
+ per_thread_output_buffer_size =
+ output_buffer_size / (tile.buffers->params.width *
+ tile.buffers->params.height);
+ }
+ /* Check render feasibility. */
+ feasible_global_work_size = get_feasible_global_work_size(
+ tile_size,
+ CL_MEM_PTR(const_mem_map["__data"]->device_pointer));
+ max_render_feasible_tile_size =
+ get_max_render_feasible_tile_size(
+ feasible_global_work_size);
+ need_to_split_tiles_further =
+ need_to_split_tile(tile_size.x,
+ tile_size.y,
+ max_render_feasible_tile_size);
+ initialize_data_and_check_render_feasibility = true;
+ }
+ if(need_to_split_tiles_further) {
+ int2 split_tile_size =
+ get_split_tile_size(tile,
+ max_render_feasible_tile_size);
+ vector<SplitRenderTile> to_path_trace_render_tiles =
+ split_tiles(tile, split_tile_size);
+ /* Print message to console. */
+ if(background && (to_path_trace_render_tiles.size() > 1)) {
+ fprintf(stderr, "Message: Tiles need to be split "
+ "further inside path trace (due to insufficient "
+ "device global memory for the split kernel to "
+ "function).\n"
+ "The current tile of dimensions %dx%d is split "
+ "into tiles of dimension %dx%d for render.\n",
+ tile.w, tile.h,
+ split_tile_size.x,
+ split_tile_size.y);
+ }
+ /* Process all split tiles. */
+ for(int tile_iter = 0;
+ tile_iter < to_path_trace_render_tiles.size();
+ ++tile_iter)
+ {
+ path_trace(to_path_trace_render_tiles[tile_iter],
+ max_render_feasible_tile_size);
+ }
+ }
+ else {
+ /* No splitting required; process the entire tile at once.
+ * The render-feasible tile size is the user-set tile size
+ * itself, rounded up to a multiple of the work-group size. */
+ max_render_feasible_tile_size.x =
+ (((tile_size.x - 1) / SPLIT_KERNEL_LOCAL_SIZE_X) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_X;
+ max_render_feasible_tile_size.y =
+ (((tile_size.y - 1) / SPLIT_KERNEL_LOCAL_SIZE_Y) + 1) *
+ SPLIT_KERNEL_LOCAL_SIZE_Y;
+ /* buffer_rng_state_stride is simply the tile's own stride. */
+ SplitRenderTile split_tile(tile);
+ split_tile.buffer_rng_state_stride = tile.stride;
+ path_trace(split_tile, max_render_feasible_tile_size);
+ }
+ tile.sample = tile.start_sample + tile.num_samples;
+
+ /* Complete kernel execution before releasing the tile. */
+ /* This helps in multi-device render: the device that reaches the
+ * critical-section function release_tile() waits there (stalling
+ * other devices from entering release_tile()) until all of its
+ * kernels complete. If device1 (a slow-render device) reached
+ * release_tile() first, it would stall device2 (a fast-render
+ * device) from proceeding to render the next tile.
+ */
+ clFinish(cqCommandQueue);
+
+ task->release_tile(tile);
+ }
+ }
+ }
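
The per_thread_output_buffer_size computation above divides the total buffer size by the number of pixels the buffer actually covers, which differs between offline and interactive rendering. A condensed sketch of just that divisor choice (function name hypothetical):

    /* Sketch: divisor selection for the per-thread output buffer size. */
    size_t per_thread_size(size_t total_buffer_size,
                           bool background,
                           int tile_w, int tile_h,
                           int viewport_w, int viewport_h)
    {
        if(background) {
            /* Offline render: tile.buffer covers only the current tile. */
            return total_buffer_size / (tile_w * tile_h);
        }
        /* Interactive render: tile.buffer covers the whole viewport. */
        return total_buffer_size / (viewport_w * viewport_h);
    }
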
+
+protected:
+ cl_mem mem_alloc(size_t bufsize, cl_mem_flags mem_flag = CL_MEM_READ_WRITE)
+ {
+ cl_mem ptr;
+ ptr = clCreateBuffer(cxContext, mem_flag, bufsize, NULL, &ciErr);
+ if(opencl_error(ciErr)) {
+ assert(0);
+ }
+ return ptr;
}
};
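
The protected mem_alloc() wrapper funnels every raw clCreateBuffer() call through opencl_error(), so release builds report the failure and debug builds trip the assert. A standalone sketch of the same pattern (helper name hypothetical, context setup assumed to exist):

    /* Sketch: error-checked OpenCL buffer creation. */
    cl_mem create_checked_buffer(cl_context context, size_t bufsize, cl_int *err)
    {
        cl_mem ptr = clCreateBuffer(context, CL_MEM_READ_WRITE, bufsize, NULL, err);
        if(*err != CL_SUCCESS) {
            /* The device class routes this through opencl_error() and
             * asserts in debug builds; a plain helper just returns NULL. */
            return NULL;
        }
        return ptr;
    }
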
Device *device_opencl_create(DeviceInfo& info, Stats &stats, bool background)
{
- return new OpenCLDevice(info, stats, background);
+ vector<OpenCLPlatformDevice> usable_devices;
+ opencl_get_usable_devices(&usable_devices);
+ assert(info.num < usable_devices.size());
+ OpenCLPlatformDevice& platform_device = usable_devices[info.num];
+ char name[256];
+ if(clGetPlatformInfo(platform_device.platform_id,
+ CL_PLATFORM_NAME,
+ sizeof(name),
+ &name,
+ NULL) != CL_SUCCESS)
+ {
+ VLOG(1) << "Failed to retrieve platform name, using mega kernel.";
+ return new OpenCLDeviceMegaKernel(info, stats, background);
+ }
+ string platform_name = name;
+ cl_device_type device_type;
+ if(clGetDeviceInfo(platform_device.device_id,
+ CL_DEVICE_TYPE,
+ sizeof(cl_device_type),
+ &device_type,
+ NULL) != CL_SUCCESS)
+ {
+ VLOG(1) << "Failed to retrieve device type, using mega kernel,";
+ return new OpenCLDeviceMegaKernel(info, stats, background);
+ }
+ if(opencl_kernel_use_split(platform_name, device_type)) {
+ VLOG(1) << "Using split kernel.";
+ return new OpenCLDeviceSplitKernel(info, stats, background);
+ }
+ else {
+ VLOG(1) << "Using mega kernel.";
+ return new OpenCLDeviceMegaKernel(info, stats, background);
+ }
}
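
On the caller side the factory composes with the enumeration below; a hedged sketch (Stats assumed default-constructible, first device picked arbitrarily):

    /* Sketch: enumerate usable OpenCL devices, then create the chosen one.
     * device_opencl_create() decides split vs. mega kernel internally. */
    if(device_opencl_init()) {
        vector<DeviceInfo> devices;
        device_opencl_info(devices);
        if(!devices.empty()) {
            Stats stats;
            Device *device = device_opencl_create(devices[0], stats, true);
            /* ... render ... */
            delete device;
        }
    }
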
-bool device_opencl_init(void) {
+bool device_opencl_init(void)
+{
static bool initialized = false;
static bool result = false;
- if (initialized)
+ if(initialized)
return result;
initialized = true;
- // OpenCL disabled for now, only works with this environment variable set
- if(!getenv("CYCLES_OPENCL_TEST")) {
- result = false;
- }
- else {
- result = clewInit() == CLEW_SUCCESS;
- }
+ result = clewInit() == CLEW_SUCCESS;
return result;
}
void device_opencl_info(vector<DeviceInfo>& devices)
{
- vector<cl_device_id> device_ids;
- cl_uint num_devices = 0;
- vector<cl_platform_id> platform_ids;
- cl_uint num_platforms = 0;
-
- /* get devices */
- if(clGetPlatformIDs(0, NULL, &num_platforms) != CL_SUCCESS || num_platforms == 0)
- return;
-
- platform_ids.resize(num_platforms);
-
- if(clGetPlatformIDs(num_platforms, &platform_ids[0], NULL) != CL_SUCCESS)
- return;
-
- /* devices are numbered consecutively across platforms */
- int num_base = 0;
-
- for (int platform = 0; platform < num_platforms; platform++, num_base += num_devices) {
- num_devices = 0;
- if(clGetDeviceIDs(platform_ids[platform], opencl_device_type(), 0, NULL, &num_devices) != CL_SUCCESS || num_devices == 0)
+ vector<OpenCLPlatformDevice> usable_devices;
+ opencl_get_usable_devices(&usable_devices);
+ /* Devices are numbered consecutively across platforms. */
+ int num_devices = 0;
+ foreach(OpenCLPlatformDevice& platform_device, usable_devices) {
+ cl_platform_id platform_id = platform_device.platform_id;
+ cl_device_id device_id = platform_device.device_id;
+ /* We always increment the device number, so there's a 1:1 mapping
+ * from info.num to the index inside the usable_devices vector.
+ */
+ ++num_devices;
+ char platform_name[256];
+ if(clGetPlatformInfo(platform_id,
+ CL_PLATFORM_NAME,
+ sizeof(platform_name),
+ &platform_name,
+ NULL) != CL_SUCCESS)
+ {
continue;
-
- device_ids.resize(num_devices);
-
- if(clGetDeviceIDs(platform_ids[platform], opencl_device_type(), num_devices, &device_ids[0], NULL) != CL_SUCCESS)
+ }
+ char device_name[1024] = "\0";
+ if(clGetDeviceInfo(device_id,
+ CL_DEVICE_NAME,
+ sizeof(device_name),
+ &device_name,
+ NULL) != CL_SUCCESS)
+ {
+ continue;
+ }
+ cl_device_type device_type;
+ if(clGetDeviceInfo(device_id,
+ CL_DEVICE_TYPE,
+ sizeof(cl_device_type),
+ &device_type,
+ NULL) != CL_SUCCESS)
+ {
continue;
+ }
+ DeviceInfo info;
+ info.type = DEVICE_OPENCL;
+ info.description = string_remove_trademark(string(device_name));
+ info.num = num_devices - 1;
+ info.id = string_printf("OPENCL_%d", info.num);
+ /* We don't know if it's used for display, but assume it is. */
+ info.display_device = true;
+ info.advanced_shading = opencl_kernel_use_advanced_shading(platform_name);
+ info.pack_images = true;
+ info.use_split_kernel = opencl_kernel_use_split(platform_name,
+ device_type);
+ devices.push_back(info);
+ }
+}
- char pname[256];
- clGetPlatformInfo(platform_ids[platform], CL_PLATFORM_NAME, sizeof(pname), &pname, NULL);
- string platform_name = pname;
+string device_opencl_capabilities(void)
+{
+ string result = "";
+ string error_msg = ""; /* Only used by opencl_assert(), but in the future
+ * it could also be nicely reported to the console.
+ */
+ cl_uint num_platforms = 0;
+ opencl_assert(clGetPlatformIDs(0, NULL, &num_platforms));
+ if(num_platforms == 0) {
+ return "No OpenCL platforms found\n";
+ }
+ result += string_printf("Number of platforms: %u\n", num_platforms);
- /* add devices */
- for(int num = 0; num < num_devices; num++) {
- cl_device_id device_id = device_ids[num];
- char name[1024] = "\0";
+ vector<cl_platform_id> platform_ids;
+ platform_ids.resize(num_platforms);
+ opencl_assert(clGetPlatformIDs(num_platforms, &platform_ids[0], NULL));
+
+#define APPEND_STRING_INFO(func, id, name, what) \
+ do { \
+ char data[1024] = "\0"; \
+ opencl_assert(func(id, what, sizeof(data), &data, NULL)); \
+ result += string_printf("%s: %s\n", name, data); \
+ } while(false)
+#define APPEND_PLATFORM_STRING_INFO(id, name, what) \
+ APPEND_STRING_INFO(clGetPlatformInfo, id, "\tPlatform " name, what)
+#define APPEND_DEVICE_STRING_INFO(id, name, what) \
+ APPEND_STRING_INFO(clGetDeviceInfo, id, "\t\t\tDevice " name, what)
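
For reference, a single APPEND_DEVICE_STRING_INFO() call below, e.g. for CL_DEVICE_NAME, expands through both macro layers to roughly:

    /* Expansion of APPEND_DEVICE_STRING_INFO(device_id, "Name", CL_DEVICE_NAME).
     * The adjacent literals "\t\t\tDevice " "Name" concatenate to
     * "\t\t\tDevice Name". */
    do {
        char data[1024] = "\0";
        opencl_assert(clGetDeviceInfo(device_id, CL_DEVICE_NAME,
                                      sizeof(data), &data, NULL));
        result += string_printf("%s: %s\n", "\t\t\tDevice " "Name", data);
    } while(false);
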
- if(clGetDeviceInfo(device_id, CL_DEVICE_NAME, sizeof(name), &name, NULL) != CL_SUCCESS)
- continue;
+ vector<cl_device_id> device_ids;
+ for(cl_uint platform = 0; platform < num_platforms; ++platform) {
+ cl_platform_id platform_id = platform_ids[platform];
+
+ result += string_printf("Platform #%u\n", platform);
- DeviceInfo info;
+ APPEND_PLATFORM_STRING_INFO(platform_id, "Name", CL_PLATFORM_NAME);
+ APPEND_PLATFORM_STRING_INFO(platform_id, "Vendor", CL_PLATFORM_VENDOR);
+ APPEND_PLATFORM_STRING_INFO(platform_id, "Version", CL_PLATFORM_VERSION);
+ APPEND_PLATFORM_STRING_INFO(platform_id, "Profile", CL_PLATFORM_PROFILE);
+ APPEND_PLATFORM_STRING_INFO(platform_id, "Extensions", CL_PLATFORM_EXTENSIONS);
- info.type = DEVICE_OPENCL;
- info.description = string(name);
- info.num = num_base + num;
- info.id = string_printf("OPENCL_%d", info.num);
- /* we don't know if it's used for display, but assume it is */
- info.display_device = true;
- info.advanced_shading = opencl_kernel_use_advanced_shading(platform_name);
- info.pack_images = true;
+ cl_uint num_devices = 0;
+ opencl_assert(clGetDeviceIDs(platform_ids[platform],
+ CL_DEVICE_TYPE_ALL,
+ 0,
+ NULL,
+ &num_devices));
+ result += string_printf("\tNumber of devices: %u\n", num_devices);
- devices.push_back(info);
+ device_ids.resize(num_devices);
+ opencl_assert(clGetDeviceIDs(platform_ids[platform],
+ CL_DEVICE_TYPE_ALL,
+ num_devices,
+ &device_ids[0],
+ NULL));
+ for(cl_uint device = 0; device < num_devices; ++device) {
+ cl_device_id device_id = device_ids[device];
+
+ result += string_printf("\t\tDevice: #%u\n", device);
+
+ APPEND_DEVICE_STRING_INFO(device_id, "Name", CL_DEVICE_NAME);
+ APPEND_DEVICE_STRING_INFO(device_id, "Vendor", CL_DEVICE_VENDOR);
+ APPEND_DEVICE_STRING_INFO(device_id, "OpenCL C Version", CL_DEVICE_OPENCL_C_VERSION);
+ APPEND_DEVICE_STRING_INFO(device_id, "Profile", CL_DEVICE_PROFILE);
+ APPEND_DEVICE_STRING_INFO(device_id, "Version", CL_DEVICE_VERSION);
+ APPEND_DEVICE_STRING_INFO(device_id, "Extensions", CL_DEVICE_EXTENSIONS);
}
}
-}
-string device_opencl_capabilities(void)
-{
- /* TODO(sergey): Not implemented yet. */
- return "";
+#undef APPEND_STRING_INFO
+#undef APPEND_PLATFORM_STRING_INFO
+#undef APPEND_DEVICE_STRING_INFO
+
+ return result;
}
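
Given the string_printf() formats above, the resulting report has this shape (platform and device names below are illustrative, not captured output):

    Number of platforms: 1
    Platform #0
    	Platform Name: NVIDIA CUDA
    	Platform Vendor: NVIDIA Corporation
    	...
    	Number of devices: 1
    		Device: #0
    			Device Name: GeForce GTX 980
    			Device Vendor: NVIDIA Corporation
    			...
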
CCL_NAMESPACE_END
diff --git a/intern/cycles/device/device_task.cpp b/intern/cycles/device/device_task.cpp
index 2fe2f334176..d527540f300 100644
--- a/intern/cycles/device/device_task.cpp
+++ b/intern/cycles/device/device_task.cpp
@@ -111,7 +111,7 @@ void DeviceTask::update_progress(RenderTile *rtile)
if(update_tile_sample) {
double current_time = time_dt();
- if (current_time - last_update_time >= 1.0) {
+ if(current_time - last_update_time >= 1.0) {
update_tile_sample(*rtile);
last_update_time = current_time;
diff --git a/intern/cycles/device/device_task.h b/intern/cycles/device/device_task.h
index 84945bcf9a5..834ea60988a 100644
--- a/intern/cycles/device/device_task.h
+++ b/intern/cycles/device/device_task.h
@@ -57,14 +57,15 @@ public:
void update_progress(RenderTile *rtile);
- boost::function<bool(Device *device, RenderTile&)> acquire_tile;
- boost::function<void(void)> update_progress_sample;
- boost::function<void(RenderTile&)> update_tile_sample;
- boost::function<void(RenderTile&)> release_tile;
- boost::function<bool(void)> get_cancel;
+ function<bool(Device *device, RenderTile&)> acquire_tile;
+ function<void(void)> update_progress_sample;
+ function<void(RenderTile&)> update_tile_sample;
+ function<void(RenderTile&)> release_tile;
+ function<bool(void)> get_cancel;
bool need_finish_queue;
bool integrator_branched;
+ int2 requested_tile_size;
protected:
double last_update_time;
};
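
The switch from boost::function to Cycles' own function wrapper leaves the callback wiring unchanged. A hedged sketch of how session code fills these members (the Session/Progress method names and function_bind usage are assumptions mirroring typical Cycles session code, not part of this patch):

    /* Sketch: filling the DeviceTask callbacks before handing the task
     * to the device. */
    DeviceTask task(DeviceTask::PATH_TRACE);
    task.acquire_tile = function_bind(&Session::acquire_tile, this, _1, _2);
    task.release_tile = function_bind(&Session::release_tile, this, _1);
    task.get_cancel = function_bind(&Progress::get_cancel, &this->progress);
    task.requested_tile_size = make_int2(tile_size_x, tile_size_y);
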