diff options
author | Sergey Sharybin <sergey.vfx@gmail.com> | 2016-05-18 12:15:28 +0300 |
---|---|---|
committer | Sergey Sharybin <sergey.vfx@gmail.com> | 2016-05-18 12:15:28 +0300 |
commit | 792e147e2ce6211af704ae20082f9e3d905afe32 (patch) | |
tree | 03f4e98cec1d582e6518de80bc17db8ff1427807 /intern/cycles/kernel/kernel_volume.h | |
parent | cbe7f9dd03634a29082f51d05a2b1b71c6fc6aef (diff) |
Cycles: Fix compilation error of CUDA kernels after recent volume commit
Apparently the code path with malloc() was enabled for CUDA.
Diffstat (limited to 'intern/cycles/kernel/kernel_volume.h')
-rw-r--r-- | intern/cycles/kernel/kernel_volume.h | 18 |
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/intern/cycles/kernel/kernel_volume.h b/intern/cycles/kernel/kernel_volume.h
index 224c275b03d..0af5ff50619 100644
--- a/intern/cycles/kernel/kernel_volume.h
+++ b/intern/cycles/kernel/kernel_volume.h
@@ -627,6 +627,11 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg, PathState *sta
 		step_size = kernel_data.integrator.volume_step_size;
 		/* compute exact steps in advance for malloc */
 		max_steps = max((int)ceilf(ray->t/step_size), 1);
+		if(max_steps > global_max_steps) {
+			max_steps = global_max_steps;
+			step_size = ray->t / (float)max_steps;
+		}
+#ifdef __KERNEL_CPU__
 		/* NOTE: For the branched path tracing it's possible to have direct
 		 * and indirect light integration both having volume segments allocated.
 		 * We detect this using index in the pre-allocated memory. Currently we
@@ -640,17 +645,16 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg, PathState *sta
 		const int index = kg->decoupled_volume_steps_index;
 		assert(index < sizeof(kg->decoupled_volume_steps) /
 		               sizeof(*kg->decoupled_volume_steps));
-		if(max_steps > global_max_steps) {
-			max_steps = global_max_steps;
-			step_size = ray->t / (float)max_steps;
-		}
 		if(kg->decoupled_volume_steps[index] == NULL) {
 			kg->decoupled_volume_steps[index] =
 			        (VolumeStep*)malloc(sizeof(VolumeStep)*global_max_steps);
 		}
 		segment->steps = kg->decoupled_volume_steps[index];
-		random_jitter_offset = lcg_step_float(&state->rng_congruential) * step_size;
 		++kg->decoupled_volume_steps_index;
+#else
+		segment->steps = (VolumeStep*)malloc(sizeof(VolumeStep)*max_steps);
+#endif
+		random_jitter_offset = lcg_step_float(&state->rng_congruential) * step_size;
 	}
 	else {
 		max_steps = 1;
@@ -764,12 +768,16 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg, PathState *sta
 ccl_device void kernel_volume_decoupled_free(KernelGlobals *kg, VolumeSegment *segment)
 {
 	if(segment->steps != &segment->stack_step) {
+#ifdef __KERNEL_CPU__
 		/* NOTE: We only allow free last allocated segment.
 		 * No random order of alloc/free is supported. */
 		assert(kg->decoupled_volume_steps_index > 0);
 		assert(segment->steps ==
 		       kg->decoupled_volume_steps[kg->decoupled_volume_steps_index - 1]);
 		--kg->decoupled_volume_steps_index;
+#else
+		free(segment->steps);
+#endif
 	}
 }