From c47d669f247d4762cfeede867c43c638e40c14c3 Mon Sep 17 00:00:00 2001
From: Campbell Barton
Date: Wed, 1 May 2019 21:14:11 +1000
Subject: Cleanup: comments (long lines) in cycles

---
 intern/cycles/blender/blender_session.cpp | 16 +++---
 intern/cycles/bvh/bvh8.cpp | 52 +++++++++----------
 intern/cycles/bvh/bvh8.h | 58 +++++++++++-----------
 intern/cycles/bvh/bvh_embree.cpp | 22 ++++----
 intern/cycles/device/device.cpp | 3 +-
 intern/cycles/device/device_cuda.cpp | 5 +-
 intern/cycles/device/device_denoising.cpp | 12 +++--
 intern/cycles/device/device_opencl.cpp | 4 +-
 intern/cycles/device/device_split_kernel.h | 2 +-
 intern/cycles/device/device_task.h | 3 +-
 intern/cycles/device/opencl/opencl.h | 4 +-
 intern/cycles/device/opencl/opencl_split.cpp | 8 +--
 intern/cycles/kernel/bvh/obvh_shadow_all.h | 4 +-
 intern/cycles/kernel/bvh/obvh_traversal.h | 8 +--
 intern/cycles/kernel/bvh/qbvh_nodes.h | 2 +-
 intern/cycles/kernel/bvh/qbvh_shadow_all.h | 2 +-
 .../cycles/kernel/closure/bsdf_ashikhmin_shirley.h | 19 +++----
 intern/cycles/kernel/closure/bsdf_hair.h | 4 +-
 .../cycles/kernel/closure/bsdf_hair_principled.h | 3 +-
 .../cycles/kernel/closure/bsdf_microfacet_multi.h | 18 ++++---
 .../kernel/closure/bsdf_microfacet_multi_impl.h | 23 +++++----
 intern/cycles/kernel/closure/bsdf_util.h | 2 +-
 intern/cycles/kernel/closure/bssrdf.h | 6 ++-
 intern/cycles/kernel/filter/filter_features.h | 5 +-
 intern/cycles/kernel/filter/filter_features_sse.h | 4 +-
 intern/cycles/kernel/filter/filter_nlm_cpu.h | 3 +-
 intern/cycles/kernel/filter/filter_prefilter.h | 26 ++++++----
 intern/cycles/kernel/filter/filter_transform.h | 8 +--
 intern/cycles/kernel/filter/filter_transform_gpu.h | 8 +--
 intern/cycles/kernel/filter/filter_transform_sse.h | 8 +--
 intern/cycles/kernel/geom/geom_object.h | 3 +-
 .../cycles/kernel/geom/geom_triangle_intersect.h | 58 +++++++++++-----------
 intern/cycles/kernel/kernel_bake.h | 6 ++-
 intern/cycles/kernel/kernel_id_passes.h | 32 ++++++------
 intern/cycles/kernel/kernel_light.h | 3 +-
 intern/cycles/kernel/kernel_montecarlo.h | 31 ++++++++----
 intern/cycles/kernel/kernel_path.h | 12 ++---
 intern/cycles/kernel/kernel_path_branched.h | 4 +-
 intern/cycles/kernel/kernel_path_surface.h | 12 +++--
 intern/cycles/kernel/kernel_volume.h | 4 +-
 intern/cycles/kernel/kernels/cuda/kernel_config.h | 3 +-
 intern/cycles/kernel/osl/osl_closures.cpp | 8 +--
 intern/cycles/kernel/osl/osl_services.cpp | 2 +-
 intern/cycles/kernel/shaders/stdosl.h | 18 +++---
 intern/cycles/kernel/split/kernel_buffer_update.h | 4 +-
 intern/cycles/kernel/split/kernel_data_init.h | 8 +--
 ..._holdout_emission_blurring_pathtermination_ao.h | 6 +--
 .../kernel/split/kernel_next_iteration_setup.h | 6 +--
 .../cycles/kernel/split/kernel_split_data_types.h | 11 ++--
 intern/cycles/kernel/svm/svm_ao.h | 28 +++++------
 intern/cycles/kernel/svm/svm_ies.h | 18 ++++---
 intern/cycles/kernel/svm/svm_voronoi.h | 3 +-
 intern/cycles/render/camera.cpp | 6 ++-
 intern/cycles/render/denoising.cpp | 12 ++---
 intern/cycles/render/denoising.h | 15 ++++--
 intern/cycles/render/mesh.cpp | 5 +-
 intern/cycles/render/shader.h | 6 ++-
 intern/cycles/render/svm.cpp | 6 ++-
 intern/cycles/render/tile.cpp | 31 ++++++++----
 intern/cycles/subd/subd_split.cpp | 2 +-
 intern/cycles/util/util_color.h | 3 +-
 intern/cycles/util/util_debug.h | 3 +-
 intern/cycles/util/util_half.h | 3 +-
 intern/cycles/util/util_ies.cpp | 28 ++++++-----
 intern/cycles/util/util_math_fast.h | 12 +++--
 intern/cycles/util/util_math_intersect.h | 2 +-
 intern/cycles/util/util_math_matrix.h | 38 ++++++++------
intern/cycles/util/util_profiling.cpp | 3 +- intern/cycles/util/util_progress.h | 3 +- intern/cycles/util/util_task.cpp | 2 +- intern/cycles/util/util_types_float8.h | 52 +++++++++---------- intern/cycles/util/util_types_float8_impl.h | 52 +++++++++---------- 72 files changed, 504 insertions(+), 402 deletions(-) (limited to 'intern') diff --git a/intern/cycles/blender/blender_session.cpp b/intern/cycles/blender/blender_session.cpp index 29a97bf6546..3a7e5f02b1d 100644 --- a/intern/cycles/blender/blender_session.cpp +++ b/intern/cycles/blender/blender_session.cpp @@ -155,8 +155,8 @@ void BlenderSession::create_session() /* There is no single depsgraph to use for the entire render. * So we need to handle this differently. * - * We could loop over the final render result render layers in pipeline and keep Cycles unaware of multiple layers, - * or perhaps move syncing further down in the pipeline. + * We could loop over the final render result render layers in pipeline and keep Cycles unaware + * of multiple layers, or perhaps move syncing further down in the pipeline. */ /* create sync */ sync = new BlenderSync(b_engine, b_data, b_scene, scene, !background, session->progress); @@ -528,14 +528,15 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_) /* Attempt to free all data which is held by Blender side, since at this * point we knwo that we've got everything to render current view layer. */ - /* At the moment we only free if we are not doing multi-view (or if we are rendering the last view). - * See T58142/D4239 for discussion. + /* At the moment we only free if we are not doing multi-view + * (or if we are rendering the last view). See T58142/D4239 for discussion. */ if (view_index == num_views - 1) { free_blender_memory_if_possible(); } - /* Make sure all views have different noise patterns. - hardcoded value just to make it random */ + /* Make sure all views have different noise patterns. - hardcoded value just to make it random + */ if (view_index != 0) { scene->integrator->seed += hash_int_2d(scene->integrator->seed, hash_int(view_index * 0xdeadbeef)); @@ -1057,8 +1058,9 @@ void BlenderSession::update_status_progress() } double current_time = time_dt(); - /* When rendering in a window, redraw the status at least once per second to keep the elapsed and remaining time up-to-date. - * For headless rendering, only report when something significant changes to keep the console output readable. */ + /* When rendering in a window, redraw the status at least once per second to keep the elapsed and + * remaining time up-to-date. For headless rendering, only report when something significant + * changes to keep the console output readable. */ if (status != last_status || (!headless && (current_time - last_status_time) > 1.0)) { b_engine.update_stats("", (timestatus + scene_status + status).c_str()); b_engine.update_memory_stats(mem_used, mem_peak); diff --git a/intern/cycles/bvh/bvh8.cpp b/intern/cycles/bvh/bvh8.cpp index e812d806b94..10fd01dd8d0 100644 --- a/intern/cycles/bvh/bvh8.cpp +++ b/intern/cycles/bvh/bvh8.cpp @@ -1,30 +1,30 @@ /* -* Original code Copyright 2017, Intel Corporation -* Modifications Copyright 2018, Blender Foundation. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* * Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. 
-* * Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* * Neither the name of Intel Corporation nor the names of its contributors -* may be used to endorse or promote products derived from this software -* without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ + * Original code Copyright 2017, Intel Corporation + * Modifications Copyright 2018, Blender Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ #include "bvh/bvh8.h" diff --git a/intern/cycles/bvh/bvh8.h b/intern/cycles/bvh/bvh8.h index fc07eadcada..6292353c7d4 100644 --- a/intern/cycles/bvh/bvh8.h +++ b/intern/cycles/bvh/bvh8.h @@ -1,30 +1,30 @@ /* -* Original code Copyright 2017, Intel Corporation -* Modifications Copyright 2018, Blender Foundation. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* * Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. 
-* * Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* * Neither the name of Intel Corporation nor the names of its contributors -* may be used to endorse or promote products derived from this software -* without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ + * Original code Copyright 2017, Intel Corporation + * Modifications Copyright 2018, Blender Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ #ifndef __BVH8_H__ #define __BVH8_H__ @@ -50,9 +50,9 @@ class Progress; #define BVH_UNALIGNED_ONODE_SIZE 28 /* BVH8 -* -* Octo BVH, with each node having eight children, to use with SIMD instructions. -*/ + * + * Octo BVH, with each node having eight children, to use with SIMD instructions. + */ class BVH8 : public BVH { protected: /* constructor */ diff --git a/intern/cycles/bvh/bvh_embree.cpp b/intern/cycles/bvh/bvh_embree.cpp index 5ef9622aba2..088ec759331 100644 --- a/intern/cycles/bvh/bvh_embree.cpp +++ b/intern/cycles/bvh/bvh_embree.cpp @@ -18,18 +18,19 @@ * It supports triangles, curves, object and deformation blur and instancing. * Not supported are thick line segments, those have no native equivalent in Embree. 
* They could be implemented using Embree's thick curves, at the expense of wasted memory. - * User defined intersections for Embree could also be an option, but since Embree only uses aligned BVHs - * for user geometry, this would come with reduced performance and/or higher memory usage. + * User defined intersections for Embree could also be an option, but since Embree only uses + * aligned BVHs for user geometry, this would come with reduced performance and/or higher memory + * usage. * - * Since Embree allows object to be either curves or triangles but not both, Cycles object IDs are maapped - * to Embree IDs by multiplying by two and adding one for curves. + * Since Embree allows object to be either curves or triangles but not both, Cycles object IDs are + * maapped to Embree IDs by multiplying by two and adding one for curves. * - * This implementation shares RTCDevices between Cycles instances. Eventually each instance should get - * a separate RTCDevice to correctly keep track of memory usage. + * This implementation shares RTCDevices between Cycles instances. Eventually each instance should + * get a separate RTCDevice to correctly keep track of memory usage. * - * Vertex and index buffers are duplicated between Cycles device arrays and Embree. These could be merged, - * which would requrie changes to intersection refinement, shader setup, mesh light sampling and a few - * other places in Cycles where direct access to vertex data is required. + * Vertex and index buffers are duplicated between Cycles device arrays and Embree. These could be + * merged, which would requrie changes to intersection refinement, shader setup, mesh light + * sampling and a few other places in Cycles where direct access to vertex data is required. */ #ifdef WITH_EMBREE @@ -40,7 +41,8 @@ # include "bvh/bvh_embree.h" -/* Kernel includes are necessary so that the filter function for Embree can access the packed BVH. */ +/* Kernel includes are necessary so that the filter function for Embree can access the packed BVH. 
+ */ # include "kernel/bvh/bvh_embree.h" # include "kernel/kernel_compat_cpu.h" # include "kernel/split/kernel_split_data_types.h" diff --git a/intern/cycles/device/device.cpp b/intern/cycles/device/device.cpp index 16a68e8b855..47d111802cd 100644 --- a/intern/cycles/device/device.cpp +++ b/intern/cycles/device/device.cpp @@ -287,7 +287,8 @@ void Device::draw_pixels(device_memory &rgba, } glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer); - /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */ + /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered + */ glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW); float *vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY); diff --git a/intern/cycles/device/device_cuda.cpp b/intern/cycles/device/device_cuda.cpp index 936e1dccfa6..cac1edb188e 100644 --- a/intern/cycles/device/device_cuda.cpp +++ b/intern/cycles/device/device_cuda.cpp @@ -1019,7 +1019,7 @@ class CUDADevice : public Device { size_t bytes; cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name)); - //assert(bytes == size); + // assert(bytes == size); cuda_assert(cuMemcpyHtoD(mem, host, size)); } @@ -2127,7 +2127,8 @@ class CUDADevice : public Device { } glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer); - /* invalidate old contents - avoids stalling if buffer is still waiting in queue to be rendered */ + /* invalidate old contents - + * avoids stalling if buffer is still waiting in queue to be rendered */ glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(float), NULL, GL_STREAM_DRAW); vpointer = (float *)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY); diff --git a/intern/cycles/device/device_denoising.cpp b/intern/cycles/device/device_denoising.cpp index 05a7fb8ae4d..55548c98b97 100644 --- a/intern/cycles/device/device_denoising.cpp +++ b/intern/cycles/device/device_denoising.cpp @@ -104,7 +104,8 @@ void DenoisingTask::set_render_buffer(RenderTile *rtiles) void DenoisingTask::setup_denoising_buffer() { - /* Expand filter_area by radius pixels and clamp the result to the extent of the neighboring tiles */ + /* Expand filter_area by radius pixels and clamp the result to the extent of the neighboring + * tiles */ rect = rect_from_shape(filter_area.x, filter_area.y, filter_area.z, filter_area.w); rect = rect_expand(rect, radius); rect = rect_clip(rect, @@ -149,16 +150,19 @@ void DenoisingTask::prefilter_shadowing() device_sub_ptr buffer_var(buffer.mem, 5 * buffer.pass_stride, buffer.pass_stride); device_sub_ptr filtered_var(buffer.mem, 6 * buffer.pass_stride, buffer.pass_stride); - /* Get the A/B unfiltered passes, the combined sample variance, the estimated variance of the sample variance and the buffer variance. */ + /* Get the A/B unfiltered passes, the combined sample variance, the estimated variance of the + * sample variance and the buffer variance. */ functions.divide_shadow(*unfiltered_a, *unfiltered_b, *sample_var, *sample_var_var, *buffer_var); - /* Smooth the (generally pretty noisy) buffer variance using the spatial information from the sample variance. */ + /* Smooth the (generally pretty noisy) buffer variance using the spatial information from the + * sample variance. */ nlm_state.set_parameters(6, 3, 4.0f, 1.0f, false); functions.non_local_means(*buffer_var, *sample_var, *sample_var_var, *filtered_var); /* Reuse memory, the previous data isn't needed anymore. 
*/ device_ptr filtered_a = *buffer_var, filtered_b = *sample_var; - /* Use the smoothed variance to filter the two shadow half images using each other for weight calculation. */ + /* Use the smoothed variance to filter the two shadow half images using each other for weight + * calculation. */ nlm_state.set_parameters(5, 3, 1.0f, 0.25f, false); functions.non_local_means(*unfiltered_a, *unfiltered_b, *filtered_var, filtered_a); functions.non_local_means(*unfiltered_b, *unfiltered_a, *filtered_var, filtered_b); diff --git a/intern/cycles/device/device_opencl.cpp b/intern/cycles/device/device_opencl.cpp index 99a8d2438d6..b07596c60ff 100644 --- a/intern/cycles/device/device_opencl.cpp +++ b/intern/cycles/device/device_opencl.cpp @@ -136,8 +136,8 @@ string device_opencl_capabilities() } string result = ""; string error_msg = ""; /* Only used by opencl_assert(), but in the future - * it could also be nicely reported to the console. - */ + * it could also be nicely reported to the console. + */ cl_uint num_platforms = 0; opencl_assert(device_opencl_get_num_platforms_safe(&num_platforms)); if (num_platforms == 0) { diff --git a/intern/cycles/device/device_split_kernel.h b/intern/cycles/device/device_split_kernel.h index c9fb2ac844f..6ff326bf214 100644 --- a/intern/cycles/device/device_split_kernel.h +++ b/intern/cycles/device/device_split_kernel.h @@ -27,7 +27,7 @@ CCL_NAMESPACE_BEGIN * Since some bytes may be needed for aligning chunks of memory; * This is the amount of memory that we dedicate for that purpose. */ -#define DATA_ALLOCATION_MEM_FACTOR 5000000 //5MB +#define DATA_ALLOCATION_MEM_FACTOR 5000000 // 5MB /* Types used for split kernel */ diff --git a/intern/cycles/device/device_task.h b/intern/cycles/device/device_task.h index 5cc2e5e25db..a04062ed4ef 100644 --- a/intern/cycles/device/device_task.h +++ b/intern/cycles/device/device_task.h @@ -40,7 +40,8 @@ class DenoiseParams { float strength; /* Preserve more or less detail based on feature passes. */ float feature_strength; - /* When removing pixels that don't carry information, use a relative threshold instead of an absolute one. */ + /* When removing pixels that don't carry information, + * use a relative threshold instead of an absolute one. */ bool relative_pca; /* How many frames before and after the current center frame are included. */ int neighbor_frames; diff --git a/intern/cycles/device/opencl/opencl.h b/intern/cycles/device/opencl/opencl.h index e7bafa0b8a8..70773902790 100644 --- a/intern/cycles/device/opencl/opencl.h +++ b/intern/cycles/device/opencl/opencl.h @@ -358,8 +358,8 @@ class OpenCLDevice : public Device { OpenCLSplitPrograms(OpenCLDevice *device); ~OpenCLSplitPrograms(); - /* Load the kernels and put the created kernels in the given `programs` - * paramter. */ + /* Load the kernels and put the created kernels in the given + * `programs` paramter. */ void load_kernels(vector &programs, const DeviceRequestedFeatures &requested_features, bool is_preview = false); diff --git a/intern/cycles/device/opencl/opencl_split.cpp b/intern/cycles/device/opencl/opencl_split.cpp index 70b1a643044..442b92100bb 100644 --- a/intern/cycles/device/opencl/opencl_split.cpp +++ b/intern/cycles/device/opencl/opencl_split.cpp @@ -265,7 +265,7 @@ void OpenCLDevice::OpenCLSplitPrograms::load_kernels( ADD_SPLIT_KERNEL_PROGRAM(shader_eval); /* Quick kernels bundled in a single program to reduce overhead of starting - * Blender processes. */ + * Blender processes. 
*/ program_split = OpenCLDevice::OpenCLProgram( device, "split_bundle", @@ -668,7 +668,8 @@ OpenCLDevice::OpenCLDevice(DeviceInfo &info, Stats &stats, Profiler &profiler, b return; } - /* Allocate this right away so that texture_info is placed at offset 0 in the device memory buffers */ + /* Allocate this right away so that texture_info + * is placed at offset 0 in the device memory buffers. */ texture_info.resize(1); memory_manager.alloc("texture_info", texture_info); @@ -1149,7 +1150,8 @@ void OpenCLDevice::tex_alloc(device_memory &mem) << string_human_readable_size(mem.memory_size()) << ")"; memory_manager.alloc(mem.name, mem); - /* Set the pointer to non-null to keep code that inspects its value from thinking its unallocated. */ + /* Set the pointer to non-null to keep code that inspects its value from thinking its + * unallocated. */ mem.device_pointer = 1; textures[mem.name] = &mem; textures_need_update = true; diff --git a/intern/cycles/kernel/bvh/obvh_shadow_all.h b/intern/cycles/kernel/bvh/obvh_shadow_all.h index 4bae519578a..b7ab75b723c 100644 --- a/intern/cycles/kernel/bvh/obvh_shadow_all.h +++ b/intern/cycles/kernel/bvh/obvh_shadow_all.h @@ -431,7 +431,7 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg, } prim_addr++; - } //while + } // while } else { kernel_assert((kernel_tex_fetch(__prim_type, (prim_addr)) & PRIMITIVE_ALL) == @@ -568,7 +568,7 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg, } prim_addr++; - } //while prim + } // while prim } } #if BVH_FEATURE(BVH_INSTANCING) diff --git a/intern/cycles/kernel/bvh/obvh_traversal.h b/intern/cycles/kernel/bvh/obvh_traversal.h index b24e9977ffd..9095233f8b6 100644 --- a/intern/cycles/kernel/bvh/obvh_traversal.h +++ b/intern/cycles/kernel/bvh/obvh_traversal.h @@ -333,8 +333,8 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg, } /* Eight children are hit, push all onto stack and sort 8 - * stack items, continue with closest child. - */ + * stack items, continue with closest child. + */ r = __bscf(child_mask); int c7 = __float_as_int(cnodes[r]); float d7 = ((float *)&dist)[r]; @@ -409,7 +409,7 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg, return true; } } - } //for + } // for } else { kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type); @@ -430,7 +430,7 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg, return true; } } - } //prim count + } // prim count break; } #if BVH_FEATURE(BVH_MOTION) diff --git a/intern/cycles/kernel/bvh/qbvh_nodes.h b/intern/cycles/kernel/bvh/qbvh_nodes.h index bf2389eebad..070406fb18a 100644 --- a/intern/cycles/kernel/bvh/qbvh_nodes.h +++ b/intern/cycles/kernel/bvh/qbvh_nodes.h @@ -127,7 +127,7 @@ ccl_device_inline void qbvh_stack_sort(QBVHStackItem *ccl_restrict s1, /* Axis-aligned nodes intersection */ -//ccl_device_inline int qbvh_aligned_node_intersect(KernelGlobals *ccl_restrict kg, +// ccl_device_inline int qbvh_aligned_node_intersect(KernelGlobals *ccl_restrict kg, static int qbvh_aligned_node_intersect(KernelGlobals *ccl_restrict kg, const ssef &isect_near, const ssef &isect_far, diff --git a/intern/cycles/kernel/bvh/qbvh_shadow_all.h b/intern/cycles/kernel/bvh/qbvh_shadow_all.h index 3845afd8969..682251bf25b 100644 --- a/intern/cycles/kernel/bvh/qbvh_shadow_all.h +++ b/intern/cycles/kernel/bvh/qbvh_shadow_all.h @@ -37,7 +37,7 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(QBVH)(KernelGlobals *kg, uint *num_hits) { /* TODO(sergey): - * - Test if pushing distance on the stack helps. 
+ * - Test if pushing distance on the stack helps. * - Likely and unlikely for if() statements. * - Test restrict attribute for pointers. */ diff --git a/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h b/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h index b3b1c37748d..6495ae743ab 100644 --- a/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h +++ b/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h @@ -85,15 +85,11 @@ ccl_device_forceinline float3 bsdf_ashikhmin_shirley_eval_reflect(const ShaderCl float HdotI = fmaxf(fabsf(dot(H, I)), 1e-6f); float HdotN = fmaxf(dot(H, N), 1e-6f); - float pump = - 1.0f / - fmaxf( - 1e-6f, - (HdotI * - fmaxf( - NdotO, - NdotI))); /* pump from original paper (first derivative disc., but cancels the HdotI in the pdf nicely) */ - /*float pump = 1.0f / fmaxf(1e-4f, ((NdotO + NdotI) * (NdotO*NdotI))); */ /* pump from d-brdf paper */ + /* pump from original paper + * (first derivative disc., but cancels the HdotI in the pdf nicely) */ + float pump = 1.0f / fmaxf(1e-6f, (HdotI * fmaxf(NdotO, NdotI))); + /* pump from d-brdf paper */ + /*float pump = 1.0f / fmaxf(1e-4f, ((NdotO + NdotI) * (NdotO*NdotI))); */ float n_x = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_x); float n_y = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_y); @@ -105,9 +101,8 @@ ccl_device_forceinline float3 bsdf_ashikhmin_shirley_eval_reflect(const ShaderCl float norm = (n_x + 1.0f) / (8.0f * M_PI_F); out = NdotO * norm * lobe * pump; - *pdf = - norm * lobe / - HdotI; /* this is p_h / 4(H.I) (conversion from 'wh measure' to 'wi measure', eq. 8 in paper) */ + /* this is p_h / 4(H.I) (conversion from 'wh measure' to 'wi measure', eq. 8 in paper). */ + *pdf = norm * lobe / HdotI; } else { /* anisotropic */ diff --git a/intern/cycles/kernel/closure/bsdf_hair.h b/intern/cycles/kernel/closure/bsdf_hair.h index 6b2a9a97d30..4b6f5b3b439 100644 --- a/intern/cycles/kernel/closure/bsdf_hair.h +++ b/intern/cycles/kernel/closure/bsdf_hair.h @@ -224,7 +224,7 @@ ccl_device int bsdf_hair_reflection_sample(const ShaderClosure *sc, fast_sincosf(phi, &sinphi, &cosphi); *omega_in = (cosphi * costheta_i) * locy - (sinphi * costheta_i) * locx + (sintheta_i)*Tg; - //differentials - TODO: find a better approximation for the reflective bounce + // differentials - TODO: find a better approximation for the reflective bounce #ifdef __RAY_DIFFERENTIALS__ *domega_in_dx = 2 * dot(locy, dIdx) * locy - dIdx; *domega_in_dy = 2 * dot(locy, dIdy) * locy - dIdy; @@ -285,7 +285,7 @@ ccl_device int bsdf_hair_transmission_sample(const ShaderClosure *sc, fast_sincosf(phi, &sinphi, &cosphi); *omega_in = (cosphi * costheta_i) * locy - (sinphi * costheta_i) * locx + (sintheta_i)*Tg; - //differentials - TODO: find a better approximation for the transmission bounce + // differentials - TODO: find a better approximation for the transmission bounce #ifdef __RAY_DIFFERENTIALS__ *domega_in_dx = 2 * dot(locy, dIdx) * locy - dIdx; *domega_in_dy = 2 * dot(locy, dIdy) * locy - dIdy; diff --git a/intern/cycles/kernel/closure/bsdf_hair_principled.h b/intern/cycles/kernel/closure/bsdf_hair_principled.h index a4bba2fbf6c..4db5a6cc830 100644 --- a/intern/cycles/kernel/closure/bsdf_hair_principled.h +++ b/intern/cycles/kernel/closure/bsdf_hair_principled.h @@ -60,7 +60,8 @@ ccl_device_inline float cos_from_sin(const float s) return safe_sqrtf(1.0f - s * s); } -/* Gives the change in direction in the normal plane for the given angles and p-th-order scattering. 
*/ +/* Gives the change in direction in the normal plane for the given angles and p-th-order + * scattering. */ ccl_device_inline float delta_phi(int p, float gamma_o, float gamma_t) { return 2.0f * p * gamma_t - 2.0f * gamma_o + p * M_PI_F; diff --git a/intern/cycles/kernel/closure/bsdf_microfacet_multi.h b/intern/cycles/kernel/closure/bsdf_microfacet_multi.h index 2cc1a9c5299..07be33ee6b5 100644 --- a/intern/cycles/kernel/closure/bsdf_microfacet_multi.h +++ b/intern/cycles/kernel/closure/bsdf_microfacet_multi.h @@ -16,7 +16,8 @@ CCL_NAMESPACE_BEGIN -/* Most of the code is based on the supplemental implementations from https://eheitzresearch.wordpress.com/240-2/. */ +/* Most of the code is based on the supplemental implementations from + * https://eheitzresearch.wordpress.com/240-2/. */ /* === GGX Microfacet distribution functions === */ @@ -80,7 +81,8 @@ ccl_device_forceinline float2 mf_sampleP22_11(const float cosI, return make_float2(slopeX, -slopeY); } -/* Visible normal sampling for the GGX distribution (based on page 7 of the supplemental implementation). */ +/* Visible normal sampling for the GGX distribution + * (based on page 7 of the supplemental implementation). */ ccl_device_forceinline float3 mf_sample_vndf(const float3 wi, const float2 alpha, const float randx, @@ -134,7 +136,8 @@ ccl_device_forceinline float3 mf_eval_phase_glossy(const float3 w, return make_float3(phase, phase, phase); } -/* Phase function for dielectric transmissive materials, including both reflection and refraction according to the dielectric fresnel term. */ +/* Phase function for dielectric transmissive materials, including both reflection and refraction + * according to the dielectric fresnel term. */ ccl_device_forceinline float3 mf_sample_phase_glass( const float3 wi, const float eta, const float3 wm, const float randV, bool *outside) { @@ -227,7 +230,8 @@ ccl_device_forceinline float mf_G1(const float3 w, const float C1, const float l return powf(C1, lambda); } -/* Sampling from the visible height distribution (based on page 17 of the supplemental implementation). */ +/* Sampling from the visible height distribution (based on page 17 of the supplemental + * implementation). */ ccl_device_forceinline bool mf_sample_height( const float3 w, float *h, float *C1, float *G1, float *lambda, const float U) { @@ -254,7 +258,8 @@ ccl_device_forceinline bool mf_sample_height( } /* === PDF approximations for the different phase functions. === - * As explained in bsdf_microfacet_multi_impl.h, using approximations with MIS still produces an unbiased result. */ + * As explained in bsdf_microfacet_multi_impl.h, using approximations with MIS still produces an + * unbiased result. */ /* Approximation for the albedo of the single-scattering GGX distribution, * the missing energy is then approximated as a diffuse reflection for the PDF. */ @@ -342,7 +347,8 @@ ccl_device_forceinline float mf_glass_pdf(const float3 wi, } } -/* === Actual random walk implementations, one version of mf_eval and mf_sample per phase function. === */ +/* === Actual random walk implementations === */ +/* One version of mf_eval and mf_sample per phase function. 
*/ #define MF_NAME_JOIN(x, y) x##_##y #define MF_NAME_EVAL(x, y) MF_NAME_JOIN(x, y) diff --git a/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h b/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h index 79247ee8057..04d9b22d7d2 100644 --- a/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h +++ b/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h @@ -16,14 +16,14 @@ /* Evaluate the BSDF from wi to wo. * Evaluation is split into the analytical single-scattering BSDF and the multi-scattering BSDF, - * which is evaluated stochastically through a random walk. At each bounce (except for the first one), - * the amount of reflection from here towards wo is evaluated before bouncing again. + * which is evaluated stochastically through a random walk. At each bounce (except for the first + * one), the amount of reflection from here towards wo is evaluated before bouncing again. * - * Because of the random walk, the evaluation is not deterministic, but its expected value is equal to - * the correct BSDF, which is enough for Monte-Carlo rendering. The PDF also can't be determined - * analytically, so the single-scattering PDF plus a diffuse term to account for the multi-scattered - * energy is used. In combination with MIS, that is enough to produce an unbiased result, although - * the balance heuristic isn't necessarily optimal anymore. + * Because of the random walk, the evaluation is not deterministic, but its expected value is equal + * to the correct BSDF, which is enough for Monte-Carlo rendering. The PDF also can't be determined + * analytically, so the single-scattering PDF plus a diffuse term to account for the + * multi-scattered energy is used. In combination with MIS, that is enough to produce an unbiased + * result, although the balance heuristic isn't necessarily optimal anymore. */ ccl_device_forceinline float3 MF_FUNCTION_FULL_NAME(mf_eval)(float3 wi, float3 wo, @@ -36,7 +36,8 @@ ccl_device_forceinline float3 MF_FUNCTION_FULL_NAME(mf_eval)(float3 wi, bool use_fresnel, const float3 cspec0) { - /* Evaluating for a shallower incoming direction produces less noise, and the properties of the BSDF guarantee reciprocity. */ + /* Evaluating for a shallower incoming direction produces less noise, and the properties of the + * BSDF guarantee reciprocity. */ bool swapped = false; #ifdef MF_MULTI_GLASS if (wi.z * wo.z < 0.0f) { @@ -180,9 +181,9 @@ ccl_device_forceinline float3 MF_FUNCTION_FULL_NAME(mf_eval)(float3 wi, return eval; } -/* Perform a random walk on the microsurface starting from wi, returning the direction in which the walk - * escaped the surface in wo. The function returns the throughput between wi and wo. - * Without reflection losses due to coloring or fresnel absorption in conductors, the sampling is optimal. +/* Perform a random walk on the microsurface starting from wi, returning the direction in which the + * walk escaped the surface in wo. The function returns the throughput between wi and wo. Without + * reflection losses due to coloring or fresnel absorption in conductors, the sampling is optimal. 
*/ ccl_device_forceinline float3 MF_FUNCTION_FULL_NAME(mf_sample)(float3 wi, float3 *wo, diff --git a/intern/cycles/kernel/closure/bsdf_util.h b/intern/cycles/kernel/closure/bsdf_util.h index a9a27edd7de..3bce47caedb 100644 --- a/intern/cycles/kernel/closure/bsdf_util.h +++ b/intern/cycles/kernel/closure/bsdf_util.h @@ -155,7 +155,7 @@ interpolate_fresnel_color(float3 L, float3 H, float ior, float F0, float3 cspec0 /* Calculate the fresnel interpolation factor * The value from fresnel_dielectric_cos(...) has to be normalized because * the cspec0 keeps the F0 color - */ + */ float F0_norm = 1.0f / (1.0f - F0); float FH = (fresnel_dielectric_cos(dot(L, H), ior) - F0) * F0_norm; diff --git a/intern/cycles/kernel/closure/bssrdf.h b/intern/cycles/kernel/closure/bssrdf.h index 57804eca269..a7d9f90b443 100644 --- a/intern/cycles/kernel/closure/bssrdf.h +++ b/intern/cycles/kernel/closure/bssrdf.h @@ -450,7 +450,8 @@ ccl_device void bssrdf_sample(const ShaderClosure *sc, float xi, float *r, float else if (bssrdf->type == CLOSURE_BSSRDF_GAUSSIAN_ID) { bssrdf_gaussian_sample(radius, xi, r, h); } - else { /*if(bssrdf->type == CLOSURE_BSSRDF_BURLEY_ID || bssrdf->type == CLOSURE_BSSRDF_PRINCIPLED_ID)*/ + else { /* if (bssrdf->type == CLOSURE_BSSRDF_BURLEY_ID || + * bssrdf->type == CLOSURE_BSSRDF_PRINCIPLED_ID) */ bssrdf_burley_sample(radius, xi, r, h); } } @@ -466,7 +467,8 @@ ccl_device float bssrdf_channel_pdf(const Bssrdf *bssrdf, float radius, float r) else if (bssrdf->type == CLOSURE_BSSRDF_GAUSSIAN_ID) { return bssrdf_gaussian_pdf(radius, r); } - else { /*if(bssrdf->type == CLOSURE_BSSRDF_BURLEY_ID || bssrdf->type == CLOSURE_BSSRDF_PRINCIPLED_ID)*/ + else { /* if (bssrdf->type == CLOSURE_BSSRDF_BURLEY_ID || + * bssrdf->type == CLOSURE_BSSRDF_PRINCIPLED_ID)*/ return bssrdf_burley_pdf(radius, r); } } diff --git a/intern/cycles/kernel/filter/filter_features.h b/intern/cycles/kernel/filter/filter_features.h index 809ccfe8be6..8a2af957146 100644 --- a/intern/cycles/kernel/filter/filter_features.h +++ b/intern/cycles/kernel/filter/filter_features.h @@ -18,8 +18,9 @@ CCL_NAMESPACE_BEGIN #define ccl_get_feature(buffer, pass) (buffer)[(pass)*pass_stride] -/* Loop over the pixels in the range [low.x, high.x) x [low.y, high.y).+ * pixel_buffer always points to the current pixel in the first pass. - * Repeat the loop for every secondary frame if there are any. */ +/* Loop over the pixels in the range [low.x, high.x) x [low.y, high.y).+ * pixel_buffer always + * points to the current pixel in the first pass. Repeat the loop for every secondary frame if + * there are any. */ #define FOR_PIXEL_WINDOW \ for (int frame = 0; frame < tile_info->num_frames; frame++) { \ pixel.z = tile_info->frames[frame]; \ diff --git a/intern/cycles/kernel/filter/filter_features_sse.h b/intern/cycles/kernel/filter/filter_features_sse.h index 1e0d6e93453..7bbd17066fd 100644 --- a/intern/cycles/kernel/filter/filter_features_sse.h +++ b/intern/cycles/kernel/filter/filter_features_sse.h @@ -20,8 +20,8 @@ CCL_NAMESPACE_BEGIN /* Loop over the pixels in the range [low.x, high.x) x [low.y, high.y), 4 at a time. * pixel_buffer always points to the first of the 4 current pixel in the first pass. - * x4 and y4 contain the coordinates of the four pixels, active_pixels contains a mask that's set for all pixels within the window. - * Repeat the loop for every secondary frame if there are any. */ + * x4 and y4 contain the coordinates of the four pixels, active_pixels contains a mask that's set + * for all pixels within the window. 
Repeat the loop for every secondary frame if there are any. */ #define FOR_PIXEL_WINDOW_SSE \ for (int frame = 0; frame < tile_info->num_frames; frame++) { \ pixel.z = tile_info->frames[frame]; \ diff --git a/intern/cycles/kernel/filter/filter_nlm_cpu.h b/intern/cycles/kernel/filter/filter_nlm_cpu.h index a94266a8786..24200c29203 100644 --- a/intern/cycles/kernel/filter/filter_nlm_cpu.h +++ b/intern/cycles/kernel/filter/filter_nlm_cpu.h @@ -197,7 +197,8 @@ ccl_device_inline void kernel_filter_nlm_construct_gramian(int dx, bool use_time) { int4 clip_area = rect_clip(rect, filter_window); - /* fy and fy are in filter-window-relative coordinates, while x and y are in feature-window-relative coordinates. */ + /* fy and fy are in filter-window-relative coordinates, + * while x and y are in feature-window-relative coordinates. */ for (int y = clip_area.y; y < clip_area.w; y++) { for (int x = clip_area.x; x < clip_area.z; x++) { const int low = max(rect.x, x - f); diff --git a/intern/cycles/kernel/filter/filter_prefilter.h b/intern/cycles/kernel/filter/filter_prefilter.h index 8211311313d..b48a3f3f68b 100644 --- a/intern/cycles/kernel/filter/filter_prefilter.h +++ b/intern/cycles/kernel/filter/filter_prefilter.h @@ -16,14 +16,19 @@ CCL_NAMESPACE_BEGIN -/* First step of the shadow prefiltering, performs the shadow division and stores all data +/** + * First step of the shadow prefiltering, performs the shadow division and stores all data * in a nice and easy rectangular array that can be passed to the NLM filter. * * Calculates: - * unfiltered: Contains the two half images of the shadow feature pass - * sampleVariance: The sample-based variance calculated in the kernel. Note: This calculation is biased in general, and especially here since the variance of the ratio can only be approximated. - * sampleVarianceV: Variance of the sample variance estimation, quite noisy (since it's essentially the buffer variance of the two variance halves) - * bufferVariance: The buffer-based variance of the shadow feature. Unbiased, but quite noisy. + * \param unfiltered: Contains the two half images of the shadow feature pass + * \param sampleVariance: The sample-based variance calculated in the kernel. + * Note: This calculation is biased in general, + * and especially here since the variance of the ratio can only be approximated. + * \param sampleVarianceV: Variance of the sample variance estimation, quite noisy + * (since it's essentially the buffer variance of the two variance halves) + * \param bufferVariance: The buffer-based variance of the shadow feature. + * Unbiased, but quite noisy. */ ccl_device void kernel_filter_divide_shadow(int sample, CCL_FILTER_TILE_INFO, @@ -204,10 +209,10 @@ ccl_device void kernel_filter_detect_outliers(int x, if (L > ref) { /* The pixel appears to be an outlier. - * However, it may just be a legitimate highlight. Therefore, it is checked how likely it is that the pixel - * should actually be at the reference value: - * If the reference is within the 3-sigma interval, the pixel is assumed to be a statistical outlier. - * Otherwise, it is very unlikely that the pixel should be darker, which indicates a legitimate highlight. + * However, it may just be a legitimate highlight. Therefore, it is checked how likely it is + * that the pixel should actually be at the reference value: If the reference is within the + * 3-sigma interval, the pixel is assumed to be a statistical outlier. 
Otherwise, it is very + * unlikely that the pixel should be darker, which indicates a legitimate highlight. */ if (pixel_variance < 0.0f || pixel_variance > 9.0f * max_variance) { @@ -219,7 +224,8 @@ ccl_device void kernel_filter_detect_outliers(int x, float stddev = sqrtf(pixel_variance); if (L - 3 * stddev < ref) { /* The pixel is an outlier, so negate the depth value to mark it as one. - * Also, scale its brightness down to the outlier threshold to avoid trouble with the NLM weights. */ + * Also, scale its brightness down to the outlier threshold to avoid trouble with the NLM + * weights. */ depth[idx] = -depth[idx]; float fac = ref / L; color *= fac; diff --git a/intern/cycles/kernel/filter/filter_transform.h b/intern/cycles/kernel/filter/filter_transform.h index 69e3c7c458d..585c4b33787 100644 --- a/intern/cycles/kernel/filter/filter_transform.h +++ b/intern/cycles/kernel/filter/filter_transform.h @@ -55,7 +55,8 @@ ccl_device void kernel_filter_construct_transform(const float *ccl_restrict buff math_vector_scale(feature_means, 1.0f / num_pixels, num_features); - /* === Scale the shifted feature passes to a range of [-1; 1], will be baked into the transform later. === */ + /* === Scale the shifted feature passes to a range of [-1; 1] === + * Will be baked into the transform later. */ float feature_scale[DENOISE_FEATURES]; math_vector_zero(feature_scale, num_features); @@ -69,8 +70,9 @@ ccl_device void kernel_filter_construct_transform(const float *ccl_restrict buff filter_calculate_scale(feature_scale, use_time); /* === Generate the feature transformation. === - * This transformation maps the num_features-dimentional feature space to a reduced feature (r-feature) space - * which generally has fewer dimensions. This mainly helps to prevent overfitting. */ + * This transformation maps the num_features-dimentional feature space to a reduced feature + * (r-feature) space which generally has fewer dimensions. This mainly helps to prevent + * overfitting. */ float feature_matrix[DENOISE_FEATURES * DENOISE_FEATURES]; math_matrix_zero(feature_matrix, num_features); FOR_PIXEL_WINDOW diff --git a/intern/cycles/kernel/filter/filter_transform_gpu.h b/intern/cycles/kernel/filter/filter_transform_gpu.h index 89cddfd927f..41bbadb621d 100644 --- a/intern/cycles/kernel/filter/filter_transform_gpu.h +++ b/intern/cycles/kernel/filter/filter_transform_gpu.h @@ -61,7 +61,8 @@ ccl_device void kernel_filter_construct_transform(const ccl_global float *ccl_re math_vector_scale(feature_means, 1.0f / num_pixels, num_features); - /* === Scale the shifted feature passes to a range of [-1; 1], will be baked into the transform later. === */ + /* === Scale the shifted feature passes to a range of [-1; 1] === + * Will be baked into the transform later. */ float feature_scale[DENOISE_FEATURES]; math_vector_zero(feature_scale, num_features); @@ -75,8 +76,9 @@ ccl_device void kernel_filter_construct_transform(const ccl_global float *ccl_re filter_calculate_scale(feature_scale, use_time); /* === Generate the feature transformation. === - * This transformation maps the num_features-dimentional feature space to a reduced feature (r-feature) space - * which generally has fewer dimensions. This mainly helps to prevent overfitting. */ + * This transformation maps the num_features-dimentional feature space to a reduced feature + * (r-feature) space which generally has fewer dimensions. This mainly helps to prevent + * overfitting. 
*/ float feature_matrix[DENOISE_FEATURES * DENOISE_FEATURES]; math_matrix_zero(feature_matrix, num_features); FOR_PIXEL_WINDOW diff --git a/intern/cycles/kernel/filter/filter_transform_sse.h b/intern/cycles/kernel/filter/filter_transform_sse.h index 22397b292db..830444645d7 100644 --- a/intern/cycles/kernel/filter/filter_transform_sse.h +++ b/intern/cycles/kernel/filter/filter_transform_sse.h @@ -58,7 +58,8 @@ ccl_device void kernel_filter_construct_transform(const float *ccl_restrict buff feature_means[i] = reduce_add(feature_means[i]) * pixel_scale; } - /* === Scale the shifted feature passes to a range of [-1; 1], will be baked into the transform later. === */ + /* === Scale the shifted feature passes to a range of [-1; 1] === + * Will be baked into the transform later. */ float4 feature_scale[DENOISE_FEATURES]; math_vector_zero_sse(feature_scale, num_features); FOR_PIXEL_WINDOW_SSE @@ -72,8 +73,9 @@ ccl_device void kernel_filter_construct_transform(const float *ccl_restrict buff filter_calculate_scale_sse(feature_scale, use_time); /* === Generate the feature transformation. === - * This transformation maps the num_features-dimentional feature space to a reduced feature (r-feature) space - * which generally has fewer dimensions. This mainly helps to prevent overfitting. */ + * This transformation maps the num_features-dimentional feature space to a reduced feature + * (r-feature) space which generally has fewer dimensions. This mainly helps to prevent + * overfitting. */ float4 feature_matrix_sse[DENOISE_FEATURES * DENOISE_FEATURES]; math_matrix_zero_sse(feature_matrix_sse, num_features); FOR_PIXEL_WINDOW_SSE diff --git a/intern/cycles/kernel/geom/geom_object.h b/intern/cycles/kernel/geom/geom_object.h index 2792fd64c61..f410e6e27e2 100644 --- a/intern/cycles/kernel/geom/geom_object.h +++ b/intern/cycles/kernel/geom/geom_object.h @@ -386,7 +386,8 @@ ccl_device float3 particle_angular_velocity(KernelGlobals *kg, int particle) ccl_device_inline float3 bvh_clamp_direction(float3 dir) { - /* clamp absolute values by exp2f(-80.0f) to avoid division by zero when calculating inverse direction */ + /* clamp absolute values by exp2f(-80.0f) to avoid division by zero when calculating inverse + * direction */ #if defined(__KERNEL_SSE__) && defined(__KERNEL_SSE2__) const ssef oopes(8.271806E-25f, 8.271806E-25f, 8.271806E-25f, 0.0f); const ssef mask = _mm_cmpgt_ps(fabs(dir), oopes); diff --git a/intern/cycles/kernel/geom/geom_triangle_intersect.h b/intern/cycles/kernel/geom/geom_triangle_intersect.h index bcad03102d2..9c6fd498a80 100644 --- a/intern/cycles/kernel/geom/geom_triangle_intersect.h +++ b/intern/cycles/kernel/geom/geom_triangle_intersect.h @@ -178,7 +178,7 @@ ccl_device_inline int ray_triangle_intersect8(KernelGlobals *kg, _mm256_cmpeq_epi32(two256, UVW_256_1)); unsigned char mask_minmaxUVW_pos = _mm256_movemask_ps(_mm256_castsi256_ps(mask_minmaxUVW_256)); - if ((mask_minmaxUVW_pos & prim_num_mask) == prim_num_mask) { //all bits set + if ((mask_minmaxUVW_pos & prim_num_mask) == prim_num_mask) { // all bits set return false; } @@ -375,7 +375,7 @@ ccl_device_inline int triangle_intersect8(KernelGlobals *kg, tri_b[i] = *(__m128 *)&kg->__prim_tri_verts.data[tri_vindex++]; tri_c[i] = *(__m128 *)&kg->__prim_tri_verts.data[tri_vindex++]; } - //create 9 or 12 placeholders + // create 9 or 12 placeholders tri[0] = _mm256_castps128_ps256(tri_a[0]); //_mm256_zextps128_ps256 tri[1] = _mm256_castps128_ps256(tri_b[0]); //_mm256_zextps128_ps256 tri[2] = _mm256_castps128_ps256(tri_c[0]); 
//_mm256_zextps128_ps256 @@ -401,40 +401,40 @@ ccl_device_inline int triangle_intersect8(KernelGlobals *kg, } //------------------------------------------------ - //0! Xa0 Ya0 Za0 1 Xa4 Ya4 Za4 1 - //1! Xb0 Yb0 Zb0 1 Xb4 Yb4 Zb4 1 - //2! Xc0 Yc0 Zc0 1 Xc4 Yc4 Zc4 1 + // 0! Xa0 Ya0 Za0 1 Xa4 Ya4 Za4 1 + // 1! Xb0 Yb0 Zb0 1 Xb4 Yb4 Zb4 1 + // 2! Xc0 Yc0 Zc0 1 Xc4 Yc4 Zc4 1 - //3! Xa1 Ya1 Za1 1 Xa5 Ya5 Za5 1 - //4! Xb1 Yb1 Zb1 1 Xb5 Yb5 Zb5 1 - //5! Xc1 Yc1 Zc1 1 Xc5 Yc5 Zc5 1 + // 3! Xa1 Ya1 Za1 1 Xa5 Ya5 Za5 1 + // 4! Xb1 Yb1 Zb1 1 Xb5 Yb5 Zb5 1 + // 5! Xc1 Yc1 Zc1 1 Xc5 Yc5 Zc5 1 - //6! Xa2 Ya2 Za2 1 Xa6 Ya6 Za6 1 - //7! Xb2 Yb2 Zb2 1 Xb6 Yb6 Zb6 1 - //8! Xc2 Yc2 Zc2 1 Xc6 Yc6 Zc6 1 + // 6! Xa2 Ya2 Za2 1 Xa6 Ya6 Za6 1 + // 7! Xb2 Yb2 Zb2 1 Xb6 Yb6 Zb6 1 + // 8! Xc2 Yc2 Zc2 1 Xc6 Yc6 Zc6 1 - //9! Xa3 Ya3 Za3 1 Xa7 Ya7 Za7 1 - //10! Xb3 Yb3 Zb3 1 Xb7 Yb7 Zb7 1 - //11! Xc3 Yc3 Zc3 1 Xc7 Yc7 Zc7 1 + // 9! Xa3 Ya3 Za3 1 Xa7 Ya7 Za7 1 + // 10! Xb3 Yb3 Zb3 1 Xb7 Yb7 Zb7 1 + // 11! Xc3 Yc3 Zc3 1 Xc7 Yc7 Zc7 1 //"transpose" - tritmp[0] = _mm256_unpacklo_ps(tri[0], tri[3]); //0! Xa0 Xa1 Ya0 Ya1 Xa4 Xa5 Ya4 Ya5 - tritmp[1] = _mm256_unpackhi_ps(tri[0], tri[3]); //1! Za0 Za1 1 1 Za4 Za5 1 1 + tritmp[0] = _mm256_unpacklo_ps(tri[0], tri[3]); // 0! Xa0 Xa1 Ya0 Ya1 Xa4 Xa5 Ya4 Ya5 + tritmp[1] = _mm256_unpackhi_ps(tri[0], tri[3]); // 1! Za0 Za1 1 1 Za4 Za5 1 1 - tritmp[2] = _mm256_unpacklo_ps(tri[6], tri[9]); //2! Xa2 Xa3 Ya2 Ya3 Xa6 Xa7 Ya6 Ya7 - tritmp[3] = _mm256_unpackhi_ps(tri[6], tri[9]); //3! Za2 Za3 1 1 Za6 Za7 1 1 + tritmp[2] = _mm256_unpacklo_ps(tri[6], tri[9]); // 2! Xa2 Xa3 Ya2 Ya3 Xa6 Xa7 Ya6 Ya7 + tritmp[3] = _mm256_unpackhi_ps(tri[6], tri[9]); // 3! Za2 Za3 1 1 Za6 Za7 1 1 - tritmp[4] = _mm256_unpacklo_ps(tri[1], tri[4]); //4! Xb0 Xb1 Yb0 Yb1 Xb4 Xb5 Yb4 Yb5 - tritmp[5] = _mm256_unpackhi_ps(tri[1], tri[4]); //5! Zb0 Zb1 1 1 Zb4 Zb5 1 1 + tritmp[4] = _mm256_unpacklo_ps(tri[1], tri[4]); // 4! Xb0 Xb1 Yb0 Yb1 Xb4 Xb5 Yb4 Yb5 + tritmp[5] = _mm256_unpackhi_ps(tri[1], tri[4]); // 5! Zb0 Zb1 1 1 Zb4 Zb5 1 1 - tritmp[6] = _mm256_unpacklo_ps(tri[7], tri[10]); //6! Xb2 Xb3 Yb2 Yb3 Xb6 Xb7 Yb6 Yb7 - tritmp[7] = _mm256_unpackhi_ps(tri[7], tri[10]); //7! Zb2 Zb3 1 1 Zb6 Zb7 1 1 + tritmp[6] = _mm256_unpacklo_ps(tri[7], tri[10]); // 6! Xb2 Xb3 Yb2 Yb3 Xb6 Xb7 Yb6 Yb7 + tritmp[7] = _mm256_unpackhi_ps(tri[7], tri[10]); // 7! Zb2 Zb3 1 1 Zb6 Zb7 1 1 - tritmp[8] = _mm256_unpacklo_ps(tri[2], tri[5]); //8! Xc0 Xc1 Yc0 Yc1 Xc4 Xc5 Yc4 Yc5 - tritmp[9] = _mm256_unpackhi_ps(tri[2], tri[5]); //9! Zc0 Zc1 1 1 Zc4 Zc5 1 1 + tritmp[8] = _mm256_unpacklo_ps(tri[2], tri[5]); // 8! Xc0 Xc1 Yc0 Yc1 Xc4 Xc5 Yc4 Yc5 + tritmp[9] = _mm256_unpackhi_ps(tri[2], tri[5]); // 9! Zc0 Zc1 1 1 Zc4 Zc5 1 1 - tritmp[10] = _mm256_unpacklo_ps(tri[8], tri[11]); //10! Xc2 Xc3 Yc2 Yc3 Xc6 Xc7 Yc6 Yc7 - tritmp[11] = _mm256_unpackhi_ps(tri[8], tri[11]); //11! Zc2 Zc3 1 1 Zc6 Zc7 1 1 + tritmp[10] = _mm256_unpacklo_ps(tri[8], tri[11]); // 10! Xc2 Xc3 Yc2 Yc3 Xc6 Xc7 Yc6 Yc7 + tritmp[11] = _mm256_unpackhi_ps(tri[8], tri[11]); // 11! 
Zc2 Zc3 1 1 Zc6 Zc7 1 1 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ triA[0] = _mm256_castpd_ps( @@ -459,13 +459,13 @@ ccl_device_inline int triangle_intersect8(KernelGlobals *kg, triC[0] = _mm256_castpd_ps( _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[8]), - _mm256_castps_pd(tritmp[10]))); //Xc0 Xc1 Xc2 Xc3 Xc4 Xc5 Xc6 Xc7 + _mm256_castps_pd(tritmp[10]))); // Xc0 Xc1 Xc2 Xc3 Xc4 Xc5 Xc6 Xc7 triC[1] = _mm256_castpd_ps( _mm256_unpackhi_pd(_mm256_castps_pd(tritmp[8]), - _mm256_castps_pd(tritmp[10]))); //Yc0 Yc1 Yc2 Yc3 Yc4 Yc5 Yc6 Yc7 + _mm256_castps_pd(tritmp[10]))); // Yc0 Yc1 Yc2 Yc3 Yc4 Yc5 Yc6 Yc7 triC[2] = _mm256_castpd_ps( _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[9]), - _mm256_castps_pd(tritmp[11]))); //Zc0 Zc1 Zc2 Zc3 Zc4 Zc5 Zc6 Zc7 + _mm256_castps_pd(tritmp[11]))); // Zc0 Zc1 Zc2 Zc3 Zc4 Zc5 Zc6 Zc7 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ diff --git a/intern/cycles/kernel/kernel_bake.h b/intern/cycles/kernel/kernel_bake.h index 10b71bc6bdf..cd1ca5ea7ec 100644 --- a/intern/cycles/kernel/kernel_bake.h +++ b/intern/cycles/kernel/kernel_bake.h @@ -72,7 +72,8 @@ ccl_device_inline void compute_light_pass( # ifdef __SUBSURFACE__ /* sample subsurface scattering */ if ((pass_filter & BAKE_FILTER_SUBSURFACE) && (sd->flag & SD_BSSRDF)) { - /* when mixing BSSRDF and BSDF closures we should skip BSDF lighting if scattering was successful */ + /* When mixing BSSRDF and BSDF closures we should skip BSDF lighting + * if scattering was successful. */ SubsurfaceIndirectRays ss_indirect; kernel_path_subsurface_init_indirect(&ss_indirect); if (kernel_path_subsurface_scatter( @@ -123,7 +124,8 @@ ccl_device_inline void compute_light_pass( # ifdef __SUBSURFACE__ /* sample subsurface scattering */ if ((pass_filter & BAKE_FILTER_SUBSURFACE) && (sd->flag & SD_BSSRDF)) { - /* when mixing BSSRDF and BSDF closures we should skip BSDF lighting if scattering was successful */ + /* When mixing BSSRDF and BSDF closures we should skip BSDF lighting + * if scattering was successful. */ kernel_branched_path_subsurface_scatter( kg, sd, &indirect_sd, &emission_sd, &L_sample, &state, &ray, throughput); } diff --git a/intern/cycles/kernel/kernel_id_passes.h b/intern/cycles/kernel/kernel_id_passes.h index c1f4e39e5e7..1ca42e933d1 100644 --- a/intern/cycles/kernel/kernel_id_passes.h +++ b/intern/cycles/kernel/kernel_id_passes.h @@ -1,18 +1,18 @@ /* -* Copyright 2018 Blender Foundation -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2018 Blender Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ CCL_NAMESPACE_BEGIN @@ -32,7 +32,7 @@ ccl_device_inline void kernel_write_id_slots(ccl_global float *buffer, /* If the loop reaches an empty slot, the ID isn't in any slot yet - so add it! */ if (id_buffer[slot].x == ID_NONE) { /* Use an atomic to claim this slot. - * If a different thread got here first, try again from this slot on. */ + * If a different thread got here first, try again from this slot on. */ float old_id = atomic_compare_and_swap_float(buffer + slot * 2, ID_NONE, id); if (old_id != ID_NONE && old_id != id) { continue; @@ -54,7 +54,7 @@ ccl_device_inline void kernel_write_id_slots(ccl_global float *buffer, break; } /* If there already is a slot for that ID, add the weight. - * If no slot was found, add it to the last. */ + * If no slot was found, add it to the last. */ else if (id_buffer[slot].x == id || slot == num_slots - 1) { id_buffer[slot].y += weight; break; diff --git a/intern/cycles/kernel/kernel_light.h b/intern/cycles/kernel/kernel_light.h index 5e24f8dedaf..9128bfa9d95 100644 --- a/intern/cycles/kernel/kernel_light.h +++ b/intern/cycles/kernel/kernel_light.h @@ -524,7 +524,8 @@ ccl_device float background_light_pdf(KernelGlobals *kg, float3 P, float3 direct portal_pdf = background_portal_pdf(kg, P, direction, -1, &is_possible) * portal_sampling_pdf; if (!is_possible) { /* Portal sampling is not possible here because all portals point to the wrong side. - * If map sampling is possible, it would be used instead, otherwise fallback sampling is used. */ + * If map sampling is possible, it would be used instead, + * otherwise fallback sampling is used. */ if (portal_sampling_pdf == 1.0f) { return kernel_data.integrator.pdf_lights / M_4PI_F; } diff --git a/intern/cycles/kernel/kernel_montecarlo.h b/intern/cycles/kernel/kernel_montecarlo.h index a933be970c2..acd5086be3a 100644 --- a/intern/cycles/kernel/kernel_montecarlo.h +++ b/intern/cycles/kernel/kernel_montecarlo.h @@ -199,21 +199,27 @@ ccl_device float3 ensure_valid_reflection(float3 Ng, float3 I, float3 N) float NdotNg = dot(N, Ng); float3 X = normalize(N - NdotNg * Ng); + /* Keep math expressions. */ + /* clang-format off */ /* Calculate N.z and N.x in the local coordinate system. * * The goal of this computation is to find a N' that is rotated towards Ng just enough * to lift R' above the threshold (here called t), therefore dot(R', Ng) = t. * - * According to the standard reflection equation, this means that we want dot(2*dot(N', I)*N' - I, Ng) = t. + * According to the standard reflection equation, + * this means that we want dot(2*dot(N', I)*N' - I, Ng) = t. * - * Since the Z axis of our local coordinate system is Ng, dot(x, Ng) is just x.z, so we get 2*dot(N', I)*N'.z - I.z = t. + * Since the Z axis of our local coordinate system is Ng, dot(x, Ng) is just x.z, so we get + * 2*dot(N', I)*N'.z - I.z = t. * - * The rotation is simple to express in the coordinate system we formed - since N lies in the X-Z-plane, we know that - * N' will also lie in the X-Z-plane, so N'.y = 0 and therefore dot(N', I) = N'.x*I.x + N'.z*I.z . + * The rotation is simple to express in the coordinate system we formed - + * since N lies in the X-Z-plane, we know that N' will also lie in the X-Z-plane, + * so N'.y = 0 and therefore dot(N', I) = N'.x*I.x + N'.z*I.z . * * Furthermore, we want N' to be normalized, so N'.x = sqrt(1 - N'.z^2). 
* - * With these simplifications, we get the final equation 2*(sqrt(1 - N'.z^2)*I.x + N'.z*I.z)*N'.z - I.z = t. + * With these simplifications, + * we get the final equation 2*(sqrt(1 - N'.z^2)*I.x + N'.z*I.z)*N'.z - I.z = t. * * The only unknown here is N'.z, so we can solve for that. * @@ -227,8 +233,11 @@ ccl_device float3 ensure_valid_reflection(float3 Ng, float3 I, float3 N) * c = I.z*t + a * N'.z = +-sqrt(0.5*(+-b + c)/a) * - * Two solutions can immediately be discarded because they're negative so N' would lie in the lower hemisphere. + * Two solutions can immediately be discarded because they're negative so N' would lie in the + * lower hemisphere. */ + /* clang-format on */ + float Ix = dot(I, X), Iz = dot(I, Ng); float Ix2 = sqr(Ix), Iz2 = sqr(Iz); float a = Ix2 + Iz2; @@ -237,8 +246,9 @@ ccl_device float3 ensure_valid_reflection(float3 Ng, float3 I, float3 N) float c = Iz * threshold + a; /* Evaluate both solutions. - * In many cases one can be immediately discarded (if N'.z would be imaginary or larger than one), so check for that first. - * If no option is viable (might happen in extreme cases like N being in the wrong hemisphere), give up and return Ng. */ + * In many cases one can be immediately discarded (if N'.z would be imaginary or larger than + * one), so check for that first. If no option is viable (might happen in extreme cases like N + * being in the wrong hemisphere), give up and return Ng. */ float fac = 0.5f / a; float N1_z2 = fac * (b + c), N2_z2 = fac * (-b + c); bool valid1 = (N1_z2 > 1e-5f) && (N1_z2 <= (1.0f + 1e-5f)); @@ -256,8 +266,9 @@ ccl_device float3 ensure_valid_reflection(float3 Ng, float3 I, float3 N) valid1 = (R1 >= 1e-5f); valid2 = (R2 >= 1e-5f); if (valid1 && valid2) { - /* If both solutions are valid, return the one with the shallower reflection since it will be closer to the input - * (if the original reflection wasn't shallow, we would not be in this part of the function). */ + /* If both solutions are valid, return the one with the shallower reflection since it will be + * closer to the input (if the original reflection wasn't shallow, we would not be in this + * part of the function). */ N_new = (R1 < R2) ? N1 : N2; } else { diff --git a/intern/cycles/kernel/kernel_path.h b/intern/cycles/kernel/kernel_path.h index aa9ce3621c7..f3e2a8a234a 100644 --- a/intern/cycles/kernel/kernel_path.h +++ b/intern/cycles/kernel/kernel_path.h @@ -437,8 +437,8 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg, } /* path termination. this is a strange place to put the termination, it's - * mainly due to the mixed in MIS that we use. gives too many unneeded - * shader evaluations, only need emission if we are going to terminate */ + * mainly due to the mixed in MIS that we use. gives too many unneeded + * shader evaluations, only need emission if we are going to terminate */ float probability = path_state_continuation_probability(kg, state, throughput); if (probability == 0.0f) { @@ -464,7 +464,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg, # ifdef __SUBSURFACE__ /* bssrdf scatter to a different location on the same object, replacing - * the closures with a diffuse BSDF */ + * the closures with a diffuse BSDF */ if (sd->flag & SD_BSSRDF) { if (kernel_path_subsurface_scatter( kg, sd, emission_sd, L, state, ray, &throughput, &ss_indirect)) { @@ -575,8 +575,8 @@ ccl_device_forceinline void kernel_path_integrate(KernelGlobals *kg, } /* path termination. 
this is a strange place to put the termination, it's - * mainly due to the mixed in MIS that we use. gives too many unneeded - * shader evaluations, only need emission if we are going to terminate */ + * mainly due to the mixed in MIS that we use. gives too many unneeded + * shader evaluations, only need emission if we are going to terminate */ float probability = path_state_continuation_probability(kg, state, throughput); if (probability == 0.0f) { @@ -601,7 +601,7 @@ ccl_device_forceinline void kernel_path_integrate(KernelGlobals *kg, # ifdef __SUBSURFACE__ /* bssrdf scatter to a different location on the same object, replacing - * the closures with a diffuse BSDF */ + * the closures with a diffuse BSDF */ if (sd.flag & SD_BSSRDF) { if (kernel_path_subsurface_scatter( kg, &sd, emission_sd, L, state, ray, &throughput, &ss_indirect)) { diff --git a/intern/cycles/kernel/kernel_path_branched.h b/intern/cycles/kernel/kernel_path_branched.h index e8ce61024b3..f3a1ea3f4fd 100644 --- a/intern/cycles/kernel/kernel_path_branched.h +++ b/intern/cycles/kernel/kernel_path_branched.h @@ -428,8 +428,8 @@ ccl_device void kernel_branched_path_integrate(KernelGlobals *kg, /* transparency termination */ if (state.flag & PATH_RAY_TRANSPARENT) { /* path termination. this is a strange place to put the termination, it's - * mainly due to the mixed in MIS that we use. gives too many unneeded - * shader evaluations, only need emission if we are going to terminate */ + * mainly due to the mixed in MIS that we use. gives too many unneeded + * shader evaluations, only need emission if we are going to terminate */ float probability = path_state_continuation_probability(kg, &state, throughput); if (probability == 0.0f) { diff --git a/intern/cycles/kernel/kernel_path_surface.h b/intern/cycles/kernel/kernel_path_surface.h index 6251313c5f8..a1ab4951565 100644 --- a/intern/cycles/kernel/kernel_path_surface.h +++ b/intern/cycles/kernel/kernel_path_surface.h @@ -18,7 +18,8 @@ CCL_NAMESPACE_BEGIN #if defined(__BRANCHED_PATH__) || defined(__SUBSURFACE__) || defined(__SHADOW_TRICKS__) || \ defined(__BAKING__) -/* branched path tracing: connect path directly to position on one or more lights and add it to L */ +/* branched path tracing: connect path directly to position on one or more lights and add it to L + */ ccl_device_noinline void kernel_branched_path_surface_connect_light( KernelGlobals *kg, ShaderData *sd, @@ -62,8 +63,10 @@ ccl_device_noinline void kernel_branched_path_surface_connect_light( LightSample ls; if (lamp_light_sample(kg, i, light_u, light_v, sd->P, &ls)) { - /* The sampling probability returned by lamp_light_sample assumes that all lights were sampled. - * However, this code only samples lamps, so if the scene also had mesh lights, the real probability is twice as high. */ + /* The sampling probability returned by lamp_light_sample assumes that all lights were + * sampled. + * However, this code only samples lamps, so if the scene also had mesh lights, the real + * probability is twice as high. */ if (kernel_data.integrator.pdf_triangles != 0.0f) ls.pdf *= 2.0f; @@ -109,7 +112,8 @@ ccl_device_noinline void kernel_branched_path_surface_connect_light( LightSample ls; if (light_sample(kg, light_u, light_v, sd->time, sd->P, state->bounce, &ls)) { - /* Same as above, probability needs to be corrected since the sampling was forced to select a mesh light. */ + /* Same as above, probability needs to be corrected since the sampling was forced to + * select a mesh light. 
*/ if (kernel_data.integrator.num_all_lights) ls.pdf *= 2.0f; diff --git a/intern/cycles/kernel/kernel_volume.h b/intern/cycles/kernel/kernel_volume.h index e024003252f..1705f58b87d 100644 --- a/intern/cycles/kernel/kernel_volume.h +++ b/intern/cycles/kernel/kernel_volume.h @@ -559,7 +559,7 @@ kernel_volume_integrate_heterogeneous_distance(KernelGlobals *kg, float dt = new_t - t; /* use random position inside this segment to sample shader, - * for last shorter step we remap it to fit within the segment. */ + * for last shorter step we remap it to fit within the segment. */ if (new_t == ray->t) { step_offset *= (new_t - t) / step_size; } @@ -794,7 +794,7 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg, float dt = new_t - t; /* use random position inside this segment to sample shader, - * for last shorter step we remap it to fit within the segment. */ + * for last shorter step we remap it to fit within the segment. */ if (new_t == ray->t) { step_offset *= (new_t - t) / step_size; } diff --git a/intern/cycles/kernel/kernels/cuda/kernel_config.h b/intern/cycles/kernel/kernels/cuda/kernel_config.h index d9f349837a8..3ec00762e72 100644 --- a/intern/cycles/kernel/kernels/cuda/kernel_config.h +++ b/intern/cycles/kernel/kernels/cuda/kernel_config.h @@ -61,7 +61,8 @@ /* tunable parameters */ # define CUDA_THREADS_BLOCK_WIDTH 16 -/* CUDA 9.0 seems to cause slowdowns on high-end Pascal cards unless we increase the number of registers */ +/* CUDA 9.0 seems to cause slowdowns on high-end Pascal cards unless we increase the number of + * registers */ # if __CUDACC_VER_MAJOR__ >= 9 && __CUDA_ARCH__ >= 600 # define CUDA_KERNEL_MAX_REGISTERS 64 # else diff --git a/intern/cycles/kernel/osl/osl_closures.cpp b/intern/cycles/kernel/osl/osl_closures.cpp index aa7e2727577..27205df3732 100644 --- a/intern/cycles/kernel/osl/osl_closures.cpp +++ b/intern/cycles/kernel/osl/osl_closures.cpp @@ -497,8 +497,8 @@ class MicrofacetFresnelClosure : public CBSDFClosure { MicrofacetBsdf *alloc(ShaderData *sd, int path_flag, float3 weight) { /* Technically, the MultiGGX Glass closure may also transmit. However, - * since this is set statically and only used for caustic flags, this - * is probably as good as it gets. */ + * since this is set statically and only used for caustic flags, this + * is probably as good as it gets. */ if (skip(sd, path_flag, LABEL_GLOSSY | LABEL_REFLECT)) { return NULL; } @@ -715,8 +715,8 @@ class MicrofacetMultiFresnelClosure : public CBSDFClosure { MicrofacetBsdf *alloc(ShaderData *sd, int path_flag, float3 weight) { /* Technically, the MultiGGX closure may also transmit. However, - * since this is set statically and only used for caustic flags, this - * is probably as good as it gets. */ + * since this is set statically and only used for caustic flags, this + * is probably as good as it gets. 
*/ if (skip(sd, path_flag, LABEL_GLOSSY | LABEL_REFLECT)) { return NULL; } diff --git a/intern/cycles/kernel/osl/osl_services.cpp b/intern/cycles/kernel/osl/osl_services.cpp index e6d0016808d..6404690224a 100644 --- a/intern/cycles/kernel/osl/osl_services.cpp +++ b/intern/cycles/kernel/osl/osl_services.cpp @@ -1017,7 +1017,7 @@ bool OSLRenderServices::texture(ustring filename, PtexPtr r(ptex_cache->get(filename.c_str(), error)); if (!r) { - //std::cerr << error.c_str() << std::endl; + // std::cerr << error.c_str() << std::endl; return false; } diff --git a/intern/cycles/kernel/shaders/stdosl.h b/intern/cycles/kernel/shaders/stdosl.h index 2762b414ce4..6515d914909 100644 --- a/intern/cycles/kernel/shaders/stdosl.h +++ b/intern/cycles/kernel/shaders/stdosl.h @@ -387,14 +387,14 @@ point rotate(point p, float angle, point a, point b) vector axis = normalize(b - a); float cosang, sinang; /* Older OSX has major issues with sincos() function, - * it's likely a big in OSL or LLVM. For until we've - * updated to new versions of this libraries we'll - * use a workaround to prevent possible crashes on all - * the platforms. - * - * Shouldn't be that bad because it's mainly used for - * anisotropic shader where angle is usually constant. - */ + * it's likely a big in OSL or LLVM. For until we've + * updated to new versions of this libraries we'll + * use a workaround to prevent possible crashes on all + * the platforms. + * + * Shouldn't be that bad because it's mainly used for + * anisotropic shader where angle is usually constant. + */ #if 0 sincos(angle, sinang, cosang); #else @@ -425,7 +425,7 @@ point rotate(point p, float angle, point a, point b) normal ensure_valid_reflection(normal Ng, vector I, normal N) { /* The implementation here mirrors the one in kernel_montecarlo.h, - * check there for an explanation of the algorithm. */ + * check there for an explanation of the algorithm. */ float sqr(float x) { diff --git a/intern/cycles/kernel/split/kernel_buffer_update.h b/intern/cycles/kernel/split/kernel_buffer_update.h index e77743350dc..e37be5b405e 100644 --- a/intern/cycles/kernel/split/kernel_buffer_update.h +++ b/intern/cycles/kernel/split/kernel_buffer_update.h @@ -132,8 +132,8 @@ ccl_device void kernel_buffer_update(KernelGlobals *kg, if (ray->t != 0.0f) { /* Initialize throughput, path radiance, Ray, PathState; - * These rays proceed with path-iteration. - */ + * These rays proceed with path-iteration. 
+ */ *throughput = make_float3(1.0f, 1.0f, 1.0f); path_radiance_init(L, kernel_data.film.use_light_pass); path_state_init(kg, diff --git a/intern/cycles/kernel/split/kernel_data_init.h b/intern/cycles/kernel/split/kernel_data_init.h index 52930843f56..2f83a10316d 100644 --- a/intern/cycles/kernel/split/kernel_data_init.h +++ b/intern/cycles/kernel/split/kernel_data_init.h @@ -46,10 +46,10 @@ void KERNEL_FUNCTION_FULL_NAME(data_init)( int sh, int offset, int stride, - ccl_global int *Queue_index, /* Tracks the number of elements in queues */ - int queuesize, /* size (capacity) of the queue */ - ccl_global char * - use_queues_flag, /* flag to decide if scene-intersect kernel should use queues to fetch ray index */ + ccl_global int *Queue_index, /* Tracks the number of elements in queues */ + int queuesize, /* size (capacity) of the queue */ + ccl_global char *use_queues_flag, /* flag to decide if scene-intersect kernel should use queues + to fetch ray index */ ccl_global unsigned int *work_pools, /* Work pool for each work group */ unsigned int num_samples, ccl_global float *buffer) diff --git a/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h b/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h index 63bc5a8e0ce..5cd4131e2ae 100644 --- a/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h +++ b/intern/cycles/kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h @@ -114,9 +114,9 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao( if (IS_STATE(ray_state, ray_index, RAY_ACTIVE)) { /* Path termination. this is a strange place to put the termination, it's - * mainly due to the mixed in MIS that we use. gives too many unneeded - * shader evaluations, only need emission if we are going to terminate. - */ + * mainly due to the mixed in MIS that we use. gives too many unneeded + * shader evaluations, only need emission if we are going to terminate. + */ float probability = path_state_continuation_probability(kg, state, throughput); if (probability == 0.0f) { diff --git a/intern/cycles/kernel/split/kernel_next_iteration_setup.h b/intern/cycles/kernel/split/kernel_next_iteration_setup.h index 781ce869374..3c2f6038035 100644 --- a/intern/cycles/kernel/split/kernel_next_iteration_setup.h +++ b/intern/cycles/kernel/split/kernel_next_iteration_setup.h @@ -109,9 +109,9 @@ ccl_device void kernel_next_iteration_setup(KernelGlobals *kg, if (ccl_global_id(0) == 0 && ccl_global_id(1) == 0) { /* If we are here, then it means that scene-intersect kernel - * has already been executed atleast once. From the next time, - * scene-intersect kernel may operate on queues to fetch ray index - */ + * has already been executed atleast once. 
From the next time, + * scene-intersect kernel may operate on queues to fetch ray index + */ *kernel_split_params.use_queues_flag = 1; /* Mark queue indices of QUEUE_SHADOW_RAY_CAST_AO_RAYS and diff --git a/intern/cycles/kernel/split/kernel_split_data_types.h b/intern/cycles/kernel/split/kernel_split_data_types.h index 6ff3f5bdb55..ac4a450ca2b 100644 --- a/intern/cycles/kernel/split/kernel_split_data_types.h +++ b/intern/cycles/kernel/split/kernel_split_data_types.h @@ -19,7 +19,8 @@ CCL_NAMESPACE_BEGIN -/* parameters used by the split kernels, we use a single struct to avoid passing these to each kernel */ +/* parameters used by the split kernels, we use a single struct to avoid passing these to each + * kernel */ typedef struct SplitParams { WorkTile tile; @@ -112,7 +113,8 @@ typedef ccl_global struct SplitBranchedState { SPLIT_DATA_BRANCHED_ENTRIES \ SPLIT_DATA_ENTRY(ShaderData, _sd, 0) -/* entries to be copied to inactive rays when sharing branched samples (TODO: which are actually needed?) */ +/* Entries to be copied to inactive rays when sharing branched samples + * (TODO: which are actually needed?) */ #define SPLIT_DATA_ENTRIES_BRANCHED_SHARED \ SPLIT_DATA_ENTRY(ccl_global float3, throughput, 1) \ SPLIT_DATA_ENTRY(PathRadiance, path_radiance, 1) \ @@ -134,8 +136,9 @@ typedef struct SplitData { SPLIT_DATA_ENTRIES #undef SPLIT_DATA_ENTRY - /* this is actually in a separate buffer from the rest of the split state data (so it can be read back from - * the host easily) but is still used the same as the other data so we have it here in this struct as well + /* this is actually in a separate buffer from the rest of the split state data (so it can be read + * back from the host easily) but is still used the same as the other data so we have it here in + * this struct as well */ ccl_global char *ray_state; } SplitData; diff --git a/intern/cycles/kernel/svm/svm_ao.h b/intern/cycles/kernel/svm/svm_ao.h index 2efce5cb890..62413979201 100644 --- a/intern/cycles/kernel/svm/svm_ao.h +++ b/intern/cycles/kernel/svm/svm_ao.h @@ -1,18 +1,18 @@ /* -* Copyright 2011-2018 Blender Foundation -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2011-2018 Blender Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ CCL_NAMESPACE_BEGIN diff --git a/intern/cycles/kernel/svm/svm_ies.h b/intern/cycles/kernel/svm/svm_ies.h index 9434c0c5505..f13527c03db 100644 --- a/intern/cycles/kernel/svm/svm_ies.h +++ b/intern/cycles/kernel/svm/svm_ies.h @@ -21,12 +21,12 @@ CCL_NAMESPACE_BEGIN ccl_device_inline float interpolate_ies_vertical( KernelGlobals *kg, int ofs, int v, int v_num, float v_frac, int h) { - /* Since lookups are performed in spherical coordinates, clamping the coordinates at the low end of v - * (corresponding to the north pole) would result in artifacts. - * The proper way of dealing with this would be to lookup the corresponding value on the other side of the pole, - * but since the horizontal coordinates might be nonuniform, this would require yet another interpolation. - * Therefore, the assumtion is made that the light is going to be symmetrical, which means that we can just take - * the corresponding value at the current horizontal coordinate. */ + /* Since lookups are performed in spherical coordinates, clamping the coordinates at the low end + * of v (corresponding to the north pole) would result in artifacts. The proper way of dealing + * with this would be to lookup the corresponding value on the other side of the pole, but since + * the horizontal coordinates might be nonuniform, this would require yet another interpolation. + * Therefore, the assumtion is made that the light is going to be symmetrical, which means that + * we can just take the corresponding value at the current horizontal coordinate. */ #define IES_LOOKUP(v) kernel_tex_fetch(__ies, ofs + h * v_num + (v)) /* If v is zero, assume symmetry and read at v=1 instead of v=-1. */ @@ -66,7 +66,8 @@ ccl_device_inline float kernel_ies_interp(KernelGlobals *kg, /* Lookup the angles to find the table position. */ int h_i, v_i; - /* TODO(lukas): Consider using bisection. Probably not worth it for the vast majority of IES files. */ + /* TODO(lukas): Consider using bisection. + * Probably not worth it for the vast majority of IES files. */ for (h_i = 0; IES_LOOKUP_ANGLE_H(h_i + 1) < h_angle; h_i++) ; for (v_i = 0; IES_LOOKUP_ANGLE_V(v_i + 1) < v_angle; v_i++) @@ -83,7 +84,8 @@ ccl_device_inline float kernel_ies_interp(KernelGlobals *kg, /* Perform cubic interpolation along the horizontal coordinate to get the intensity value. * If h_i is zero, just wrap around since the horizontal angles always go over the full circle. - * However, the last entry (360°) equals the first one, so we need to wrap around to the one before that. */ + * However, the last entry (360°) equals the first one, so we need to wrap around to the one + * before that. */ float a = interpolate_ies_vertical( kg, ofs, v_i, v_num, v_frac, (h_i == 0) ? h_num - 2 : h_i - 1); float b = interpolate_ies_vertical(kg, ofs, v_i, v_num, v_frac, h_i); diff --git a/intern/cycles/kernel/svm/svm_voronoi.h b/intern/cycles/kernel/svm/svm_voronoi.h index c311aefaf38..3e28a316169 100644 --- a/intern/cycles/kernel/svm/svm_voronoi.h +++ b/intern/cycles/kernel/svm/svm_voronoi.h @@ -70,7 +70,8 @@ ccl_device void voronoi_neighbors( } } - /* To keep the shortest four distances and associated points we have to keep them in sorted order. */ + /* To keep the shortest four distances and associated points we have to keep them in sorted + * order. 
*/ if (d < da[0]) { da[3] = da[2]; da[2] = da[1]; diff --git a/intern/cycles/render/camera.cpp b/intern/cycles/render/camera.cpp index a950324d4d4..baa3ce77f84 100644 --- a/intern/cycles/render/camera.cpp +++ b/intern/cycles/render/camera.cpp @@ -642,7 +642,8 @@ float Camera::world_to_raster_size(float3 P) float3 D = normalize(Ddiff); res = len(dist * dDdx - dot(dist * dDdx, D) * D); - /* Decent approx distance to frustum (doesn't handle corners correctly, but not that big of a deal) */ + /* Decent approx distance to frustum + * (doesn't handle corners correctly, but not that big of a deal) */ float f_dist = 0.0f; if (offscreen_dicing_scale > 1.0f) { @@ -686,7 +687,8 @@ float Camera::world_to_raster_size(float3 P) f_dist = max(f_dist, *d); } else { - /* Possibly far enough behind the frustum to use distance to origin instead of edge */ + /* Possibly far enough behind the frustum to use distance to origin instead of edge + */ test_o = true; } } diff --git a/intern/cycles/render/denoising.cpp b/intern/cycles/render/denoising.cpp index c4f21d9c771..82bbfa5f823 100644 --- a/intern/cycles/render/denoising.cpp +++ b/intern/cycles/render/denoising.cpp @@ -69,8 +69,8 @@ static void print_progress(int num, int total, int frame, int num_frames) fflush(stdout); } -/* Splits in at its last dot, setting suffix to the part after the dot and in to the part before it. - * Returns whether a dot was found. */ +/* Splits in at its last dot, setting suffix to the part after the dot and in to the part before + * it. Returns whether a dot was found. */ static bool split_last_dot(string &in, string &suffix) { size_t pos = in.rfind("."); @@ -84,9 +84,8 @@ static bool split_last_dot(string &in, string &suffix) /* Separate channel names as generated by Blender. * If views is true: - * Inputs are expected in the form RenderLayer.Pass.View.Channel, sets renderlayer to "RenderLayer.View" - * Otherwise: - * Inputs are expected in the form RenderLayer.Pass.Channel */ + * Inputs are expected in the form RenderLayer.Pass.View.Channel, sets renderlayer to + * "RenderLayer.View" Otherwise: Inputs are expected in the form RenderLayer.Pass.Channel */ static bool parse_channel_name( string name, string &renderlayer, string &pass, string &channel, bool multiview_channels) { @@ -631,7 +630,8 @@ bool DenoiseImage::parse_channels(const ImageSpec &in_spec, string &error) layer.name = name; layer.samples = samples; - /* If the sample value isn't set yet, check if there is a layer-specific one in the input file. */ + /* If the sample value isn't set yet, check if there is a layer-specific one in the input file. + */ if (layer.samples < 1) { string sample_string = in_spec.get_string_attribute("cycles." + name + ".samples", ""); if (sample_string != "") { diff --git a/intern/cycles/render/denoising.h b/intern/cycles/render/denoising.h index dcb842a4603..c234d00eb49 100644 --- a/intern/cycles/render/denoising.h +++ b/intern/cycles/render/denoising.h @@ -87,14 +87,17 @@ struct DenoiseImageLayer { /* input_to_image_channel of the secondary frames, if any are used. */ vector> neighbor_input_to_image_channel; - /* Write i-th channel of the processing output to output_to_image_channel[i]-th channel of the file. */ + /* Write i-th channel of the processing output to output_to_image_channel[i]-th channel of the + * file. */ vector output_to_image_channel; - /* Detect whether this layer contains a full set of channels and set up the offsets accordingly. 
*/ + /* Detect whether this layer contains a full set of channels and set up the offsets accordingly. + */ bool detect_denoising_channels(); /* Map the channels of a secondary frame to the channels that are required for processing, - * fill neighbor_input_to_image_channel if all are present or return false if a channel are missing. */ + * fill neighbor_input_to_image_channel if all are present or return false if a channel are + * missing. */ bool match_channels(int neighbor, const std::vector &channelnames, const std::vector &neighbor_channelnames); @@ -125,7 +128,8 @@ class DenoiseImage { void free(); - /* Open the input image, parse its channels, open the output image and allocate the output buffer. */ + /* Open the input image, parse its channels, open the output image and allocate the output + * buffer. */ bool load(const string &in_filepath, string &error); /* Load neighboring frames. */ @@ -139,7 +143,8 @@ class DenoiseImage { bool save_output(const string &out_filepath, string &error); protected: - /* Parse input file channels, separate them into DenoiseImageLayers, detect DenoiseImageLayers with full channel sets, + /* Parse input file channels, separate them into DenoiseImageLayers, + * detect DenoiseImageLayers with full channel sets, * fill layers and set up the output channels and passthrough map. */ bool parse_channels(const ImageSpec &in_spec, string &error); diff --git a/intern/cycles/render/mesh.cpp b/intern/cycles/render/mesh.cpp index 54dacf5d1f4..f1622493455 100644 --- a/intern/cycles/render/mesh.cpp +++ b/intern/cycles/render/mesh.cpp @@ -839,8 +839,9 @@ void Mesh::add_undisplaced() size_t size = attr->buffer_size( this, (subdivision_type == SUBDIVISION_NONE) ? ATTR_PRIM_TRIANGLE : ATTR_PRIM_SUBD); - /* Center points for ngons aren't stored in Mesh::verts but are included in size since they will be - * calculated later, we subtract them from size here so we don't have an overflow while copying. + /* Center points for ngons aren't stored in Mesh::verts but are included in size since they will + * be calculated later, we subtract them from size here so we don't have an overflow while + * copying. */ size -= num_ngons * attr->data_sizeof(); diff --git a/intern/cycles/render/shader.h b/intern/cycles/render/shader.h index 600b0cc59d4..f74204df355 100644 --- a/intern/cycles/render/shader.h +++ b/intern/cycles/render/shader.h @@ -143,8 +143,10 @@ class Shader : public Node { Shader(); ~Shader(); - /* Checks whether the shader consists of just a emission node with fixed inputs that's connected directly to the output. - * If yes, it sets the content of emission to the constant value (color * strength), which is then used for speeding up light evaluation. */ + /* Checks whether the shader consists of just a emission node with fixed inputs that's connected + * directly to the output. + * If yes, it sets the content of emission to the constant value (color * strength), which is + * then used for speeding up light evaluation. 
*/ bool is_constant_emission(float3 *emission); void set_graph(ShaderGraph *graph); diff --git a/intern/cycles/render/svm.cpp b/intern/cycles/render/svm.cpp index d8e3e24f39e..040aa074f28 100644 --- a/intern/cycles/render/svm.cpp +++ b/intern/cycles/render/svm.cpp @@ -773,7 +773,8 @@ void SVMCompiler::compile_type(Shader *shader, ShaderGraph *graph, ShaderType ty compile_failed = false; } - /* for bump shaders we fall thru to the surface shader, but if this is any other kind of shader it ends here */ + /* for bump shaders we fall thru to the surface shader, but if this is any other kind of shader + * it ends here */ if (type != SHADER_TYPE_BUMP) { add_node(NODE_END, 0, 0, 0); } @@ -828,7 +829,8 @@ void SVMCompiler::compile( { scoped_timer timer((summary != NULL) ? &summary->time_generate_surface : NULL); compile_type(shader, shader->graph, SHADER_TYPE_SURFACE); - /* only set jump offset if there's no bump shader, as the bump shader will fall thru to this one if it exists */ + /* only set jump offset if there's no bump shader, as the bump shader will fall thru to this + * one if it exists */ if (!has_bump) { svm_nodes[index].y = svm_nodes.size(); } diff --git a/intern/cycles/render/tile.cpp b/intern/cycles/render/tile.cpp index 3148b5ef664..9ef0c695667 100644 --- a/intern/cycles/render/tile.cpp +++ b/intern/cycles/render/tile.cpp @@ -170,8 +170,9 @@ void TileManager::set_samples(int num_samples_) } else { uint64_t pixel_samples = 0; - /* While rendering in the viewport, the initial preview resolution is increased to the native resolution - * before the actual rendering begins. Therefore, additional pixel samples will be rendered. */ + /* While rendering in the viewport, the initial preview resolution is increased to the native + * resolution before the actual rendering begins. Therefore, additional pixel samples will be + * rendered. */ int divider = max(get_divider(params.width, params.height, start_resolution) / 2, pixel_size); while (divider > pixel_size) { int image_w = max(1, params.width / divider); @@ -190,8 +191,9 @@ void TileManager::set_samples(int num_samples_) } } -/* If sliced is false, splits image into tiles and assigns equal amount of tiles to every render device. - * If sliced is true, slice image into as much pieces as how many devices are rendering this image. */ +/* If sliced is false, splits image into tiles and assigns equal amount of tiles to every render + * device. If sliced is true, slice image into as much pieces as how many devices are rendering + * this image. */ int TileManager::gen_tiles(bool sliced) { int resolution = state.resolution_divider; @@ -255,7 +257,8 @@ int TileManager::gen_tiles(bool sliced) } int2 pos = block * block_size + tile * tile_size + offset; - /* Only add tiles which are in the image (tiles outside of the image can be generated since the spiral is always square). */ + /* Only add tiles which are in the image (tiles outside of the image can be generated since + * the spiral is always square). */ if (pos.x >= 0 && pos.y >= 0 && pos.x < image_w && pos.y < image_h) { int w = min(tile_size.x, image_w - pos.x); int h = min(tile_size.y, image_h - pos.y); @@ -336,7 +339,8 @@ int TileManager::gen_tiles(bool sliced) cur_tiles++; if (cur_tiles == tiles_per_device) { - /* Tiles are already generated in Bottom-to-Top order, so no sort is necessary in that case. */ + /* Tiles are already generated in Bottom-to-Top order, so no sort is necessary in that + * case. 
*/ if (tile_order != TILE_BOTTOM_TO_TOP) { tile_list->sort(TileComparator(tile_order, center, &state.tiles[0])); } @@ -398,7 +402,8 @@ int TileManager::get_neighbor_index(int index, int neighbor) return ny * state.tile_stride + nx; } -/* Checks whether all neighbors of a tile (as well as the tile itself) are at least at state min_state. */ +/* Checks whether all neighbors of a tile (as well as the tile itself) are at least at state + * min_state. */ bool TileManager::check_neighbor_state(int index, Tile::State min_state) { if (index < 0 || state.tiles[index].state < min_state) { @@ -415,7 +420,8 @@ bool TileManager::check_neighbor_state(int index, Tile::State min_state) return true; } -/* Returns whether the tile should be written (and freed if no denoising is used) instead of updating. */ +/* Returns whether the tile should be written (and freed if no denoising is used) instead of + * updating. */ bool TileManager::finish_tile(int index, bool &delete_tile) { delete_tile = false; @@ -432,7 +438,8 @@ bool TileManager::finish_tile(int index, bool &delete_tile) return true; } state.tiles[index].state = Tile::RENDERED; - /* For each neighbor and the tile itself, check whether all of its neighbors have been rendered. If yes, it can be denoised. */ + /* For each neighbor and the tile itself, check whether all of its neighbors have been + * rendered. If yes, it can be denoised. */ for (int neighbor = 0; neighbor < 9; neighbor++) { int nindex = get_neighbor_index(index, neighbor); if (check_neighbor_state(nindex, Tile::RENDERED)) { @@ -444,13 +451,15 @@ bool TileManager::finish_tile(int index, bool &delete_tile) } case Tile::DENOISE: { state.tiles[index].state = Tile::DENOISED; - /* For each neighbor and the tile itself, check whether all of its neighbors have been denoised. If yes, it can be freed. */ + /* For each neighbor and the tile itself, check whether all of its neighbors have been + * denoised. If yes, it can be freed. */ for (int neighbor = 0; neighbor < 9; neighbor++) { int nindex = get_neighbor_index(index, neighbor); if (check_neighbor_state(nindex, Tile::DENOISED)) { state.tiles[nindex].state = Tile::DONE; /* It can happen that the tile just finished denoising and already can be freed here. - * However, in that case it still has to be written before deleting, so we can't delete it yet. */ + * However, in that case it still has to be written before deleting, so we can't delete + * it yet. 
*/ if (neighbor == 8) { delete_tile = true; } diff --git a/intern/cycles/subd/subd_split.cpp b/intern/cycles/subd/subd_split.cpp index 803363bc240..e6603632ba7 100644 --- a/intern/cycles/subd/subd_split.cpp +++ b/intern/cycles/subd/subd_split.cpp @@ -141,7 +141,7 @@ void DiagSplit::split(QuadDice::SubPatch &sub, QuadDice::EdgeFactors &ef, int de bool split_v = (ef.tv0 == DSPLIT_NON_UNIFORM || ef.tv1 == DSPLIT_NON_UNIFORM); /* Split subpatches such that the ratio of T for opposite edges doesn't - * exceed 1.5, this reduces over tessellation for some patches + * exceed 1.5, this reduces over tessellation for some patches */ bool tmp_split_v = split_v; if (!split_u && min(ef.tu0, ef.tu1) > 8 && min(ef.tu0, ef.tu1) * 1.5f < max(ef.tu0, ef.tu1)) diff --git a/intern/cycles/util/util_color.h b/intern/cycles/util/util_color.h index ca4c393f66e..85f241c6221 100644 --- a/intern/cycles/util/util_color.h +++ b/intern/cycles/util/util_color.h @@ -167,7 +167,8 @@ ccl_device float3 xyY_to_xyz(float x, float y, float Y) #ifdef __KERNEL_SSE2__ /* * Calculate initial guess for arg^exp based on float representation - * This method gives a constant bias, which can be easily compensated by multiplication with bias_coeff. + * This method gives a constant bias, + * which can be easily compensated by multiplication with bias_coeff. * Gives better results for exponents near 1 (e. g. 4/5). * exp = exponent, encoded as uint32_t * e2coeff = 2^(127/exponent - 127) * bias_coeff^(1/exponent), encoded as uint32_t diff --git a/intern/cycles/util/util_debug.h b/intern/cycles/util/util_debug.h index d668ddc6d6c..83d9e96ffa5 100644 --- a/intern/cycles/util/util_debug.h +++ b/intern/cycles/util/util_debug.h @@ -141,7 +141,8 @@ class DebugFlags { /* Use debug version of the kernel. */ bool debug; - /* TODO(mai): Currently this is only for OpenCL, but we should have it implemented for all devices. */ + /* TODO(mai): Currently this is only for OpenCL, but we should have it implemented for all + * devices. */ /* Artificial memory limit in bytes (0 if disabled). */ size_t mem_limit; }; diff --git a/intern/cycles/util/util_half.h b/intern/cycles/util/util_half.h index 9c40f5310c2..647e9cf2fd6 100644 --- a/intern/cycles/util/util_half.h +++ b/intern/cycles/util/util_half.h @@ -36,7 +36,8 @@ CCL_NAMESPACE_BEGIN /* CUDA has its own half data type, no need to define then */ # ifndef __KERNEL_CUDA__ -/* Implementing this as a class rather than a typedef so that the compiler can tell it apart from unsigned shorts. */ +/* Implementing this as a class rather than a typedef so that the compiler can tell it apart from + * unsigned shorts. */ class half { public: half() : v(0) diff --git a/intern/cycles/util/util_ies.cpp b/intern/cycles/util/util_ies.cpp index ff5c709b406..7c24a4ec28c 100644 --- a/intern/cycles/util/util_ies.cpp +++ b/intern/cycles/util/util_ies.cpp @@ -155,7 +155,8 @@ bool IESFile::parse(ustring ies) type = (IESType)parser.get_long(); /* Photometric type */ /* TODO(lukas): Test whether the current type B processing can also deal with type A files. - * In theory the only difference should be orientation which we ignore anyways, but with IES you never know... + * In theory the only difference should be orientation which we ignore anyways, but with IES you + * never know... */ if (type != TYPE_B && type != TYPE_C) { return false; @@ -173,12 +174,13 @@ bool IESFile::parse(ustring ies) * Cycles expects radiometric quantities, though, which requires a conversion. 
* However, the Luminous efficacy (ratio of lumens per Watt) depends on the spectral distribution * of the light source since lumens take human perception into account. - * Since this spectral distribution is not known from the IES file, a typical one must be assumed. - * The D65 standard illuminant has a Luminous efficacy of 177.83, which is used here to convert to Watt/sr. - * A more advanced approach would be to add a Blackbody Temperature input to the node and numerically - * integrate the Luminous efficacy from the resulting spectral distribution. - * Also, the Watt/sr value must be multiplied by 4*pi to get the Watt value that Cycles expects - * for lamp strength. Therefore, the conversion here uses 4*pi/177.83 as a Candela to Watt factor. + * Since this spectral distribution is not known from the IES file, a typical one must be + * assumed. The D65 standard illuminant has a Luminous efficacy of 177.83, which is used here to + * convert to Watt/sr. A more advanced approach would be to add a Blackbody Temperature input to + * the node and numerically integrate the Luminous efficacy from the resulting spectral + * distribution. Also, the Watt/sr value must be multiplied by 4*pi to get the Watt value that + * Cycles expects for lamp strength. Therefore, the conversion here uses 4*pi/177.83 as a Candela + * to Watt factor. */ factor *= 0.0706650768394; @@ -294,7 +296,8 @@ bool IESFile::process_type_b() bool IESFile::process_type_c() { if (h_angles[0] == 90.0f) { - /* Some files are stored from 90° to 270°, so we just rotate them to the regular 0°-180° range here. */ + /* Some files are stored from 90° to 270°, so we just rotate them to the regular 0°-180° range + * here. */ for (int i = 0; i < h_angles.size(); i++) { h_angles[i] -= 90.0f; } @@ -311,8 +314,9 @@ bool IESFile::process_type_c() if (h_angles[h_angles.size() - 1] == 90.0f) { /* Only one quadrant is defined, so we need to mirror twice (from one to two, then to four). - * Since the two->four mirroring step might also be required if we get an input of two quadrants, - * we only do the first mirror here and later do the second mirror in either case. */ + * Since the two->four mirroring step might also be required if we get an input of two + * quadrants, we only do the first mirror here and later do the second mirror in either case. + */ int hnum = h_angles.size(); for (int i = hnum - 2; i >= 0; i--) { h_angles.push_back(180.0f - h_angles[i]); @@ -329,8 +333,8 @@ bool IESFile::process_type_c() } } - /* Some files skip the 360° entry (contrary to standard) because it's supposed to be identical to the 0° entry. - * If the file has a discernible order in its spacing, just fix this. */ + /* Some files skip the 360° entry (contrary to standard) because it's supposed to be identical to + * the 0° entry. If the file has a discernible order in its spacing, just fix this. */ if (h_angles[h_angles.size() - 1] != 360.0f) { int hnum = h_angles.size(); float last_step = h_angles[hnum - 1] - h_angles[hnum - 2]; diff --git a/intern/cycles/util/util_math_fast.h b/intern/cycles/util/util_math_fast.h index 872271666aa..dbed83ab84d 100644 --- a/intern/cycles/util/util_math_fast.h +++ b/intern/cycles/util/util_math_fast.h @@ -282,8 +282,10 @@ ccl_device float fast_acosf(float x) const float m = (f < 1.0f) ? 
1.0f - (1.0f - f) : 1.0f; /* Based on http://www.pouet.net/topic.php?which=9132&page=2 * 85% accurate (ulp 0) - * Examined 2130706434 values of acos: 15.2000597 avg ulp diff, 4492 max ulp, 4.51803e-05 max error // without "denormal crush" - * Examined 2130706434 values of acos: 15.2007108 avg ulp diff, 4492 max ulp, 4.51803e-05 max error // with "denormal crush" + * Examined 2130706434 values of acos: + * 15.2000597 avg ulp diff, 4492 max ulp, 4.51803e-05 max error // without "denormal crush" + * Examined 2130706434 values of acos: + * 15.2007108 avg ulp diff, 4492 max ulp, 4.51803e-05 max error // with "denormal crush" */ const float a = sqrtf(1.0f - m) * (1.5707963267f + m * (-0.213300989f + m * (0.077980478f + m * -0.02164095f))); @@ -312,8 +314,10 @@ ccl_device float fast_atanf(float x) const float s = 1.0f - (1.0f - k); /* Crush denormals. */ const float t = s * s; /* http://mathforum.org/library/drmath/view/62672.html - * Examined 4278190080 values of atan: 2.36864877 avg ulp diff, 302 max ulp, 6.55651e-06 max error // (with denormals) - * Examined 4278190080 values of atan: 171160502 avg ulp diff, 855638016 max ulp, 6.55651e-06 max error // (crush denormals) + * Examined 4278190080 values of atan: + * 2.36864877 avg ulp diff, 302 max ulp, 6.55651e-06 max error // (with denormals) + * Examined 4278190080 values of atan: + * 171160502 avg ulp diff, 855638016 max ulp, 6.55651e-06 max error // (crush denormals) */ float r = s * madd(0.43157974f, t, 1.0f) / madd(madd(0.05831938f, t, 0.76443945f), t, 1.0f); if (a > 1.0f) { diff --git a/intern/cycles/util/util_math_intersect.h b/intern/cycles/util/util_math_intersect.h index 95ac231c611..fa3a541eea9 100644 --- a/intern/cycles/util/util_math_intersect.h +++ b/intern/cycles/util/util_math_intersect.h @@ -163,7 +163,7 @@ ccl_device_forceinline bool ray_triangle_intersect(float3 ray_P, /* Calculate geometry normal and denominator. */ const float3 Ng1 = cross(e1, e0); - //const Vec3vfM Ng1 = stable_triangle_normal(e2,e1,e0); + // const Vec3vfM Ng1 = stable_triangle_normal(e2,e1,e0); const float3 Ng = Ng1 + Ng1; const float den = dot3(Ng, dir); /* Avoid division by 0. */ diff --git a/intern/cycles/util/util_math_matrix.h b/intern/cycles/util/util_math_matrix.h index fe80fab6ebd..e1c7d5d838f 100644 --- a/intern/cycles/util/util_math_matrix.h +++ b/intern/cycles/util/util_math_matrix.h @@ -110,7 +110,8 @@ ccl_device_inline void math_vec3_add_strided( } /* Elementary matrix operations. - * Note: TriMatrix refers to a square matrix that is symmetric, and therefore its upper-triangular part isn't stored. */ + * Note: TriMatrix refers to a square matrix that is symmetric, + * and therefore its upper-triangular part isn't stored. */ ccl_device_inline void math_trimatrix_add_diagonal(ccl_global float *A, int n, @@ -196,7 +197,8 @@ ccl_device void math_trimatrix_cholesky(ccl_global float *A, int n, int stride) } } -/* Solve A*S=y for S given A and y, where A is symmetrical positive-semidefinite and both inputs are destroyed in the process. +/* Solve A*S=y for S given A and y, + * where A is symmetrical positive-semidefinite and both inputs are destroyed in the process. * * We can apply Cholesky decomposition to find a lower triangular L so that L*Lt = A. * With that we get (L*Lt)*S = L*(Lt*S) = L*b = y, defining b as Lt*S. @@ -204,15 +206,16 @@ ccl_device void math_trimatrix_cholesky(ccl_global float *A, int n, int stride) * Then, the remaining problem is Lt*S = b, which again can be solved easily. 
* * This is useful for solving the normal equation S=inv(Xt*W*X)*Xt*W*y, since Xt*W*X is - * symmetrical positive-semidefinite by construction, so we can just use this function with A=Xt*W*X and y=Xt*W*y. */ + * symmetrical positive-semidefinite by construction, + * so we can just use this function with A=Xt*W*X and y=Xt*W*y. */ ccl_device_inline void math_trimatrix_vec3_solve(ccl_global float *A, ccl_global float3 *y, int n, int stride) { /* Since the first entry of the design row is always 1, the upper-left element of XtWX is a good - * heuristic for the amount of pixels considered (with weighting), therefore the amount of correction - * is scaled based on it. */ + * heuristic for the amount of pixels considered (with weighting), + * therefore the amount of correction is scaled based on it. */ math_trimatrix_add_diagonal(A, n, 3e-7f * A[0], stride); /* Improve the numerical stability. */ math_trimatrix_cholesky(A, n, stride); /* Replace A with L so that L*Lt = A. */ @@ -234,8 +237,8 @@ ccl_device_inline void math_trimatrix_vec3_solve(ccl_global float *A, } /* Perform the Jacobi Eigenvalue Methon on matrix A. - * A is assumed to be a symmetrical matrix, therefore only the lower-triangular part is ever accessed. - * The algorithm overwrites the contents of A. + * A is assumed to be a symmetrical matrix, therefore only the lower-triangular part is ever + * accessed. The algorithm overwrites the contents of A. * * After returning, A will be overwritten with D, which is (almost) diagonal, * and V will contain the eigenvectors of the original A in its rows (!), @@ -263,7 +266,8 @@ ccl_device void math_matrix_jacobi_eigendecomposition(float *A, } if (off_diagonal < 1e-7f) { /* The matrix has nearly reached diagonal form. - * Since the eigenvalues are only used to determine truncation, their exact values aren't required - a relative error of a few ULPs won't matter at all. */ + * Since the eigenvalues are only used to determine truncation, their exact values aren't + * required - a relative error of a few ULPs won't matter at all. */ break; } @@ -277,7 +281,8 @@ ccl_device void math_matrix_jacobi_eigendecomposition(float *A, float element = MAT(A, n, row, col); float abs_element = fabsf(element); - /* If we're in a later sweep and the element already is very small, just set it to zero and skip the rotation. */ + /* If we're in a later sweep and the element already is very small, + * just set it to zero and skip the rotation. */ if (sweep > 3 && abs_element <= singular_epsilon * fabsf(MAT(A, n, row, row)) && abs_element <= singular_epsilon * fabsf(MAT(A, n, col, col))) { MAT(A, n, row, col) = 0.0f; @@ -288,13 +293,16 @@ ccl_device void math_matrix_jacobi_eigendecomposition(float *A, continue; } - /* If we're in one of the first sweeps and the element is smaller than the threshold, skip it. */ + /* If we're in one of the first sweeps and the element is smaller than the threshold, + * skip it. */ if (sweep < 3 && (abs_element < threshold)) { continue; } - /* Determine rotation: The rotation is characterized by its angle phi - or, in the actual implementation, sin(phi) and cos(phi). - * To find those, we first compute their ratio - that might be unstable if the angle approaches 90°, so there's a fallback for that case. + /* Determine rotation: The rotation is characterized by its angle phi - or, + * in the actual implementation, sin(phi) and cos(phi). + * To find those, we first compute their ratio - that might be unstable if the angle + * approaches 90°, so there's a fallback for that case. 
* Then, we compute sin(phi) and cos(phi) themselves. */ float singular_diff = MAT(A, n, row, row) - MAT(A, n, col, col); float ratio; @@ -310,7 +318,8 @@ ccl_device void math_matrix_jacobi_eigendecomposition(float *A, float c = 1.0f / sqrtf(1.0f + ratio * ratio); float s = ratio * c; - /* To improve numerical stability by avoiding cancellation, the update equations are reformulized to use sin(phi) and tan(phi/2) instead. */ + /* To improve numerical stability by avoiding cancellation, the update equations are + * reformulized to use sin(phi) and tan(phi/2) instead. */ float tan_phi_2 = s / (1.0f + c); /* Update the singular values in the diagonal. */ @@ -330,7 +339,8 @@ ccl_device void math_matrix_jacobi_eigendecomposition(float *A, MATS(M, n, r2, c2, stride) += s * (M1 - tan_phi_2 * M2); \ } - /* Split into three parts to ensure correct accesses since we only store the lower-triangular part of A. */ + /* Split into three parts to ensure correct accesses since we only store the + * lower-triangular part of A. */ for (int i = 0; i < col; i++) ROT(A, col, i, row, i, 1); for (int i = col + 1; i < row; i++) diff --git a/intern/cycles/util/util_profiling.cpp b/intern/cycles/util/util_profiling.cpp index e3edf219435..bbefbadd0fe 100644 --- a/intern/cycles/util/util_profiling.cpp +++ b/intern/cycles/util/util_profiling.cpp @@ -47,7 +47,8 @@ void Profiler::run() } if (cur_shader >= 0 && cur_shader < shader_samples.size()) { - /* Only consider the active shader during events whose runtime significantly depends on it. */ + /* Only consider the active shader during events whose runtime significantly depends on it. + */ if (((cur_event >= PROFILING_SHADER_EVAL) && (cur_event <= PROFILING_SUBSURFACE)) || ((cur_event >= PROFILING_CLOSURE_EVAL) && (cur_event <= PROFILING_CLOSURE_VOLUME_SAMPLE))) { diff --git a/intern/cycles/util/util_progress.h b/intern/cycles/util/util_progress.h index f05e5b918f3..379beaeeefa 100644 --- a/intern/cycles/util/util_progress.h +++ b/intern/cycles/util/util_progress.h @@ -362,7 +362,8 @@ class Progress { * It's used to display the sample count if only one tile is active. */ int current_tile_sample; /* Stores the number of tiles that's already finished. - * Used to determine whether all but the last tile are finished rendering, in which case the current_tile_sample is displayed. */ + * Used to determine whether all but the last tile are finished rendering, + * in which case the current_tile_sample is displayed. */ int rendered_tiles, denoised_tiles; double start_time, render_start_time; diff --git a/intern/cycles/util/util_task.cpp b/intern/cycles/util/util_task.cpp index 4b11ce73ea9..2f771e2717e 100644 --- a/intern/cycles/util/util_task.cpp +++ b/intern/cycles/util/util_task.cpp @@ -261,7 +261,7 @@ vector distribute_threads_on_nodes(const int num_threads) const int num_nodes = num_per_node_processors.size(); int thread_index = 0; /* First pass: fill in all the nodes to their maximum. - * + * * If there is less threads than the overall nodes capacity, some of the * nodes or parts of them will idle. * diff --git a/intern/cycles/util/util_types_float8.h b/intern/cycles/util/util_types_float8.h index 7289e3298c3..27da120a4ba 100644 --- a/intern/cycles/util/util_types_float8.h +++ b/intern/cycles/util/util_types_float8.h @@ -1,30 +1,30 @@ /* -* Original code Copyright 2017, Intel Corporation -* Modifications Copyright 2018, Blender Foundation. 
-* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* * Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* * Neither the name of Intel Corporation nor the names of its contributors -* may be used to endorse or promote products derived from this software -* without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ + * Original code Copyright 2017, Intel Corporation + * Modifications Copyright 2018, Blender Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ #ifndef __UTIL_TYPES_FLOAT8_H__ #define __UTIL_TYPES_FLOAT8_H__ diff --git a/intern/cycles/util/util_types_float8_impl.h b/intern/cycles/util/util_types_float8_impl.h index 8ce3d81b1bb..4e4ea28c6a4 100644 --- a/intern/cycles/util/util_types_float8_impl.h +++ b/intern/cycles/util/util_types_float8_impl.h @@ -1,30 +1,30 @@ /* -* Original code Copyright 2017, Intel Corporation -* Modifications Copyright 2018, Blender Foundation. 
-* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* * Redistributions of source code must retain the above copyright notice, -* this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* * Neither the name of Intel Corporation nor the names of its contributors -* may be used to endorse or promote products derived from this software -* without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ + * Original code Copyright 2017, Intel Corporation + * Modifications Copyright 2018, Blender Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ #ifndef __UTIL_TYPES_FLOAT8_IMPL_H__ #define __UTIL_TYPES_FLOAT8_IMPL_H__ -- cgit v1.2.3