git.blender.org/blender.git
Diffstat (limited to 'intern/cycles/kernel/camera'):

-rw-r--r--  intern/cycles/kernel/camera/camera.h      | 521
-rw-r--r--  intern/cycles/kernel/camera/projection.h  | 258
2 files changed, 779 insertions(+), 0 deletions(-)
diff --git a/intern/cycles/kernel/camera/camera.h b/intern/cycles/kernel/camera/camera.h
new file mode 100644
index 00000000000..4f3931583de
--- /dev/null
+++ b/intern/cycles/kernel/camera/camera.h
@@ -0,0 +1,521 @@
+/*
+ * Copyright 2011-2013 Blender Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "kernel/camera/projection.h"
+#include "kernel/sample/mapping.h"
+#include "kernel/util/differential.h"
+#include "kernel/util/lookup_table.h"
+
+CCL_NAMESPACE_BEGIN
+
+/* Perspective Camera */
+
+ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
+{
+ float blades = cam->blades;
+ float2 bokeh;
+
+ if (blades == 0.0f) {
+ /* sample disk */
+ bokeh = concentric_sample_disk(u, v);
+ }
+ else {
+ /* sample polygon */
+ float rotation = cam->bladesrotation;
+ bokeh = regular_polygon_sample(blades, rotation, u, v);
+ }
+
+ /* anamorphic lens bokeh */
+ bokeh.x *= cam->inv_aperture_ratio;
+
+ return bokeh;
+}
+
+ccl_device void camera_sample_perspective(KernelGlobals kg,
+ float raster_x,
+ float raster_y,
+ float lens_u,
+ float lens_v,
+ ccl_private Ray *ray)
+{
+ /* create ray from raster position */
+ ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
+ float3 raster = make_float3(raster_x, raster_y, 0.0f);
+ float3 Pcamera = transform_perspective(&rastertocamera, raster);
+
+#ifdef __CAMERA_MOTION__
+ if (kernel_data.cam.have_perspective_motion) {
+ /* TODO(sergey): Currently we interpolate the projected coordinate, which
+ * gives a nice looking result and is simple, but is in fact a bit
+ * different compared to constructing the projection matrix from an
+ * interpolated field of view.
+ */
+ if (ray->time < 0.5f) {
+ ProjectionTransform rastertocamera_pre = kernel_data.cam.perspective_pre;
+ float3 Pcamera_pre = transform_perspective(&rastertocamera_pre, raster);
+ Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
+ }
+ else {
+ ProjectionTransform rastertocamera_post = kernel_data.cam.perspective_post;
+ float3 Pcamera_post = transform_perspective(&rastertocamera_post, raster);
+ Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
+ }
+ }
+#endif
+
+ float3 P = zero_float3();
+ float3 D = Pcamera;
+
+ /* modify ray for depth of field */
+ float aperturesize = kernel_data.cam.aperturesize;
+
+ if (aperturesize > 0.0f) {
+ /* sample point on aperture */
+ float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;
+
+ /* compute point on plane of focus */
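+ /* D is unnormalized here, with z equal to the camera-space depth, so
+ * focaldistance / D.z is the parametric distance to the plane of focus. */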
+ float ft = kernel_data.cam.focaldistance / D.z;
+ float3 Pfocus = D * ft;
+
+ /* update ray for effect of lens */
+ P = make_float3(lensuv.x, lensuv.y, 0.0f);
+ D = normalize(Pfocus - P);
+ }
+
+ /* transform ray from camera to world */
+ Transform cameratoworld = kernel_data.cam.cameratoworld;
+
+#ifdef __CAMERA_MOTION__
+ if (kernel_data.cam.num_motion_steps) {
+ transform_motion_array_interpolate(&cameratoworld,
+ kernel_tex_array(__camera_motion),
+ kernel_data.cam.num_motion_steps,
+ ray->time);
+ }
+#endif
+
+ P = transform_point(&cameratoworld, P);
+ D = normalize(transform_direction(&cameratoworld, D));
+
+ bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
+ if (!use_stereo) {
+ /* No stereo */
+ ray->P = P;
+ ray->D = D;
+
+#ifdef __RAY_DIFFERENTIALS__
+ float3 Dcenter = transform_direction(&cameratoworld, Pcamera);
+ float3 Dcenter_normalized = normalize(Dcenter);
+
+ /* TODO: can this be optimized to give compact differentials directly? */
+ ray->dP = differential_zero_compact();
+ differential3 dD;
+ dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - Dcenter_normalized;
+ dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - Dcenter_normalized;
+ ray->dD = differential_make_compact(dD);
+#endif
+ }
+ else {
+ /* Spherical stereo */
+ spherical_stereo_transform(&kernel_data.cam, &P, &D);
+ ray->P = P;
+ ray->D = D;
+
+#ifdef __RAY_DIFFERENTIALS__
+ /* Ray differentials, computed from scratch using the raster coordinates
+ * because we don't want to be affected by depth of field. We compute
+ * ray origin and direction for the center and two neighboring pixels
+ * and simply take their differences. */
+ float3 Pnostereo = transform_point(&cameratoworld, zero_float3());
+
+ float3 Pcenter = Pnostereo;
+ float3 Dcenter = Pcamera;
+ Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
+ spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);
+
+ float3 Px = Pnostereo;
+ float3 Dx = transform_perspective(&rastertocamera,
+ make_float3(raster_x + 1.0f, raster_y, 0.0f));
+ Dx = normalize(transform_direction(&cameratoworld, Dx));
+ spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);
+
+ differential3 dP, dD;
+
+ dP.dx = Px - Pcenter;
+ dD.dx = Dx - Dcenter;
+
+ float3 Py = Pnostereo;
+ float3 Dy = transform_perspective(&rastertocamera,
+ make_float3(raster_x, raster_y + 1.0f, 0.0f));
+ Dy = normalize(transform_direction(&cameratoworld, Dy));
+ spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);
+
+ dP.dy = Py - Pcenter;
+ dD.dy = Dy - Dcenter;
+ ray->dD = differential_make_compact(dD);
+ ray->dP = differential_make_compact(dP);
+#endif
+ }
+
+#ifdef __CAMERA_CLIPPING__
+ /* clipping */
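+ /* Clip distances are measured along the camera z axis; dividing by the
+ * normalized direction's z component converts them to parametric
+ * distances along the ray. */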
+ float z_inv = 1.0f / normalize(Pcamera).z;
+ float nearclip = kernel_data.cam.nearclip * z_inv;
+ ray->P += nearclip * ray->D;
+ ray->dP += nearclip * ray->dD;
+ ray->t = kernel_data.cam.cliplength * z_inv;
+#else
+ ray->t = FLT_MAX;
+#endif
+}
+
+/* Orthographic Camera */
+ccl_device void camera_sample_orthographic(KernelGlobals kg,
+ float raster_x,
+ float raster_y,
+ float lens_u,
+ float lens_v,
+ ccl_private Ray *ray)
+{
+ /* create ray from raster position */
+ ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
+ float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
+
+ float3 P;
+ float3 D = make_float3(0.0f, 0.0f, 1.0f);
+
+ /* modify ray for depth of field */
+ float aperturesize = kernel_data.cam.aperturesize;
+
+ if (aperturesize > 0.0f) {
+ /* sample point on aperture */
+ float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;
+
+ /* compute point on plane of focus */
+ float3 Pfocus = D * kernel_data.cam.focaldistance;
+
+ /* update ray for effect of lens */
+ float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
+ P = Pcamera + lensuvw;
+ D = normalize(Pfocus - lensuvw);
+ }
+ else {
+ P = Pcamera;
+ }
+ /* transform ray from camera to world */
+ Transform cameratoworld = kernel_data.cam.cameratoworld;
+
+#ifdef __CAMERA_MOTION__
+ if (kernel_data.cam.num_motion_steps) {
+ transform_motion_array_interpolate(&cameratoworld,
+ kernel_tex_array(__camera_motion),
+ kernel_data.cam.num_motion_steps,
+ ray->time);
+ }
+#endif
+
+ ray->P = transform_point(&cameratoworld, P);
+ ray->D = normalize(transform_direction(&cameratoworld, D));
+
+#ifdef __RAY_DIFFERENTIALS__
+ /* ray differential */
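+ /* Orthographic rays are parallel: neighboring pixels shift the ray
+ * origin by the raster-step deltas while the direction is unchanged. */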
+ differential3 dP;
+ dP.dx = float4_to_float3(kernel_data.cam.dx);
+ dP.dy = float4_to_float3(kernel_data.cam.dy);
+
+ ray->dP = differential_make_compact(dP);
+ ray->dD = differential_zero_compact();
+#endif
+
+#ifdef __CAMERA_CLIPPING__
+ /* clipping */
+ ray->t = kernel_data.cam.cliplength;
+#else
+ ray->t = FLT_MAX;
+#endif
+}
+
+/* Panorama Camera */
+
+ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
+#ifdef __CAMERA_MOTION__
+ ccl_global const DecomposedTransform *cam_motion,
+#endif
+ float raster_x,
+ float raster_y,
+ float lens_u,
+ float lens_v,
+ ccl_private Ray *ray)
+{
+ ProjectionTransform rastertocamera = cam->rastertocamera;
+ float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
+
+ /* create ray from raster position */
+ float3 P = zero_float3();
+ float3 D = panorama_to_direction(cam, Pcamera.x, Pcamera.y);
+
+ /* a zero direction indicates the ray falls outside the lens and should
+ * not receive any light */
+ if (is_zero(D)) {
+ ray->t = 0.0f;
+ return;
+ }
+
+ /* modify ray for depth of field */
+ float aperturesize = cam->aperturesize;
+
+ if (aperturesize > 0.0f) {
+ /* sample point on aperture */
+ float2 lensuv = camera_sample_aperture(cam, lens_u, lens_v) * aperturesize;
+
+ /* compute point on plane of focus */
+ float3 Dfocus = normalize(D);
+ float3 Pfocus = Dfocus * cam->focaldistance;
+
+ /* calculate orthonormal coordinates perpendicular to Dfocus */
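+ /* Gram-Schmidt: project the x axis onto the plane perpendicular to
+ * Dfocus, then complete the basis with a cross product. */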
+ float3 U, V;
+ U = normalize(make_float3(1.0f, 0.0f, 0.0f) - Dfocus.x * Dfocus);
+ V = normalize(cross(Dfocus, U));
+
+ /* update ray for effect of lens */
+ P = U * lensuv.x + V * lensuv.y;
+ D = normalize(Pfocus - P);
+ }
+
+ /* transform ray from camera to world */
+ Transform cameratoworld = cam->cameratoworld;
+
+#ifdef __CAMERA_MOTION__
+ if (cam->num_motion_steps) {
+ transform_motion_array_interpolate(
+ &cameratoworld, cam_motion, cam->num_motion_steps, ray->time);
+ }
+#endif
+
+ /* Stereo transform */
+ bool use_stereo = cam->interocular_offset != 0.0f;
+ if (use_stereo) {
+ spherical_stereo_transform(cam, &P, &D);
+ }
+
+ P = transform_point(&cameratoworld, P);
+ D = normalize(transform_direction(&cameratoworld, D));
+
+ ray->P = P;
+ ray->D = D;
+
+#ifdef __RAY_DIFFERENTIALS__
+ /* Ray differentials, computed from scratch using the raster coordinates
+ * because we don't want to be affected by depth of field. We compute
+ * ray origin and direction for the center and two neighboring pixels
+ * and simply take their differences. */
+ float3 Pcenter = Pcamera;
+ float3 Dcenter = panorama_to_direction(cam, Pcenter.x, Pcenter.y);
+ if (use_stereo) {
+ spherical_stereo_transform(cam, &Pcenter, &Dcenter);
+ }
+ Pcenter = transform_point(&cameratoworld, Pcenter);
+ Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
+
+ float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
+ float3 Dx = panorama_to_direction(cam, Px.x, Px.y);
+ if (use_stereo) {
+ spherical_stereo_transform(cam, &Px, &Dx);
+ }
+ Px = transform_point(&cameratoworld, Px);
+ Dx = normalize(transform_direction(&cameratoworld, Dx));
+
+ differential3 dP, dD;
+ dP.dx = Px - Pcenter;
+ dD.dx = Dx - Dcenter;
+
+ float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
+ float3 Dy = panorama_to_direction(cam, Py.x, Py.y);
+ if (use_stereo) {
+ spherical_stereo_transform(cam, &Py, &Dy);
+ }
+ Py = transform_point(&cameratoworld, Py);
+ Dy = normalize(transform_direction(&cameratoworld, Dy));
+
+ dP.dy = Py - Pcenter;
+ dD.dy = Dy - Dcenter;
+ ray->dD = differential_make_compact(dD);
+ ray->dP = differential_make_compact(dP);
+#endif
+
+#ifdef __CAMERA_CLIPPING__
+ /* clipping */
+ float nearclip = cam->nearclip;
+ ray->P += nearclip * ray->D;
+ ray->dP += nearclip * ray->dD;
+ ray->t = cam->cliplength;
+#else
+ ray->t = FLT_MAX;
+#endif
+}
+
+/* Common */
+
+ccl_device_inline void camera_sample(KernelGlobals kg,
+ int x,
+ int y,
+ float filter_u,
+ float filter_v,
+ float lens_u,
+ float lens_v,
+ float time,
+ ccl_private Ray *ray)
+{
+ /* pixel filter */
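+ /* Importance sample the pixel filter: the lookup table maps a uniform
+ * sample through the filter's inverted CDF to a subpixel offset. */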
+ int filter_table_offset = kernel_data.film.filter_table_offset;
+ float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
+ float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);
+
+#ifdef __CAMERA_MOTION__
+ /* motion blur */
+ if (kernel_data.cam.shuttertime == -1.0f) {
+ ray->time = 0.5f;
+ }
+ else {
+ /* TODO(sergey): This lookup is unneeded when the rolling shutter
+ * effect is in use but the rolling shutter duration is set to 0.0.
+ */
+ const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
+ ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
+ /* TODO(sergey): Currently only a single rolling shutter effect type is
+ * supported, where scan-lines are acquired from top to bottom and a
+ * whole scan-line is acquired at once (no delay in acquisition happens
+ * between pixels of a single scan-line).
+ *
+ * Might want to support more models in the future.
+ */
+ if (kernel_data.cam.rolling_shutter_type) {
+ /* Time corresponding to a fully rolling-shutter-only effect:
+ * top of the frame is time 0.0, bottom of the frame is time 1.0.
+ */
+ const float time = 1.0f - (float)y / kernel_data.cam.height;
+ const float duration = kernel_data.cam.rolling_shutter_duration;
+ if (duration != 0.0f) {
+ /* This isn't fully physically correct, but lets us have simple
+ * controls in the interface. The idea is a linear interpolation
+ * between how much rolling shutter effect exists on the frame and
+ * how much of it is a motion blur effect.
+ */
+ ray->time = (ray->time - 0.5f) * duration;
+ ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
+ }
+ else {
+ ray->time = time;
+ }
+ }
+ }
+#endif
+
+ /* sample */
+ if (kernel_data.cam.type == CAMERA_PERSPECTIVE) {
+ camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
+ }
+ else if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
+ camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
+ }
+ else {
+#ifdef __CAMERA_MOTION__
+ ccl_global const DecomposedTransform *cam_motion = kernel_tex_array(__camera_motion);
+ camera_sample_panorama(&kernel_data.cam, cam_motion, raster_x, raster_y, lens_u, lens_v, ray);
+#else
+ camera_sample_panorama(&kernel_data.cam, raster_x, raster_y, lens_u, lens_v, ray);
+#endif
+ }
+}
+
+/* Utilities */
+
+ccl_device_inline float3 camera_position(KernelGlobals kg)
+{
+ Transform cameratoworld = kernel_data.cam.cameratoworld;
+ return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
+}
+
+ccl_device_inline float camera_distance(KernelGlobals kg, float3 P)
+{
+ Transform cameratoworld = kernel_data.cam.cameratoworld;
+ float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
+
+ if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
+ float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
+ return fabsf(dot((P - camP), camD));
+ }
+ else {
+ return len(P - camP);
+ }
+}
+
+ccl_device_inline float camera_z_depth(KernelGlobals kg, float3 P)
+{
+ if (kernel_data.cam.type != CAMERA_PANORAMA) {
+ Transform worldtocamera = kernel_data.cam.worldtocamera;
+ return transform_point(&worldtocamera, P).z;
+ }
+ else {
+ Transform cameratoworld = kernel_data.cam.cameratoworld;
+ float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
+ return len(P - camP);
+ }
+}
+
+ccl_device_inline float3 camera_direction_from_point(KernelGlobals kg, float3 P)
+{
+ Transform cameratoworld = kernel_data.cam.cameratoworld;
+
+ if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
+ float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
+ return -camD;
+ }
+ else {
+ float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
+ return normalize(camP - P);
+ }
+}
+
+ccl_device_inline float3 camera_world_to_ndc(KernelGlobals kg,
+ ccl_private ShaderData *sd,
+ float3 P)
+{
+ if (kernel_data.cam.type != CAMERA_PANORAMA) {
+ /* perspective / ortho */
+ if (sd->object == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE)
+ P += camera_position(kg);
+
+ ProjectionTransform tfm = kernel_data.cam.worldtondc;
+ return transform_perspective(&tfm, P);
+ }
+ else {
+ /* panorama */
+ Transform tfm = kernel_data.cam.worldtocamera;
+
+ if (sd->object != OBJECT_NONE)
+ P = normalize(transform_point(&tfm, P));
+ else
+ P = normalize(transform_direction(&tfm, P));
+
+ float2 uv = direction_to_panorama(&kernel_data.cam, P);
+
+ return make_float3(uv.x, uv.y, 0.0f);
+ }
+}
+
+CCL_NAMESPACE_END
diff --git a/intern/cycles/kernel/camera/projection.h b/intern/cycles/kernel/camera/projection.h
new file mode 100644
index 00000000000..0aea82fa812
--- /dev/null
+++ b/intern/cycles/kernel/camera/projection.h
@@ -0,0 +1,258 @@
+/*
+ * Parts adapted from Open Shading Language with this license:
+ *
+ * Copyright (c) 2009-2010 Sony Pictures Imageworks Inc., et al.
+ * All Rights Reserved.
+ *
+ * Modifications Copyright 2011, Blender Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Sony Pictures Imageworks nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+CCL_NAMESPACE_BEGIN
+
+/* Spherical coordinates <-> Cartesian direction. */
+
+ccl_device float2 direction_to_spherical(float3 dir)
+{
+ float theta = safe_acosf(dir.z);
+ float phi = atan2f(dir.x, dir.y);
+
+ return make_float2(theta, phi);
+}
+
+ccl_device float3 spherical_to_direction(float theta, float phi)
+{
+ float sin_theta = sinf(theta);
+ return make_float3(sin_theta * cosf(phi), sin_theta * sinf(phi), cosf(theta));
+}
+
+/* Equirectangular coordinates <-> Cartesian direction */
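+/* The range parameter packs the mapping as
+ * (phi scale, phi offset, theta scale, theta offset). */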
+
+ccl_device float2 direction_to_equirectangular_range(float3 dir, float4 range)
+{
+ if (is_zero(dir))
+ return zero_float2();
+
+ float u = (atan2f(dir.y, dir.x) - range.y) / range.x;
+ float v = (acosf(dir.z / len(dir)) - range.w) / range.z;
+
+ return make_float2(u, v);
+}
+
+ccl_device float3 equirectangular_range_to_direction(float u, float v, float4 range)
+{
+ float phi = range.x * u + range.y;
+ float theta = range.z * v + range.w;
+ float sin_theta = sinf(theta);
+ return make_float3(sin_theta * cosf(phi), sin_theta * sinf(phi), cosf(theta));
+}
+
+ccl_device float2 direction_to_equirectangular(float3 dir)
+{
+ return direction_to_equirectangular_range(dir, make_float4(-M_2PI_F, M_PI_F, -M_PI_F, M_PI_F));
+}
+
+ccl_device float3 equirectangular_to_direction(float u, float v)
+{
+ return equirectangular_range_to_direction(u, v, make_float4(-M_2PI_F, M_PI_F, -M_PI_F, M_PI_F));
+}
+
+/* Fisheye <-> Cartesian direction */
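+/* Equidistant fisheye: the image radius is proportional to the angle
+ * between the direction and the optical axis. */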
+
+ccl_device float2 direction_to_fisheye(float3 dir, float fov)
+{
+ float r = atan2f(sqrtf(dir.y * dir.y + dir.z * dir.z), dir.x) / fov;
+ float phi = atan2f(dir.z, dir.y);
+
+ float u = r * cosf(phi) + 0.5f;
+ float v = r * sinf(phi) + 0.5f;
+
+ return make_float2(u, v);
+}
+
+ccl_device float3 fisheye_to_direction(float u, float v, float fov)
+{
+ u = (u - 0.5f) * 2.0f;
+ v = (v - 0.5f) * 2.0f;
+
+ float r = sqrtf(u * u + v * v);
+
+ if (r > 1.0f)
+ return zero_float3();
+
+ float phi = safe_acosf((r != 0.0f) ? u / r : 0.0f);
+ float theta = r * fov * 0.5f;
+
+ if (v < 0.0f)
+ phi = -phi;
+
+ return make_float3(cosf(theta), -cosf(phi) * sinf(theta), sinf(phi) * sinf(theta));
+}
+
+ccl_device float2 direction_to_fisheye_equisolid(float3 dir, float lens, float width, float height)
+{
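+ /* Equisolid-angle mapping: image radius r = 2 * lens * sin(theta / 2). */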
+ float theta = safe_acosf(dir.x);
+ float r = 2.0f * lens * sinf(theta * 0.5f);
+ float phi = atan2f(dir.z, dir.y);
+
+ float u = r * cosf(phi) / width + 0.5f;
+ float v = r * sinf(phi) / height + 0.5f;
+
+ return make_float2(u, v);
+}
+
+ccl_device_inline float3
+fisheye_equisolid_to_direction(float u, float v, float lens, float fov, float width, float height)
+{
+ u = (u - 0.5f) * width;
+ v = (v - 0.5f) * height;
+
+ float rmax = 2.0f * lens * sinf(fov * 0.25f);
+ float r = sqrtf(u * u + v * v);
+
+ if (r > rmax)
+ return zero_float3();
+
+ float phi = safe_acosf((r != 0.0f) ? u / r : 0.0f);
+ float theta = 2.0f * asinf(r / (2.0f * lens));
+
+ if (v < 0.0f)
+ phi = -phi;
+
+ return make_float3(cosf(theta), -cosf(phi) * sinf(theta), sinf(phi) * sinf(theta));
+}
+
+/* Mirror Ball <-> Cartesian direction */
+
+ccl_device float3 mirrorball_to_direction(float u, float v)
+{
+ /* point on sphere */
+ float3 dir;
+
+ dir.x = 2.0f * u - 1.0f;
+ dir.z = 2.0f * v - 1.0f;
+
+ if (dir.x * dir.x + dir.z * dir.z > 1.0f)
+ return zero_float3();
+
+ dir.y = -sqrtf(max(1.0f - dir.x * dir.x - dir.z * dir.z, 0.0f));
+
+ /* reflection */
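+ /* Reflect the view direction I about the sphere normal N = dir:
+ * R = 2 * dot(N, I) * N - I. */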
+ float3 I = make_float3(0.0f, -1.0f, 0.0f);
+
+ return 2.0f * dot(dir, I) * dir - I;
+}
+
+ccl_device float2 direction_to_mirrorball(float3 dir)
+{
+ /* inverse of mirrorball_to_direction */
+ dir.y -= 1.0f;
+
+ float div = 2.0f * sqrtf(max(-0.5f * dir.y, 0.0f));
+ if (div > 0.0f)
+ dir /= div;
+
+ float u = 0.5f * (dir.x + 1.0f);
+ float v = 0.5f * (dir.z + 1.0f);
+
+ return make_float2(u, v);
+}
+
+ccl_device_inline float3 panorama_to_direction(ccl_constant KernelCamera *cam, float u, float v)
+{
+ switch (cam->panorama_type) {
+ case PANORAMA_EQUIRECTANGULAR:
+ return equirectangular_range_to_direction(u, v, cam->equirectangular_range);
+ case PANORAMA_MIRRORBALL:
+ return mirrorball_to_direction(u, v);
+ case PANORAMA_FISHEYE_EQUIDISTANT:
+ return fisheye_to_direction(u, v, cam->fisheye_fov);
+ case PANORAMA_FISHEYE_EQUISOLID:
+ default:
+ return fisheye_equisolid_to_direction(
+ u, v, cam->fisheye_lens, cam->fisheye_fov, cam->sensorwidth, cam->sensorheight);
+ }
+}
+
+ccl_device_inline float2 direction_to_panorama(ccl_constant KernelCamera *cam, float3 dir)
+{
+ switch (cam->panorama_type) {
+ case PANORAMA_EQUIRECTANGULAR:
+ return direction_to_equirectangular_range(dir, cam->equirectangular_range);
+ case PANORAMA_MIRRORBALL:
+ return direction_to_mirrorball(dir);
+ case PANORAMA_FISHEYE_EQUIDISTANT:
+ return direction_to_fisheye(dir, cam->fisheye_fov);
+ case PANORAMA_FISHEYE_EQUISOLID:
+ default:
+ return direction_to_fisheye_equisolid(
+ dir, cam->fisheye_lens, cam->sensorwidth, cam->sensorheight);
+ }
+}
+
+ccl_device_inline void spherical_stereo_transform(ccl_constant KernelCamera *cam,
+ ccl_private float3 *P,
+ ccl_private float3 *D)
+{
+ float interocular_offset = cam->interocular_offset;
+
+ /* An interocular offset of zero means either non-stereo rendering, or
+ * stereo without spherical stereo. */
+ kernel_assert(interocular_offset != 0.0f);
+
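+ /* Fade the eye separation to zero towards the poles, to avoid artifacts
+ * where the view direction approaches the up axis. */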
+ if (cam->pole_merge_angle_to > 0.0f) {
+ const float pole_merge_angle_from = cam->pole_merge_angle_from,
+ pole_merge_angle_to = cam->pole_merge_angle_to;
+ float altitude = fabsf(safe_asinf((*D).z));
+ if (altitude > pole_merge_angle_to) {
+ interocular_offset = 0.0f;
+ }
+ else if (altitude > pole_merge_angle_from) {
+ float fac = (altitude - pole_merge_angle_from) /
+ (pole_merge_angle_to - pole_merge_angle_from);
+ float fade = cosf(fac * M_PI_2_F);
+ interocular_offset *= fade;
+ }
+ }
+
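+ /* Offset the ray origin sideways, perpendicular to both the view
+ * direction and the up axis, by the (possibly pole-faded) eye offset. */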
+ float3 up = make_float3(0.0f, 0.0f, 1.0f);
+ float3 side = normalize(cross(*D, up));
+ float3 stereo_offset = side * interocular_offset;
+
+ *P += stereo_offset;
+
+ /* Convergence distance is FLT_MAX in the case of parallel convergence mode,
+ * no need to modify direction in this case either. */
+ const float convergence_distance = cam->convergence_distance;
+
+ if (convergence_distance != FLT_MAX) {
+ float3 screen_offset = convergence_distance * (*D);
+ *D = normalize(screen_offset - stereo_offset);
+ }
+}
+
+CCL_NAMESPACE_END