git.blender.org/blender.git
 intern/cycles/kernel/geom/geom_primitive.h |  6
 intern/cycles/kernel/kernel_camera.h       | 64
 intern/cycles/kernel/kernel_projection.h   | 36
 intern/cycles/render/camera.cpp            | 39
 4 files changed, 89 insertions(+), 56 deletions(-)
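
Taken together, the change converts the kernel camera and projection helpers from reading kernel_data.cam through a KernelGlobals *kg argument to taking a ccl_constant KernelCamera *cam directly; device code now passes &kernel_data.cam, while host code (render/camera.cpp) can pass its own KernelCamera. A minimal sketch of the pattern, using the signature from the first kernel_camera.h hunk with the body elided:

	/* Before: camera parameters are fetched implicitly through the globals. */
	ccl_device float2 camera_sample_aperture(KernelGlobals *kg, float u, float v)
	{
		float blades = kernel_data.cam.blades;
		/* ... */
	}

	/* After: the camera struct is an explicit, constant-qualified argument, so
	 * the same function can be called with &kernel_data.cam on the device or
	 * with a host-side KernelCamera copy. */
	ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
	{
		float blades = cam->blades;
		/* ... */
	}
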
diff --git a/intern/cycles/kernel/geom/geom_primitive.h b/intern/cycles/kernel/geom/geom_primitive.h
index 90a9c2147cc..60a1e483b84 100644
--- a/intern/cycles/kernel/geom/geom_primitive.h
+++ b/intern/cycles/kernel/geom/geom_primitive.h
@@ -216,19 +216,19 @@ ccl_device_inline float4 primitive_motion_vector(KernelGlobals *kg, ShaderData *
else {
tfm = kernel_data.cam.worldtocamera;
motion_center = normalize(transform_point(&tfm, center));
- motion_center = float2_to_float3(direction_to_panorama(kg, motion_center));
+ motion_center = float2_to_float3(direction_to_panorama(&kernel_data.cam, motion_center));
motion_center.x *= kernel_data.cam.width;
motion_center.y *= kernel_data.cam.height;
tfm = kernel_data.cam.motion.pre;
motion_pre = normalize(transform_point(&tfm, motion_pre));
- motion_pre = float2_to_float3(direction_to_panorama(kg, motion_pre));
+ motion_pre = float2_to_float3(direction_to_panorama(&kernel_data.cam, motion_pre));
motion_pre.x *= kernel_data.cam.width;
motion_pre.y *= kernel_data.cam.height;
tfm = kernel_data.cam.motion.post;
motion_post = normalize(transform_point(&tfm, motion_post));
- motion_post = float2_to_float3(direction_to_panorama(kg, motion_post));
+ motion_post = float2_to_float3(direction_to_panorama(&kernel_data.cam, motion_post));
motion_post.x *= kernel_data.cam.width;
motion_post.y *= kernel_data.cam.height;
}
diff --git a/intern/cycles/kernel/kernel_camera.h b/intern/cycles/kernel/kernel_camera.h
index 77e3446329a..96cdb04d955 100644
--- a/intern/cycles/kernel/kernel_camera.h
+++ b/intern/cycles/kernel/kernel_camera.h
@@ -18,9 +18,9 @@ CCL_NAMESPACE_BEGIN
/* Perspective Camera */
-ccl_device float2 camera_sample_aperture(KernelGlobals *kg, float u, float v)
+ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
{
- float blades = kernel_data.cam.blades;
+ float blades = cam->blades;
float2 bokeh;
if(blades == 0.0f) {
@@ -29,12 +29,12 @@ ccl_device float2 camera_sample_aperture(KernelGlobals *kg, float u, float v)
}
else {
/* sample polygon */
- float rotation = kernel_data.cam.bladesrotation;
+ float rotation = cam->bladesrotation;
bokeh = regular_polygon_sample(blades, rotation, u, v);
}
/* anamorphic lens bokeh */
- bokeh.x *= kernel_data.cam.inv_aperture_ratio;
+ bokeh.x *= cam->inv_aperture_ratio;
return bokeh;
}
@@ -76,7 +76,7 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
if(aperturesize > 0.0f) {
/* sample point on aperture */
- float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
+ float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v)*aperturesize;
/* compute point on plane of focus */
float ft = kernel_data.cam.focaldistance/D.z;
@@ -124,7 +124,7 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
}
else {
/* Spherical stereo */
- spherical_stereo_transform(kg, &P, &D);
+ spherical_stereo_transform(&kernel_data.cam, &P, &D);
ray->P = P;
ray->D = D;
@@ -138,12 +138,12 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
float3 Pcenter = Pnostereo;
float3 Dcenter = Pcamera;
Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
- spherical_stereo_transform(kg, &Pcenter, &Dcenter);
+ spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);
float3 Px = Pnostereo;
float3 Dx = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
Dx = normalize(transform_direction(&cameratoworld, Dx));
- spherical_stereo_transform(kg, &Px, &Dx);
+ spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);
ray->dP.dx = Px - Pcenter;
ray->dD.dx = Dx - Dcenter;
@@ -151,7 +151,7 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
float3 Py = Pnostereo;
float3 Dy = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
Dy = normalize(transform_direction(&cameratoworld, Dy));
- spherical_stereo_transform(kg, &Py, &Dy);
+ spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);
ray->dP.dy = Py - Pcenter;
ray->dD.dy = Dy - Dcenter;
@@ -186,7 +186,7 @@ ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, fl
if(aperturesize > 0.0f) {
/* sample point on aperture */
- float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
+ float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v)*aperturesize;
/* compute point on plane of focus */
float3 Pfocus = D * kernel_data.cam.focaldistance;
@@ -238,17 +238,17 @@ ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, fl
/* Panorama Camera */
-ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
+ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
float raster_x, float raster_y,
float lens_u, float lens_v,
ccl_addr_space Ray *ray)
{
- Transform rastertocamera = kernel_data.cam.rastertocamera;
+ Transform rastertocamera = cam->rastertocamera;
float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
/* create ray form raster position */
float3 P = make_float3(0.0f, 0.0f, 0.0f);
- float3 D = panorama_to_direction(kg, Pcamera.x, Pcamera.y);
+ float3 D = panorama_to_direction(cam, Pcamera.x, Pcamera.y);
/* indicates ray should not receive any light, outside of the lens */
if(is_zero(D)) {
@@ -257,15 +257,15 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
}
/* modify ray for depth of field */
- float aperturesize = kernel_data.cam.aperturesize;
+ float aperturesize = cam->aperturesize;
if(aperturesize > 0.0f) {
/* sample point on aperture */
- float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
+ float2 lensuv = camera_sample_aperture(cam, lens_u, lens_v)*aperturesize;
/* compute point on plane of focus */
float3 Dfocus = normalize(D);
- float3 Pfocus = Dfocus * kernel_data.cam.focaldistance;
+ float3 Pfocus = Dfocus * cam->focaldistance;
/* calculate orthonormal coordinates perpendicular to Dfocus */
float3 U, V;
@@ -278,18 +278,18 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
}
/* transform ray from camera to world */
- Transform cameratoworld = kernel_data.cam.cameratoworld;
+ Transform cameratoworld = cam->cameratoworld;
#ifdef __CAMERA_MOTION__
- if(kernel_data.cam.have_motion) {
+ if(cam->have_motion) {
# ifdef __KERNEL_OPENCL__
- const MotionTransform tfm = kernel_data.cam.motion;
+ const MotionTransform tfm = cam->motion;
transform_motion_interpolate(&cameratoworld,
&tfm,
ray->time);
# else
transform_motion_interpolate(&cameratoworld,
- &kernel_data.cam.motion,
+ &cam->motion,
ray->time);
# endif
}
@@ -299,9 +299,9 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
D = normalize(transform_direction(&cameratoworld, D));
/* Stereo transform */
- bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
+ bool use_stereo = cam->interocular_offset != 0.0f;
if(use_stereo) {
- spherical_stereo_transform(kg, &P, &D);
+ spherical_stereo_transform(cam, &P, &D);
}
ray->P = P;
@@ -313,30 +313,30 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
* ray origin and direction for the center and two neighbouring pixels
* and simply take their differences. */
float3 Pcenter = Pcamera;
- float3 Dcenter = panorama_to_direction(kg, Pcenter.x, Pcenter.y);
+ float3 Dcenter = panorama_to_direction(cam, Pcenter.x, Pcenter.y);
Pcenter = transform_point(&cameratoworld, Pcenter);
Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
if(use_stereo) {
- spherical_stereo_transform(kg, &Pcenter, &Dcenter);
+ spherical_stereo_transform(cam, &Pcenter, &Dcenter);
}
float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
- float3 Dx = panorama_to_direction(kg, Px.x, Px.y);
+ float3 Dx = panorama_to_direction(cam, Px.x, Px.y);
Px = transform_point(&cameratoworld, Px);
Dx = normalize(transform_direction(&cameratoworld, Dx));
if(use_stereo) {
- spherical_stereo_transform(kg, &Px, &Dx);
+ spherical_stereo_transform(cam, &Px, &Dx);
}
ray->dP.dx = Px - Pcenter;
ray->dD.dx = Dx - Dcenter;
float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
- float3 Dy = panorama_to_direction(kg, Py.x, Py.y);
+ float3 Dy = panorama_to_direction(cam, Py.x, Py.y);
Py = transform_point(&cameratoworld, Py);
Dy = normalize(transform_direction(&cameratoworld, Dy));
if(use_stereo) {
- spherical_stereo_transform(kg, &Py, &Dy);
+ spherical_stereo_transform(cam, &Py, &Dy);
}
ray->dP.dy = Py - Pcenter;
@@ -345,11 +345,11 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
#ifdef __CAMERA_CLIPPING__
/* clipping */
- float nearclip = kernel_data.cam.nearclip;
+ float nearclip = cam->nearclip;
ray->P += nearclip * ray->D;
ray->dP.dx += nearclip * ray->dD.dx;
ray->dP.dy += nearclip * ray->dD.dy;
- ray->t = kernel_data.cam.cliplength;
+ ray->t = cam->cliplength;
#else
ray->t = FLT_MAX;
#endif
@@ -415,7 +415,7 @@ ccl_device_inline void camera_sample(KernelGlobals *kg,
else if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
else
- camera_sample_panorama(kg, raster_x, raster_y, lens_u, lens_v, ray);
+ camera_sample_panorama(&kernel_data.cam, raster_x, raster_y, lens_u, lens_v, ray);
}
/* Utilities */
@@ -472,7 +472,7 @@ ccl_device_inline float3 camera_world_to_ndc(KernelGlobals *kg, ShaderData *sd,
else
P = normalize(transform_direction(&tfm, P));
- float2 uv = direction_to_panorama(kg, P);
+ float2 uv = direction_to_panorama(&kernel_data.cam, P);
return make_float3(uv.x, uv.y, 0.0f);
}
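
One detail that carries over from the existing code, now expressed through cam: in camera_sample_panorama() the OpenCL build copies the motion transform into a private local before interpolating. A plausible reading (not stated in the diff) is OpenCL's address-space rules: cam points into constant memory, while transform_motion_interpolate() expects a plain pointer, so the struct is copied first. Restating the branch from the hunk above with that reading as comments:

	#ifdef __CAMERA_MOTION__
		if(cam->have_motion) {
	# ifdef __KERNEL_OPENCL__
			/* 'cam' points into ccl_constant memory; copy the motion transform
			 * into a private local so a plain pointer can be passed below. */
			const MotionTransform tfm = cam->motion;
			transform_motion_interpolate(&cameratoworld, &tfm, ray->time);
	# else
			transform_motion_interpolate(&cameratoworld, &cam->motion, ray->time);
	# endif
		}
	#endif
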
diff --git a/intern/cycles/kernel/kernel_projection.h b/intern/cycles/kernel/kernel_projection.h
index cbb2442d1dc..4540d733af4 100644
--- a/intern/cycles/kernel/kernel_projection.h
+++ b/intern/cycles/kernel/kernel_projection.h
@@ -195,49 +195,49 @@ ccl_device float2 direction_to_mirrorball(float3 dir)
return make_float2(u, v);
}
-ccl_device_inline float3 panorama_to_direction(KernelGlobals *kg, float u, float v)
+ccl_device_inline float3 panorama_to_direction(ccl_constant KernelCamera *cam, float u, float v)
{
- switch(kernel_data.cam.panorama_type) {
+ switch(cam->panorama_type) {
case PANORAMA_EQUIRECTANGULAR:
- return equirectangular_range_to_direction(u, v, kernel_data.cam.equirectangular_range);
+ return equirectangular_range_to_direction(u, v, cam->equirectangular_range);
case PANORAMA_MIRRORBALL:
return mirrorball_to_direction(u, v);
case PANORAMA_FISHEYE_EQUIDISTANT:
- return fisheye_to_direction(u, v, kernel_data.cam.fisheye_fov);
+ return fisheye_to_direction(u, v, cam->fisheye_fov);
case PANORAMA_FISHEYE_EQUISOLID:
default:
- return fisheye_equisolid_to_direction(u, v, kernel_data.cam.fisheye_lens,
- kernel_data.cam.fisheye_fov, kernel_data.cam.sensorwidth, kernel_data.cam.sensorheight);
+ return fisheye_equisolid_to_direction(u, v, cam->fisheye_lens,
+ cam->fisheye_fov, cam->sensorwidth, cam->sensorheight);
}
}
-ccl_device_inline float2 direction_to_panorama(KernelGlobals *kg, float3 dir)
+ccl_device_inline float2 direction_to_panorama(ccl_constant KernelCamera *cam, float3 dir)
{
- switch(kernel_data.cam.panorama_type) {
+ switch(cam->panorama_type) {
case PANORAMA_EQUIRECTANGULAR:
- return direction_to_equirectangular_range(dir, kernel_data.cam.equirectangular_range);
+ return direction_to_equirectangular_range(dir, cam->equirectangular_range);
case PANORAMA_MIRRORBALL:
return direction_to_mirrorball(dir);
case PANORAMA_FISHEYE_EQUIDISTANT:
- return direction_to_fisheye(dir, kernel_data.cam.fisheye_fov);
+ return direction_to_fisheye(dir, cam->fisheye_fov);
case PANORAMA_FISHEYE_EQUISOLID:
default:
- return direction_to_fisheye_equisolid(dir, kernel_data.cam.fisheye_lens,
- kernel_data.cam.sensorwidth, kernel_data.cam.sensorheight);
+ return direction_to_fisheye_equisolid(dir, cam->fisheye_lens,
+ cam->sensorwidth, cam->sensorheight);
}
}
-ccl_device_inline void spherical_stereo_transform(KernelGlobals *kg, float3 *P, float3 *D)
+ccl_device_inline void spherical_stereo_transform(ccl_constant KernelCamera *cam, float3 *P, float3 *D)
{
- float interocular_offset = kernel_data.cam.interocular_offset;
+ float interocular_offset = cam->interocular_offset;
/* Interocular offset of zero means either non stereo, or stereo without
* spherical stereo. */
kernel_assert(interocular_offset != 0.0f);
- if(kernel_data.cam.pole_merge_angle_to > 0.0f) {
- const float pole_merge_angle_from = kernel_data.cam.pole_merge_angle_from,
- pole_merge_angle_to = kernel_data.cam.pole_merge_angle_to;
+ if(cam->pole_merge_angle_to > 0.0f) {
+ const float pole_merge_angle_from = cam->pole_merge_angle_from,
+ pole_merge_angle_to = cam->pole_merge_angle_to;
float altitude = fabsf(safe_asinf((*D).z));
if(altitude > pole_merge_angle_to) {
interocular_offset = 0.0f;
@@ -257,7 +257,7 @@ ccl_device_inline void spherical_stereo_transform(KernelGlobals *kg, float3 *P,
/* Convergence distance is FLT_MAX in the case of parallel convergence mode,
* no need to modify direction in this case either. */
- const float convergence_distance = kernel_data.cam.convergence_distance;
+ const float convergence_distance = cam->convergence_distance;
if(convergence_distance != FLT_MAX)
{
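
With the camera passed explicitly, these projection helpers no longer need a KernelGlobals at all, which is what makes them callable from host code; the camera.cpp change below includes the kernel headers for exactly that purpose. A hedged host-side usage sketch, reusing the names from the #if 0 branch of the hunk that follows (kernel_camera, worldtocamera and cameratoraster are members of Camera):

	/* Host-side sketch: map a world-space point through the panorama
	 * projection using the Camera's own KernelCamera copy. */
	float3 D = normalize(transform_point(&worldtocamera, P));
	float2 uv = direction_to_panorama(&kernel_camera, D);
	float3 raster = transform_perspective(&cameratoraster, make_float3(uv.x, uv.y, 0.0f));
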
diff --git a/intern/cycles/render/camera.cpp b/intern/cycles/render/camera.cpp
index 8b975886081..a2fda12ec85 100644
--- a/intern/cycles/render/camera.cpp
+++ b/intern/cycles/render/camera.cpp
@@ -27,6 +27,15 @@
#include "util/util_math_cdf.h"
#include "util/util_vector.h"
+/* needed for calculating differentials */
+#include "kernel/kernel_compat_cpu.h"
+#include "kernel/split/kernel_split_data.h"
+#include "kernel/kernel_globals.h"
+#include "kernel/kernel_projection.h"
+#include "kernel/kernel_differential.h"
+#include "kernel/kernel_montecarlo.h"
+#include "kernel/kernel_camera.h"
+
CCL_NAMESPACE_BEGIN
static float shutter_curve_eval(float x,
@@ -688,9 +697,33 @@ float Camera::world_to_raster_size(float3 P)
}
}
}
- else {
- // TODO(mai): implement for CAMERA_PANORAMA
- assert(!"pixel width calculation for panoramic projection not implemented yet");
+ else if(type == CAMERA_PANORAMA) {
+ float3 D = transform_point(&worldtocamera, P);
+ float dist = len(D);
+
+ Ray ray;
+
+ /* Distortion can become so great that the results become meaningless, there
+ * may be a better way to do this, but calculating differentials from the
+ * point directly ahead seems to produce good enough results. */
+#if 0
+ float2 dir = direction_to_panorama(&kernel_camera, normalize(D));
+ float3 raster = transform_perspective(&cameratoraster, make_float3(dir.x, dir.y, 0.0f));
+
+ ray.t = 1.0f;
+ camera_sample_panorama(&kernel_camera, raster.x, raster.y, 0.0f, 0.0f, &ray);
+ if(ray.t == 0.0f) {
+ /* No differentials, just use from directly ahead. */
+ camera_sample_panorama(&kernel_camera, 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
+ }
+#else
+ camera_sample_panorama(&kernel_camera, 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
+#endif
+
+ differential_transfer(&ray.dP, ray.dP, ray.D, ray.dD, ray.D, dist);
+
+ return max(len(ray.dP.dx) * (float(width)/float(full_width)),
+ len(ray.dP.dy) * (float(height)/float(full_height)));
}
return res;
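
For the new CAMERA_PANORAMA branch, the pixel footprint at P is estimated by sampling the panorama ray through the image center, transferring its differentials out to the distance of P, and taking the larger of the dx/dy footprints rescaled from the render resolution (width/height) to the full resolution (full_width/full_height). The differential_transfer() step this relies on is assumed here to be the usual Igehy-style transfer from kernel_differential.h; a sketch with illustrative variable names:

	/* Move the origin differentials dP along the ray by distance t and project
	 * onto the plane with normal Ng; with Ng == ray.D this gives the ray's
	 * footprint on a plane perpendicular to the view direction at distance t. */
	float3 tmp  = D / dot(D, Ng);
	float3 tmpx = dP.dx + t * dD.dx;
	float3 tmpy = dP.dy + t * dD.dy;
	out.dx = tmpx - dot(tmpx, Ng) * tmp;
	out.dy = tmpy - dot(tmpy, Ng) * tmp;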