git.blender.org/blender.git
author    Brecht Van Lommel <brechtvanlommel@pandora.be>    2012-04-30 16:49:26 +0400
committer Brecht Van Lommel <brechtvanlommel@pandora.be>    2012-04-30 16:49:26 +0400
commit    1d8c79818870b92df46c443d7778438aa67d019c (patch)
tree      ba3fc305671261e40851d8a230a33ebe19396e95 /intern/cycles
parent    796dd8a321108df26757fb9df5c2aa6eb42c9633 (diff)
Cycles: support for motion vector and UV passes.
Most of the changes are related to adding support for motion data throughout the code. There's some code for actual camera/object motion blur raytracing, but it's unfinished (it badly slows down the raytracing kernel even when the option is turned off), so that code is still disabled. Motion vector export from Blender tries to avoid computing derived meshes when the mesh does not have a deforming modifier, and it also won't store motion vectors for every vertex if only the object or camera is moving.
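The mechanism added here is visible in the diff below: cameras and objects carry a MotionTransform with pre/post matrices plus a shutter time, and each camera ray gets a time value that selects where between those two matrices it is sampled. The sketch below only illustrates that idea and is not the Cycles util_transform code; the struct layout and the element-wise blend are assumptions made for illustration, while the ray-time mapping mirrors the kernel_camera.h change in this commit.

/* Minimal illustrative sketch (not the actual Cycles implementation). */

struct Transform {
	float m[3][4]; /* hypothetical 3x4 affine layout, assumed for this sketch */
};

struct MotionTransform {
	Transform pre;  /* transform at shutter open */
	Transform post; /* transform at shutter close */
};

const float TIME_INVALID_SKETCH = 1e30f; /* stand-in for the kernel's TIME_INVALID */

/* Map a [0,1] sample to a ray time centered on the frame, as kernel_camera.h
 * does after this commit; a zero shutter time disables motion blur for the ray. */
float ray_time_from_sample(float sample, float shuttertime)
{
	if(shuttertime == 0.0f)
		return TIME_INVALID_SKETCH;

	return 0.5f + (sample - 0.5f)*shuttertime;
}

/* Blend the pre/post matrices at the given ray time. A plain element-wise
 * lerp is used here purely for illustration; the real transform_motion_interpolate
 * in util_transform may interpolate differently. */
Transform motion_interpolate_sketch(const MotionTransform& motion, float time)
{
	Transform result;

	for(int i = 0; i < 3; i++)
		for(int j = 0; j < 4; j++)
			result.m[i][j] = (1.0f - time)*motion.pre.m[i][j] + time*motion.post.m[i][j];

	return result;
}

In the diff itself, cameras and objects that do not move simply get motion.pre == motion.post == their regular matrix, so the interpolation is a no-op for them.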
Diffstat (limited to 'intern/cycles')
-rw-r--r-- intern/cycles/app/cycles_xml.cpp | 7
-rw-r--r-- intern/cycles/blender/addon/ui.py | 30
-rw-r--r-- intern/cycles/blender/blender_camera.cpp | 66
-rw-r--r-- intern/cycles/blender/blender_mesh.cpp | 69
-rw-r--r-- intern/cycles/blender/blender_object.cpp | 98
-rw-r--r-- intern/cycles/blender/blender_session.cpp | 11
-rw-r--r-- intern/cycles/blender/blender_sync.cpp | 7
-rw-r--r-- intern/cycles/blender/blender_sync.h | 9
-rw-r--r-- intern/cycles/blender/blender_util.h | 12
-rw-r--r-- intern/cycles/kernel/kernel_bvh.h | 19
-rw-r--r-- intern/cycles/kernel/kernel_camera.h | 25
-rw-r--r-- intern/cycles/kernel/kernel_emission.h | 8
-rw-r--r-- intern/cycles/kernel/kernel_light.h | 13
-rw-r--r-- intern/cycles/kernel/kernel_object.h | 70
-rw-r--r-- intern/cycles/kernel/kernel_passes.h | 7
-rw-r--r-- intern/cycles/kernel/kernel_path.h | 17
-rw-r--r-- intern/cycles/kernel/kernel_shader.h | 58
-rw-r--r-- intern/cycles/kernel/kernel_triangle.h | 63
-rw-r--r-- intern/cycles/kernel/kernel_types.h | 65
-rw-r--r-- intern/cycles/kernel/svm/svm_tex_coord.h | 24
-rw-r--r-- intern/cycles/render/CMakeLists.txt | 4
-rw-r--r-- intern/cycles/render/attribute.cpp | 54
-rw-r--r-- intern/cycles/render/attribute.h | 29
-rw-r--r-- intern/cycles/render/buffers.cpp | 22
-rw-r--r-- intern/cycles/render/camera.cpp | 39
-rw-r--r-- intern/cycles/render/camera.h | 10
-rw-r--r-- intern/cycles/render/film.cpp | 22
-rw-r--r-- intern/cycles/render/film.h | 1
-rw-r--r-- intern/cycles/render/graph.cpp | 4
-rw-r--r-- intern/cycles/render/integrator.cpp | 4
-rw-r--r-- intern/cycles/render/integrator.h | 1
-rw-r--r-- intern/cycles/render/mesh.cpp | 80
-rw-r--r-- intern/cycles/render/mesh.h | 3
-rw-r--r-- intern/cycles/render/mesh_displace.cpp | 4
-rw-r--r-- intern/cycles/render/nodes.cpp | 8
-rw-r--r-- intern/cycles/render/object.cpp | 83
-rw-r--r-- intern/cycles/render/object.h | 4
-rw-r--r-- intern/cycles/render/scene.cpp | 29
-rw-r--r-- intern/cycles/render/scene.h | 7
-rw-r--r-- intern/cycles/render/shader.cpp | 4
-rw-r--r-- intern/cycles/render/shader.h | 2
-rw-r--r-- intern/cycles/render/svm.cpp | 2
-rw-r--r-- intern/cycles/render/svm.h | 2
-rw-r--r-- intern/cycles/subd/subd_dice.cpp | 4
-rw-r--r-- intern/cycles/util/util_math.h | 76
-rw-r--r-- intern/cycles/util/util_transform.cpp | 101
-rw-r--r-- intern/cycles/util/util_transform.h | 118
47 files changed, 1094 insertions(+), 301 deletions(-)
diff --git a/intern/cycles/app/cycles_xml.cpp b/intern/cycles/app/cycles_xml.cpp
index b954ff45e27..82f1338d86b 100644
--- a/intern/cycles/app/cycles_xml.cpp
+++ b/intern/cycles/app/cycles_xml.cpp
@@ -284,8 +284,7 @@ static void xml_read_camera(const XMLReadState& state, pugi::xml_node node)
xml_read_float(&cam->farclip, node, "farclip");
xml_read_float(&cam->aperturesize, node, "aperturesize"); // 0.5*focallength/fstop
xml_read_float(&cam->focaldistance, node, "focaldistance");
- xml_read_float(&cam->shutteropen, node, "shutteropen");
- xml_read_float(&cam->shutterclose, node, "shutterclose");
+ xml_read_float(&cam->shuttertime, node, "shuttertime");
if(xml_equal_string(node, "type", "orthographic"))
cam->type = CAMERA_ORTHOGRAPHIC;
@@ -705,7 +704,7 @@ static void xml_read_mesh(const XMLReadState& state, pugi::xml_node node)
}
/* temporary for test compatibility */
- mesh->attributes.remove(Attribute::STD_VERTEX_NORMAL);
+ mesh->attributes.remove(ATTR_STD_VERTEX_NORMAL);
}
/* Patch */
@@ -766,7 +765,7 @@ static void xml_read_patch(const XMLReadState& state, pugi::xml_node node)
delete patch;
/* temporary for test compatibility */
- mesh->attributes.remove(Attribute::STD_VERTEX_NORMAL);
+ mesh->attributes.remove(ATTR_STD_VERTEX_NORMAL);
}
}
diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py
index 0ed08589327..8480b0a5256 100644
--- a/intern/cycles/blender/addon/ui.py
+++ b/intern/cycles/blender/addon/ui.py
@@ -94,6 +94,29 @@ class CyclesRender_PT_integrator(CyclesButtonsPanel, Panel):
col.prop(cscene, "blur_glossy")
+class CyclesRender_PT_motion_blur(CyclesButtonsPanel, Panel):
+ bl_label = "Motion Blur"
+ bl_options = {'DEFAULT_CLOSED'}
+
+ @classmethod
+ def poll(cls, context):
+ return False
+
+ def draw_header(self, context):
+ rd = context.scene.render
+
+ self.layout.prop(rd, "use_motion_blur", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ rd = context.scene.render
+ layout.active = rd.use_motion_blur
+
+ row = layout.row()
+ row.prop(rd, "motion_blur_shutter")
+
+
class CyclesRender_PT_film(CyclesButtonsPanel, Panel):
bl_label = "Film"
@@ -202,10 +225,10 @@ class CyclesRender_PT_layers(CyclesButtonsPanel, Panel):
col.prop(rl, "use_pass_combined")
col.prop(rl, "use_pass_z")
col.prop(rl, "use_pass_normal")
+ col.prop(rl, "use_pass_vector")
+ col.prop(rl, "use_pass_uv")
col.prop(rl, "use_pass_object_index")
col.prop(rl, "use_pass_material_index")
- col.prop(rl, "use_pass_emit")
- col.prop(rl, "use_pass_environment")
col.prop(rl, "use_pass_ambient_occlusion")
col.prop(rl, "use_pass_shadow")
@@ -227,6 +250,9 @@ class CyclesRender_PT_layers(CyclesButtonsPanel, Panel):
row.prop(rl, "use_pass_transmission_indirect", text="Indirect", toggle=True)
row.prop(rl, "use_pass_transmission_color", text="Color", toggle=True)
+ col.prop(rl, "use_pass_emit", text="Emission")
+ col.prop(rl, "use_pass_environment")
+
class Cycles_PT_post_processing(CyclesButtonsPanel, Panel):
bl_label = "Post Processing"
diff --git a/intern/cycles/blender/blender_camera.cpp b/intern/cycles/blender/blender_camera.cpp
index a21b22bc35a..55a32d8fc10 100644
--- a/intern/cycles/blender/blender_camera.cpp
+++ b/intern/cycles/blender/blender_camera.cpp
@@ -35,6 +35,7 @@ struct BlenderCamera {
float ortho_scale;
float lens;
+ float shuttertime;
float aperturesize;
uint apertureblades;
@@ -64,6 +65,7 @@ static void blender_camera_init(BlenderCamera *bcam)
bcam->sensor_width = 32.0f;
bcam->sensor_height = 18.0f;
bcam->sensor_fit = BlenderCamera::AUTO;
+ bcam->shuttertime = 1.0f;
}
static float blender_camera_focal_distance(BL::Object b_ob, BL::Camera b_camera)
@@ -132,6 +134,28 @@ static void blender_camera_from_object(BlenderCamera *bcam, BL::Object b_ob)
}
}
+static Transform blender_camera_matrix(const Transform& tfm, CameraType type)
+{
+ Transform result;
+
+ if(type == CAMERA_ENVIRONMENT) {
+ /* make it so environment camera needs to be pointed in the direction
+ of the positive x-axis to match an environment texture, this way
+ it is looking at the center of the texture */
+ result = tfm *
+ make_transform( 0.0f, -1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ -1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f);
+ }
+ else {
+ /* note the blender camera points along the negative z-axis */
+ result = tfm * transform_scale(1.0f, 1.0f, -1.0f);
+ }
+
+ return transform_clear_scale(result);
+}
+
static void blender_camera_sync(Camera *cam, BlenderCamera *bcam, int width, int height)
{
/* copy camera to compare later */
@@ -224,24 +248,11 @@ static void blender_camera_sync(Camera *cam, BlenderCamera *bcam, int width, int
cam->bladesrotation = bcam->aperturerotation;
/* transform */
- cam->matrix = bcam->matrix;
-
- if(bcam->type == CAMERA_ENVIRONMENT) {
- /* make it so environment camera needs to be pointed in the direction
- of the positive x-axis to match an environment texture, this way
- it is looking at the center of the texture */
- cam->matrix = cam->matrix *
- make_transform( 0.0f, -1.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 1.0f, 0.0f,
- -1.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 1.0f);
- }
- else {
- /* note the blender camera points along the negative z-axis */
- cam->matrix = cam->matrix * transform_scale(1.0f, 1.0f, -1.0f);
- }
-
- cam->matrix = transform_clear_scale(cam->matrix);
+ cam->matrix = blender_camera_matrix(bcam->matrix, bcam->type);
+ cam->motion.pre = cam->matrix;
+ cam->motion.post = cam->matrix;
+ cam->use_motion = false;
+ cam->shuttertime = bcam->shuttertime;
/* set update flag */
if(cam->modified(prevcam))
@@ -260,6 +271,7 @@ void BlenderSync::sync_camera(BL::Object b_override, int width, int height)
bcam.pixelaspect.x = r.pixel_aspect_x();
bcam.pixelaspect.y = r.pixel_aspect_y();
+ bcam.shuttertime = r.motion_blur_shutter();
/* camera object */
BL::Object b_ob = b_scene.camera();
@@ -277,6 +289,23 @@ void BlenderSync::sync_camera(BL::Object b_override, int width, int height)
blender_camera_sync(cam, &bcam, width, height);
}
+void BlenderSync::sync_camera_motion(BL::Object b_ob, int motion)
+{
+ Camera *cam = scene->camera;
+
+ Transform tfm = get_transform(b_ob.matrix_world());
+ tfm = blender_camera_matrix(tfm, cam->type);
+
+ if(tfm != cam->matrix) {
+ if(motion == -1)
+ cam->motion.pre = tfm;
+ else
+ cam->motion.post = tfm;
+
+ cam->use_motion = true;
+ }
+}
+
/* Sync 3D View Camera */
void BlenderSync::sync_view(BL::SpaceView3D b_v3d, BL::RegionView3D b_rv3d, int width, int height)
@@ -288,6 +317,7 @@ void BlenderSync::sync_view(BL::SpaceView3D b_v3d, BL::RegionView3D b_rv3d, int
bcam.nearclip = b_v3d.clip_start();
bcam.farclip = b_v3d.clip_end();
bcam.lens = b_v3d.lens();
+ bcam.shuttertime = b_scene.render().motion_blur_shutter();
if(b_rv3d.view_perspective() == BL::RegionView3D::view_perspective_CAMERA) {
/* camera view */
diff --git a/intern/cycles/blender/blender_mesh.cpp b/intern/cycles/blender/blender_mesh.cpp
index 7caa6b3d511..f77e6551de0 100644
--- a/intern/cycles/blender/blender_mesh.cpp
+++ b/intern/cycles/blender/blender_mesh.cpp
@@ -33,30 +33,6 @@ CCL_NAMESPACE_BEGIN
/* Find/Add */
-static bool mesh_need_attribute(Scene *scene, Mesh *mesh, Attribute::Standard std)
-{
- if(std == Attribute::STD_NONE)
- return false;
-
- foreach(uint shader, mesh->used_shaders)
- if(scene->shaders[shader]->attributes.find(std))
- return true;
-
- return false;
-}
-
-static bool mesh_need_attribute(Scene *scene, Mesh *mesh, ustring name)
-{
- if(name == ustring())
- return false;
-
- foreach(uint shader, mesh->used_shaders)
- if(scene->shaders[shader]->attributes.find(name))
- return true;
-
- return false;
-}
-
static void create_mesh(Scene *scene, Mesh *mesh, BL::Mesh b_mesh, const vector<uint>& used_shaders)
{
/* create vertices */
@@ -66,7 +42,7 @@ static void create_mesh(Scene *scene, Mesh *mesh, BL::Mesh b_mesh, const vector<
mesh->verts.push_back(get_float3(v->co()));
/* create vertex normals */
- Attribute *attr_N = mesh->attributes.add(Attribute::STD_VERTEX_NORMAL);
+ Attribute *attr_N = mesh->attributes.add(ATTR_STD_VERTEX_NORMAL);
float3 *N = attr_N->data_float3();
for(b_mesh.vertices.begin(v); v != b_mesh.vertices.end(); ++v, ++N)
@@ -94,8 +70,8 @@ static void create_mesh(Scene *scene, Mesh *mesh, BL::Mesh b_mesh, const vector<
/* create generated coordinates. todo: we should actually get the orco
coordinates from modifiers, for now we use texspace loc/size which
is available in the api. */
- if(mesh_need_attribute(scene, mesh, Attribute::STD_GENERATED)) {
- Attribute *attr = mesh->attributes.add(Attribute::STD_GENERATED);
+ if(mesh->need_attribute(scene, ATTR_STD_GENERATED)) {
+ Attribute *attr = mesh->attributes.add(ATTR_STD_GENERATED);
float3 loc = get_float3(b_mesh.texspace_location());
float3 size = get_float3(b_mesh.texspace_size());
@@ -118,7 +94,7 @@ static void create_mesh(Scene *scene, Mesh *mesh, BL::Mesh b_mesh, const vector<
BL::Mesh::tessface_vertex_colors_iterator l;
for(b_mesh.tessface_vertex_colors.begin(l); l != b_mesh.tessface_vertex_colors.end(); ++l) {
- if(!mesh_need_attribute(scene, mesh, ustring(l->name().c_str())))
+ if(!mesh->need_attribute(scene, ustring(l->name().c_str())))
continue;
Attribute *attr = mesh->attributes.add(
@@ -150,10 +126,10 @@ static void create_mesh(Scene *scene, Mesh *mesh, BL::Mesh b_mesh, const vector<
BL::Mesh::tessface_uv_textures_iterator l;
for(b_mesh.tessface_uv_textures.begin(l); l != b_mesh.tessface_uv_textures.end(); ++l) {
- Attribute::Standard std = (l->active_render())? Attribute::STD_UV: Attribute::STD_NONE;
+ AttributeStandard std = (l->active_render())? ATTR_STD_UV: ATTR_STD_NONE;
ustring name = ustring(l->name().c_str());
- if(!(mesh_need_attribute(scene, mesh, name) || mesh_need_attribute(scene, mesh, std)))
+ if(!(mesh->need_attribute(scene, name) || mesh->need_attribute(scene, std)))
continue;
Attribute *attr;
@@ -329,5 +305,38 @@ Mesh *BlenderSync::sync_mesh(BL::Object b_ob, bool holdout, bool object_updated)
return mesh;
}
+void BlenderSync::sync_mesh_motion(BL::Object b_ob, Mesh *mesh, int motion)
+{
+ /* todo: displacement, subdivision */
+ BL::ID b_ob_data = b_ob.data();
+ size_t size = mesh->verts.size();
+
+ /* skip objects without deforming modifiers. this is not a totally reliable,
+ * would need a more extensive check to see which objects are animated */
+ if(!size || !ccl::object_is_deform_modified(b_ob, b_scene, preview))
+ return;
+
+ /* get derived mesh */
+ BL::Mesh b_mesh = object_to_mesh(b_ob, b_scene, true, !preview);
+
+ if(b_mesh) {
+ BL::Mesh::vertices_iterator v;
+ AttributeStandard std = (motion == -1)? ATTR_STD_MOTION_PRE: ATTR_STD_MOTION_POST;
+ Attribute *attr_M = mesh->attributes.add(std);
+ float3 *M = attr_M->data_float3();
+ size_t i = 0, size = mesh->verts.size();
+
+ for(b_mesh.vertices.begin(v); v != b_mesh.vertices.end() && i < size; ++v, M++, i++)
+ *M = get_float3(v->co());
+
+ /* if number of vertices changed, or if coordinates stayed the same, drop it */
+ if(i != size || memcmp(M, &mesh->verts[0], sizeof(float3)*size) == 0)
+ mesh->attributes.remove(std);
+
+ /* free derived mesh */
+ object_remove_mesh(b_data, b_mesh);
+ }
+}
+
CCL_NAMESPACE_END
diff --git a/intern/cycles/blender/blender_object.cpp b/intern/cycles/blender/blender_object.cpp
index 96faee19af4..b1cd778c6d3 100644
--- a/intern/cycles/blender/blender_object.cpp
+++ b/intern/cycles/blender/blender_object.cpp
@@ -16,6 +16,7 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include "camera.h"
#include "graph.h"
#include "light.h"
#include "mesh.h"
@@ -188,11 +189,12 @@ void BlenderSync::sync_background_light()
/* Object */
-void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint layer_flag)
+void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm, uint layer_flag, int motion)
{
/* light is handled separately */
if(object_is_light(b_ob)) {
- sync_light(b_parent, b_index, b_ob, tfm);
+ if(!motion)
+ sync_light(b_parent, b_index, b_ob, tfm);
return;
}
@@ -200,9 +202,31 @@ void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob,
if(!object_is_mesh(b_ob))
return;
- /* test if we need to sync */
+ /* key to lookup object */
ObjectKey key(b_parent, b_index, b_ob);
Object *object;
+
+ /* motion vector case */
+ if(motion) {
+ object = object_map.find(key);
+
+ if(object) {
+ if(tfm != object->tfm) {
+ if(motion == -1)
+ object->motion.pre = tfm;
+ else
+ object->motion.post = tfm;
+
+ object->use_motion = true;
+ }
+
+ sync_mesh_motion(b_ob, object->mesh, motion);
+ }
+
+ return;
+ }
+
+ /* test if we need to sync */
bool object_updated = false;
if(object_map.sync(&object, b_ob, b_parent, key))
@@ -219,6 +243,9 @@ void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob,
object->name = b_ob.name().c_str();
object->pass_id = b_ob.pass_index();
object->tfm = tfm;
+ object->motion.pre = tfm;
+ object->motion.post = tfm;
+ object->use_motion = false;
/* visibility flags for both parent */
object->visibility = object_ray_visibility(b_ob) & PATH_RAY_ALL;
@@ -238,16 +265,18 @@ void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob,
/* Object Loop */
-void BlenderSync::sync_objects(BL::SpaceView3D b_v3d)
+void BlenderSync::sync_objects(BL::SpaceView3D b_v3d, int motion)
{
/* layer data */
uint scene_layer = render_layer.scene_layer;
- /* prepare for sync */
- light_map.pre_sync();
- mesh_map.pre_sync();
- object_map.pre_sync();
- mesh_synced.clear();
+ if(!motion) {
+ /* prepare for sync */
+ light_map.pre_sync();
+ mesh_map.pre_sync();
+ object_map.pre_sync();
+ mesh_synced.clear();
+ }
/* object loop */
BL::Scene::objects_iterator b_ob;
@@ -270,7 +299,7 @@ void BlenderSync::sync_objects(BL::SpaceView3D b_v3d)
bool dup_hide = (b_v3d)? b_dup_ob.hide(): b_dup_ob.hide_render();
if(!(b_dup->hide() || dup_hide))
- sync_object(*b_ob, b_index, b_dup_ob, tfm, ob_layer);
+ sync_object(*b_ob, b_index, b_dup_ob, tfm, ob_layer, motion);
b_index++;
}
@@ -296,21 +325,50 @@ void BlenderSync::sync_objects(BL::SpaceView3D b_v3d)
if(!hide) {
/* object itself */
Transform tfm = get_transform(b_ob->matrix_world());
- sync_object(*b_ob, 0, *b_ob, tfm, ob_layer);
+ sync_object(*b_ob, 0, *b_ob, tfm, ob_layer, motion);
}
}
}
- sync_background_light();
+ if(!motion) {
+ sync_background_light();
+
+ /* handle removed data and modified pointers */
+ if(light_map.post_sync())
+ scene->light_manager->tag_update(scene);
+ if(mesh_map.post_sync())
+ scene->mesh_manager->tag_update(scene);
+ if(object_map.post_sync())
+ scene->object_manager->tag_update(scene);
+ mesh_synced.clear();
+ }
+}
+
+void BlenderSync::sync_motion(BL::SpaceView3D b_v3d, BL::Object b_override)
+{
+ if(scene->need_motion() == Scene::MOTION_NONE)
+ return;
+
+ /* get camera object here to deal with camera switch */
+ BL::Object b_cam = b_scene.camera();
+ if(b_override)
+ b_cam = b_override;
+
+ /* go back and forth one frame */
+ int frame = b_scene.frame_current();
+
+ for(int motion = -1; motion <= 1; motion += 2) {
+ scene_frame_set(b_scene, frame + motion);
+
+ /* camera object */
+ if(b_cam)
+ sync_camera_motion(b_cam, motion);
+
+ /* mesh objects */
+ sync_objects(b_v3d, motion);
+ }
- /* handle removed data and modified pointers */
- if(light_map.post_sync())
- scene->light_manager->tag_update(scene);
- if(mesh_map.post_sync())
- scene->mesh_manager->tag_update(scene);
- if(object_map.post_sync())
- scene->object_manager->tag_update(scene);
- mesh_synced.clear();
+ scene_frame_set(b_scene, frame);
}
CCL_NAMESPACE_END
diff --git a/intern/cycles/blender/blender_session.cpp b/intern/cycles/blender/blender_session.cpp
index 5ece7aa26e2..f79b9995165 100644
--- a/intern/cycles/blender/blender_session.cpp
+++ b/intern/cycles/blender/blender_session.cpp
@@ -91,7 +91,7 @@ void BlenderSession::create_session()
/* create sync */
sync = new BlenderSync(b_data, b_scene, scene, !background);
- sync->sync_data(b_v3d);
+ sync->sync_data(b_v3d, b_engine.camera_override());
if(b_rv3d)
sync->sync_view(b_v3d, b_rv3d, width, height);
@@ -130,6 +130,8 @@ static PassType get_pass_type(BL::RenderPass b_pass)
return PASS_OBJECT_ID;
case BL::RenderPass::type_UV:
return PASS_UV;
+ case BL::RenderPass::type_VECTOR:
+ return PASS_MOTION;
case BL::RenderPass::type_MATERIAL_INDEX:
return PASS_MATERIAL_ID;
@@ -168,7 +170,6 @@ static PassType get_pass_type(BL::RenderPass b_pass)
case BL::RenderPass::type_REFRACTION:
case BL::RenderPass::type_SPECULAR:
case BL::RenderPass::type_REFLECTION:
- case BL::RenderPass::type_VECTOR:
case BL::RenderPass::type_MIST:
return PASS_NONE;
}
@@ -209,6 +210,8 @@ void BlenderSession::render()
BL::RenderPass b_pass(*b_pass_iter);
PassType pass_type = get_pass_type(b_pass);
+ if(pass_type == PASS_MOTION && scene->integrator->motion_blur)
+ continue;
if(pass_type != PASS_NONE)
Pass::add(pass_type, passes);
}
@@ -219,7 +222,7 @@ void BlenderSession::render()
scene->film->tag_update(scene);
/* update scene */
- sync->sync_data(b_v3d, b_iter->name().c_str());
+ sync->sync_data(b_v3d, b_engine.camera_override(), b_iter->name().c_str());
/* update session */
int samples = sync->get_layer_samples();
@@ -310,7 +313,7 @@ void BlenderSession::synchronize()
}
/* data and camera synchronize */
- sync->sync_data(b_v3d);
+ sync->sync_data(b_v3d, b_engine.camera_override());
if(b_rv3d)
sync->sync_view(b_v3d, b_rv3d, width, height);
diff --git a/intern/cycles/blender/blender_sync.cpp b/intern/cycles/blender/blender_sync.cpp
index 41cd200d003..24cf10bc028 100644
--- a/intern/cycles/blender/blender_sync.cpp
+++ b/intern/cycles/blender/blender_sync.cpp
@@ -121,19 +121,21 @@ bool BlenderSync::sync_recalc()
return recalc;
}
-void BlenderSync::sync_data(BL::SpaceView3D b_v3d, const char *layer)
+void BlenderSync::sync_data(BL::SpaceView3D b_v3d, BL::Object b_override, const char *layer)
{
sync_render_layers(b_v3d, layer);
sync_integrator();
sync_film();
sync_shaders();
sync_objects(b_v3d);
+ sync_motion(b_v3d, b_override);
}
/* Integrator */
void BlenderSync::sync_integrator()
{
+ BL::RenderSettings r = b_scene.render();
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
experimental = (RNA_enum_get(&cscene, "feature_set") != 0);
@@ -160,6 +162,9 @@ void BlenderSync::sync_integrator()
integrator->layer_flag = render_layer.layer;
integrator->sample_clamp = get_float(cscene, "sample_clamp");
+#ifdef __MOTION__
+ integrator->motion_blur = (!preview && r.use_motion_blur());
+#endif
if(integrator->modified(previntegrator))
integrator->tag_update(scene);
diff --git a/intern/cycles/blender/blender_sync.h b/intern/cycles/blender/blender_sync.h
index ab8e4bd8d00..acdcea1ef9b 100644
--- a/intern/cycles/blender/blender_sync.h
+++ b/intern/cycles/blender/blender_sync.h
@@ -54,7 +54,7 @@ public:
/* sync */
bool sync_recalc();
- void sync_data(BL::SpaceView3D b_v3d, const char *layer = 0);
+ void sync_data(BL::SpaceView3D b_v3d, BL::Object b_override, const char *layer = 0);
void sync_camera(BL::Object b_override, int width, int height);
void sync_view(BL::SpaceView3D b_v3d, BL::RegionView3D b_rv3d, int width, int height);
int get_layer_samples() { return render_layer.samples; }
@@ -69,7 +69,8 @@ private:
/* sync */
void sync_lamps();
void sync_materials();
- void sync_objects(BL::SpaceView3D b_v3d);
+ void sync_objects(BL::SpaceView3D b_v3d, int motion = 0);
+ void sync_motion(BL::SpaceView3D b_v3d, BL::Object b_override);
void sync_film();
void sync_integrator();
void sync_view();
@@ -79,9 +80,11 @@ private:
void sync_nodes(Shader *shader, BL::ShaderNodeTree b_ntree);
Mesh *sync_mesh(BL::Object b_ob, bool holdout, bool object_updated);
- void sync_object(BL::Object b_parent, int b_index, BL::Object b_object, Transform& tfm, uint layer_flag);
+ void sync_object(BL::Object b_parent, int b_index, BL::Object b_object, Transform& tfm, uint layer_flag, int motion);
void sync_light(BL::Object b_parent, int b_index, BL::Object b_ob, Transform& tfm);
void sync_background_light();
+ void sync_mesh_motion(BL::Object b_ob, Mesh *mesh, int motion);
+ void sync_camera_motion(BL::Object b_ob, int motion);
/* util */
void find_shader(BL::ID id, vector<uint>& used_shaders, int default_shader);
diff --git a/intern/cycles/blender/blender_util.h b/intern/cycles/blender/blender_util.h
index 67f3a3ab7d9..9184e14bc76 100644
--- a/intern/cycles/blender/blender_util.h
+++ b/intern/cycles/blender/blender_util.h
@@ -49,8 +49,10 @@ void RE_engine_update_progress(struct RenderEngine *engine, float progress);
void engine_tag_redraw(void *engine);
void engine_tag_update(void *engine);
int rna_Object_is_modified(void *ob, void *scene, int settings);
+int rna_Object_is_deform_modified(void *ob, void *scene, int settings);
void BLI_timestr(double _time, char *str);
void rna_ColorRamp_eval(void *coba, float position, float color[4]);
+void rna_Scene_frame_set(void *scene, int frame, float subframe);
}
@@ -94,6 +96,16 @@ static inline bool object_is_modified(BL::Object self, BL::Scene scene, bool pre
return rna_Object_is_modified(self.ptr.data, scene.ptr.data, (preview)? (1<<0): (1<<1))? true: false;
}
+static inline bool object_is_deform_modified(BL::Object self, BL::Scene scene, bool preview)
+{
+ return rna_Object_is_deform_modified(self.ptr.data, scene.ptr.data, (preview)? (1<<0): (1<<1))? true: false;
+}
+
+static inline void scene_frame_set(BL::Scene scene, int frame)
+{
+ rna_Scene_frame_set(scene.ptr.data, frame, 0.0f);
+}
+
/* Utilities */
static inline Transform get_transform(BL::Array<float, 16> array)
diff --git a/intern/cycles/kernel/kernel_bvh.h b/intern/cycles/kernel/kernel_bvh.h
index 523ae8ae926..5da4253bd86 100644
--- a/intern/cycles/kernel/kernel_bvh.h
+++ b/intern/cycles/kernel/kernel_bvh.h
@@ -57,7 +57,7 @@ __device_inline float3 bvh_inverse_direction(float3 dir)
__device_inline void bvh_instance_push(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *idir, float *t, const float tmax)
{
- Transform tfm = object_fetch_transform(kg, object, OBJECT_INVERSE_TRANSFORM);
+ Transform tfm = object_fetch_transform(kg, object, ray->time, OBJECT_INVERSE_TRANSFORM);
*P = transform_point(&tfm, ray->P);
@@ -74,7 +74,7 @@ __device_inline void bvh_instance_push(KernelGlobals *kg, int object, const Ray
__device_inline void bvh_instance_pop(KernelGlobals *kg, int object, const Ray *ray, float3 *P, float3 *idir, float *t, const float tmax)
{
- Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
+ Transform tfm = object_fetch_transform(kg, object, ray->time, OBJECT_TRANSFORM);
if(*t != FLT_MAX)
*t *= len(transform_direction(&tfm, 1.0f/(*idir)));
@@ -341,7 +341,7 @@ __device_inline float3 ray_offset(float3 P, float3 Ng)
#endif
}
-__device_inline float3 bvh_triangle_refine(KernelGlobals *kg, const Intersection *isect, const Ray *ray)
+__device_inline float3 bvh_triangle_refine(KernelGlobals *kg, ShaderData *sd, const Intersection *isect, const Ray *ray)
{
float3 P = ray->P;
float3 D = ray->D;
@@ -349,7 +349,11 @@ __device_inline float3 bvh_triangle_refine(KernelGlobals *kg, const Intersection
#ifdef __INTERSECTION_REFINE__
if(isect->object != ~0) {
- Transform tfm = object_fetch_transform(kg, isect->object, OBJECT_INVERSE_TRANSFORM);
+#ifdef __MOTION__
+ Transform tfm = sd->ob_itfm;
+#else
+ Transform tfm = object_fetch_transform(kg, isect->object, ray->time, OBJECT_INVERSE_TRANSFORM);
+#endif
P = transform_point(&tfm, P);
D = transform_direction(&tfm, D*t);
@@ -366,7 +370,12 @@ __device_inline float3 bvh_triangle_refine(KernelGlobals *kg, const Intersection
P = P + D*rt;
if(isect->object != ~0) {
- Transform tfm = object_fetch_transform(kg, isect->object, OBJECT_TRANSFORM);
+#ifdef __MOTION__
+ Transform tfm = sd->ob_tfm;
+#else
+ Transform tfm = object_fetch_transform(kg, isect->object, ray->time, OBJECT_TRANSFORM);
+#endif
+
P = transform_point(&tfm, P);
}
diff --git a/intern/cycles/kernel/kernel_camera.h b/intern/cycles/kernel/kernel_camera.h
index 99dac18d545..7b93ed7c0e6 100644
--- a/intern/cycles/kernel/kernel_camera.h
+++ b/intern/cycles/kernel/kernel_camera.h
@@ -63,6 +63,11 @@ __device void camera_sample_perspective(KernelGlobals *kg, float raster_x, float
/* transform ray from camera to world */
Transform cameratoworld = kernel_data.cam.cameratoworld;
+#ifdef __MOTION__
+ if(ray->time != TIME_INVALID)
+ transform_motion_interpolate(&cameratoworld, &kernel_data.cam.motion, ray->time);
+#endif
+
ray->P = transform_point(&cameratoworld, ray->P);
ray->D = transform_direction(&cameratoworld, ray->D);
ray->D = normalize(ray->D);
@@ -101,6 +106,11 @@ __device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, floa
/* transform ray from camera to world */
Transform cameratoworld = kernel_data.cam.cameratoworld;
+#ifdef __MOTION__
+ if(ray->time != TIME_INVALID)
+ transform_motion_interpolate(&cameratoworld, &kernel_data.cam.motion, ray->time);
+#endif
+
ray->P = transform_point(&cameratoworld, ray->P);
ray->D = transform_direction(&cameratoworld, ray->D);
ray->D = normalize(ray->D);
@@ -136,6 +146,11 @@ __device void camera_sample_environment(KernelGlobals *kg, float raster_x, float
/* transform ray from camera to world */
Transform cameratoworld = kernel_data.cam.cameratoworld;
+#ifdef __MOTION__
+ if(ray->time != TIME_INVALID)
+ transform_motion_interpolate(&cameratoworld, &kernel_data.cam.motion, ray->time);
+#endif
+
ray->P = transform_point(&cameratoworld, ray->P);
ray->D = transform_direction(&cameratoworld, ray->D);
ray->D = normalize(ray->D);
@@ -162,14 +177,20 @@ __device void camera_sample_environment(KernelGlobals *kg, float raster_x, float
/* Common */
-__device void camera_sample(KernelGlobals *kg, int x, int y, float filter_u, float filter_v, float lens_u, float lens_v, Ray *ray)
+__device void camera_sample(KernelGlobals *kg, int x, int y, float filter_u, float filter_v,
+ float lens_u, float lens_v, float time, Ray *ray)
{
/* pixel filter */
float raster_x = x + kernel_tex_interp(__filter_table, filter_u, FILTER_TABLE_SIZE);
float raster_y = y + kernel_tex_interp(__filter_table, filter_v, FILTER_TABLE_SIZE);
+#ifdef __MOTION__
/* motion blur */
- //ray->time = lerp(time_t, kernel_data.cam.shutter_open, kernel_data.cam.shutter_close);
+ if(kernel_data.cam.shuttertime == 0.0f)
+ ray->time = TIME_INVALID;
+ else
+ ray->time = 0.5f + (time - 0.5f)*kernel_data.cam.shuttertime;
+#endif
/* sample */
if(kernel_data.cam.type == CAMERA_PERSPECTIVE)
diff --git a/intern/cycles/kernel/kernel_emission.h b/intern/cycles/kernel/kernel_emission.h
index 764ac599991..cd7701a0c75 100644
--- a/intern/cycles/kernel/kernel_emission.h
+++ b/intern/cycles/kernel/kernel_emission.h
@@ -21,7 +21,7 @@ CCL_NAMESPACE_BEGIN
/* Direction Emission */
__device float3 direct_emissive_eval(KernelGlobals *kg, float rando,
- LightSample *ls, float u, float v, float3 I)
+ LightSample *ls, float u, float v, float3 I, float time)
{
/* setup shading at emitter */
ShaderData sd;
@@ -40,7 +40,7 @@ __device float3 direct_emissive_eval(KernelGlobals *kg, float rando,
else
#endif
{
- shader_setup_from_sample(kg, &sd, ls->P, ls->Ng, I, ls->shader, ls->object, ls->prim, u, v);
+ shader_setup_from_sample(kg, &sd, ls->P, ls->Ng, I, ls->shader, ls->object, ls->prim, u, v, time);
ls->Ng = sd.Ng;
/* no path flag, we're evaluating this for all closures. that's weak but
@@ -76,7 +76,7 @@ __device bool direct_emission(KernelGlobals *kg, ShaderData *sd, int lindex,
#endif
{
/* sample a light and position on int */
- light_sample(kg, randt, randu, randv, sd->P, &ls, &pdf);
+ light_sample(kg, randt, randu, randv, sd->time, sd->P, &ls, &pdf);
}
/* compute pdf */
@@ -87,7 +87,7 @@ __device bool direct_emission(KernelGlobals *kg, ShaderData *sd, int lindex,
return false;
/* evaluate closure */
- float3 light_eval = direct_emissive_eval(kg, rando, &ls, randu, randv, -ls.D);
+ float3 light_eval = direct_emissive_eval(kg, rando, &ls, randu, randv, -ls.D, sd->time);
if(is_zero(light_eval))
return false;
diff --git a/intern/cycles/kernel/kernel_light.h b/intern/cycles/kernel/kernel_light.h
index 42260577069..c2cf293cab3 100644
--- a/intern/cycles/kernel/kernel_light.h
+++ b/intern/cycles/kernel/kernel_light.h
@@ -251,7 +251,7 @@ __device float regular_light_pdf(KernelGlobals *kg,
/* Triangle Light */
__device void triangle_light_sample(KernelGlobals *kg, int prim, int object,
- float randu, float randv, LightSample *ls)
+ float randu, float randv, float time, LightSample *ls)
{
/* triangle, so get position, normal, shader */
ls->P = triangle_sample_MT(kg, prim, randu, randv);
@@ -264,8 +264,11 @@ __device void triangle_light_sample(KernelGlobals *kg, int prim, int object,
#ifdef __INSTANCING__
/* instance transform */
if(ls->object >= 0) {
- object_position_transform(kg, ls->object, &ls->P);
- object_normal_transform(kg, ls->object, &ls->Ng);
+ Transform tfm = object_fetch_transform(kg, ls->object, time, OBJECT_TRANSFORM);
+ Transform itfm = object_fetch_transform(kg, ls->object, time, OBJECT_INVERSE_TRANSFORM);
+
+ ls->P = transform_point(&tfm, ls->P);
+ ls->Ng = transform_direction_transposed(&itfm, ls->Ng);
}
#endif
}
@@ -313,7 +316,7 @@ __device int light_distribution_sample(KernelGlobals *kg, float randt)
/* Generic Light */
-__device void light_sample(KernelGlobals *kg, float randt, float randu, float randv, float3 P, LightSample *ls, float *pdf)
+__device void light_sample(KernelGlobals *kg, float randt, float randu, float randv, float time, float3 P, LightSample *ls, float *pdf)
{
/* sample index */
int index = light_distribution_sample(kg, randt);
@@ -324,7 +327,7 @@ __device void light_sample(KernelGlobals *kg, float randt, float randu, float ra
if(prim >= 0) {
int object = __float_as_int(l.w);
- triangle_light_sample(kg, prim, object, randu, randv, ls);
+ triangle_light_sample(kg, prim, object, randu, randv, time, ls);
}
else {
int point = -prim-1;
diff --git a/intern/cycles/kernel/kernel_object.h b/intern/cycles/kernel/kernel_object.h
index b676f58e5d4..262ca848f28 100644
--- a/intern/cycles/kernel/kernel_object.h
+++ b/intern/cycles/kernel/kernel_object.h
@@ -20,41 +20,87 @@ CCL_NAMESPACE_BEGIN
enum ObjectTransform {
OBJECT_TRANSFORM = 0,
- OBJECT_INVERSE_TRANSFORM = 4,
- OBJECT_NORMAL_TRANSFORM = 8,
- OBJECT_PROPERTIES = 12
+ OBJECT_INVERSE_TRANSFORM = 3,
+ OBJECT_PROPERTIES = 6,
+ OBJECT_TRANSFORM_MOTION_PRE = 8,
+ OBJECT_TRANSFORM_MOTION_POST = 12
};
-__device_inline Transform object_fetch_transform(KernelGlobals *kg, int object, enum ObjectTransform type)
+__device_inline Transform object_fetch_transform(KernelGlobals *kg, int object, float time, enum ObjectTransform type)
{
Transform tfm;
+#ifdef __MOTION__
+ /* if we do motion blur */
+ if(time != TIME_INVALID) {
+ int offset = object*OBJECT_SIZE + (int)OBJECT_TRANSFORM_MOTION_PRE;
+ float4 have_motion = kernel_tex_fetch(__objects, offset + 0);
+
+ /* if this object have motion */
+ if(have_motion.x != FLT_MAX) {
+ /* fetch motion transforms */
+ MotionTransform motion;
+
+ motion.pre.x = have_motion;
+ motion.pre.y = kernel_tex_fetch(__objects, offset + 1);
+ motion.pre.z = kernel_tex_fetch(__objects, offset + 2);
+ motion.pre.w = kernel_tex_fetch(__objects, offset + 3);
+
+ motion.post.x = kernel_tex_fetch(__objects, offset + 4);
+ motion.post.y = kernel_tex_fetch(__objects, offset + 5);
+ motion.post.z = kernel_tex_fetch(__objects, offset + 6);
+ motion.post.w = kernel_tex_fetch(__objects, offset + 7);
+
+ /* interpolate (todo: do only once per object) */
+ transform_motion_interpolate(&tfm, &motion, time);
+
+ /* invert */
+ if(type == OBJECT_INVERSE_TRANSFORM)
+ tfm = transform_quick_inverse(tfm);
+
+ return tfm;
+ }
+ }
+#endif
+
int offset = object*OBJECT_SIZE + (int)type;
tfm.x = kernel_tex_fetch(__objects, offset + 0);
tfm.y = kernel_tex_fetch(__objects, offset + 1);
tfm.z = kernel_tex_fetch(__objects, offset + 2);
- tfm.w = kernel_tex_fetch(__objects, offset + 3);
+ tfm.w = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
return tfm;
}
-__device_inline void object_position_transform(KernelGlobals *kg, int object, float3 *P)
+__device_inline void object_position_transform(KernelGlobals *kg, ShaderData *sd, float3 *P)
{
- Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
+#ifdef __MOTION__
+ *P = transform_point(&sd->ob_tfm, *P);
+#else
+ Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_TRANSFORM);
*P = transform_point(&tfm, *P);
+#endif
}
-__device_inline void object_normal_transform(KernelGlobals *kg, int object, float3 *N)
+__device_inline void object_normal_transform(KernelGlobals *kg, ShaderData *sd, float3 *N)
{
- Transform tfm = object_fetch_transform(kg, object, OBJECT_NORMAL_TRANSFORM);
- *N = normalize(transform_direction(&tfm, *N));
+#ifdef __MOTION__
+ *N = normalize(transform_direction_transposed(&sd->ob_itfm, *N));
+#else
+ Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_INVERSE_TRANSFORM);
+ *N = normalize(transform_direction_transposed(&tfm, *N));
+#endif
}
-__device_inline void object_dir_transform(KernelGlobals *kg, int object, float3 *D)
+__device_inline void object_dir_transform(KernelGlobals *kg, ShaderData *sd, float3 *D)
{
- Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
+#ifdef __MOTION__
+ *D = transform_direction(&sd->ob_tfm, *D);
+#else
+ Transform tfm = object_fetch_transform(kg, sd->object, 0.0f, OBJECT_TRANSFORM);
*D = transform_direction(&tfm, *D);
+#endif
}
__device_inline float object_surface_area(KernelGlobals *kg, int object)
diff --git a/intern/cycles/kernel/kernel_passes.h b/intern/cycles/kernel/kernel_passes.h
index fd4ee17cdc1..f3ddda4a392 100644
--- a/intern/cycles/kernel/kernel_passes.h
+++ b/intern/cycles/kernel/kernel_passes.h
@@ -72,9 +72,14 @@ __device_inline void kernel_write_data_passes(KernelGlobals *kg, __global float
kernel_write_pass_float3(buffer + kernel_data.film.pass_normal, sample, normal);
}
if(flag & PASS_UV) {
- float3 uv = make_float3(0.0f, 0.0f, 0.0f); /* todo: request and lookup */
+ float3 uv = triangle_uv(kg, sd);
kernel_write_pass_float3(buffer + kernel_data.film.pass_uv, sample, uv);
}
+ if(flag & PASS_MOTION) {
+ float4 speed = triangle_motion_vector(kg, sd);
+ kernel_write_pass_float4(buffer + kernel_data.film.pass_motion, sample, speed);
+ kernel_write_pass_float(buffer + kernel_data.film.pass_motion_weight, sample, 1.0f);
+ }
}
if(flag & (PASS_DIFFUSE_INDIRECT|PASS_DIFFUSE_COLOR|PASS_DIFFUSE_DIRECT))
diff --git a/intern/cycles/kernel/kernel_path.h b/intern/cycles/kernel/kernel_path.h
index 8ebac177277..b7c22087e1f 100644
--- a/intern/cycles/kernel/kernel_path.h
+++ b/intern/cycles/kernel/kernel_path.h
@@ -18,8 +18,8 @@
#include "kernel_differential.h"
#include "kernel_montecarlo.h"
-#include "kernel_triangle.h"
#include "kernel_object.h"
+#include "kernel_triangle.h"
#ifdef __QBVH__
#include "kernel_qbvh.h"
#else
@@ -324,6 +324,9 @@ __device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample, R
light_ray.P = ray_offset(sd.P, sd.Ng);
light_ray.D = ao_D;
light_ray.t = kernel_data.background.ao_distance;
+#ifdef __MOTION__
+ light_ray.time = sd.time;
+#endif
if(!shadow_blocked(kg, &state, &light_ray, &ao_shadow)) {
float3 ao_bsdf = shader_bsdf_diffuse(kg, &sd)*kernel_data.background.ao_factor;
@@ -346,6 +349,10 @@ __device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample, R
BsdfEval L_light;
bool is_lamp;
+#ifdef __MOTION__
+ light_ray.time = sd.time;
+#endif
+
#ifdef __MULTI_LIGHT__
/* index -1 means randomly sample from distribution */
int i = (kernel_data.integrator.num_distribution)? -1: 0;
@@ -449,7 +456,13 @@ __device void kernel_path_trace(KernelGlobals *kg,
float lens_u = path_rng(kg, &rng, sample, PRNG_LENS_U);
float lens_v = path_rng(kg, &rng, sample, PRNG_LENS_V);
- camera_sample(kg, x, y, filter_u, filter_v, lens_u, lens_v, &ray);
+#ifdef __MOTION__
+ float time = path_rng(kg, &rng, sample, PRNG_TIME);
+#else
+ float time = 0.0f;
+#endif
+
+ camera_sample(kg, x, y, filter_u, filter_v, lens_u, lens_v, time, &ray);
/* integrate */
float4 L = kernel_path_integrate(kg, &rng, sample, ray, buffer);
diff --git a/intern/cycles/kernel/kernel_shader.h b/intern/cycles/kernel/kernel_shader.h
index 46ef5d2022a..b2f2a7577be 100644
--- a/intern/cycles/kernel/kernel_shader.h
+++ b/intern/cycles/kernel/kernel_shader.h
@@ -53,16 +53,9 @@ __device_inline void shader_setup_from_ray(KernelGlobals *kg, ShaderData *sd,
float3 Ng = make_float3(Ns.x, Ns.y, Ns.z);
int shader = __float_as_int(Ns.w);
- /* vectors */
- sd->P = bvh_triangle_refine(kg, isect, ray);
- sd->Ng = Ng;
- sd->N = Ng;
- sd->I = -ray->D;
- sd->shader = shader;
-
/* triangle */
#ifdef __INSTANCING__
- sd->object = isect->object;
+ sd->object = (isect->object == ~0)? kernel_tex_fetch(__prim_object, isect->prim): isect->object;
#endif
sd->prim = prim;
#ifdef __UV__
@@ -70,6 +63,21 @@ __device_inline void shader_setup_from_ray(KernelGlobals *kg, ShaderData *sd,
sd->v = isect->v;
#endif
+ /* matrices and time */
+#ifdef __MOTION__
+ sd->ob_tfm = object_fetch_transform(kg, sd->object, ray->time, OBJECT_TRANSFORM);
+ sd->ob_itfm = object_fetch_transform(kg, sd->object, ray->time, OBJECT_INVERSE_TRANSFORM);
+
+ sd->time = ray->time;
+#endif
+
+ /* vectors */
+ sd->P = bvh_triangle_refine(kg, sd, isect, ray);
+ sd->Ng = Ng;
+ sd->N = Ng;
+ sd->I = -ray->D;
+ sd->shader = shader;
+
/* smooth normal */
if(sd->shader & SHADER_SMOOTH_NORMAL)
sd->N = triangle_smooth_normal(kg, sd->prim, sd->u, sd->v);
@@ -82,19 +90,15 @@ __device_inline void shader_setup_from_ray(KernelGlobals *kg, ShaderData *sd,
#endif
#ifdef __INSTANCING__
- if(sd->object != ~0) {
+ if(isect->object != ~0) {
/* instance transform */
- object_normal_transform(kg, sd->object, &sd->N);
- object_normal_transform(kg, sd->object, &sd->Ng);
+ object_normal_transform(kg, sd, &sd->N);
+ object_normal_transform(kg, sd, &sd->Ng);
#ifdef __DPDU__
- object_dir_transform(kg, sd->object, &sd->dPdu);
- object_dir_transform(kg, sd->object, &sd->dPdv);
+ object_dir_transform(kg, sd, &sd->dPdu);
+ object_dir_transform(kg, sd, &sd->dPdv);
#endif
}
- else {
- /* non-instanced object index */
- sd->object = kernel_tex_fetch(__prim_object, isect->prim);
- }
#endif
/* backfacing test */
@@ -122,7 +126,7 @@ __device_inline void shader_setup_from_ray(KernelGlobals *kg, ShaderData *sd,
__device void shader_setup_from_sample(KernelGlobals *kg, ShaderData *sd,
const float3 P, const float3 Ng, const float3 I,
- int shader, int object, int prim, float u, float v)
+ int shader, int object, int prim, float u, float v, float time)
{
/* vectors */
sd->P = P;
@@ -155,13 +159,20 @@ __device void shader_setup_from_sample(KernelGlobals *kg, ShaderData *sd,
}
#endif
+#ifdef __MOTION__
+ sd->time = time;
+
+ sd->ob_tfm = object_fetch_transform(kg, sd->object, time, OBJECT_TRANSFORM);
+ sd->ob_itfm = object_fetch_transform(kg, sd->object, time, OBJECT_INVERSE_TRANSFORM);
+#endif
+
/* smooth normal */
if(sd->shader & SHADER_SMOOTH_NORMAL) {
sd->N = triangle_smooth_normal(kg, sd->prim, sd->u, sd->v);
#ifdef __INSTANCING__
if(instanced)
- object_normal_transform(kg, sd->object, &sd->N);
+ object_normal_transform(kg, sd, &sd->N);
#endif
}
@@ -178,8 +189,8 @@ __device void shader_setup_from_sample(KernelGlobals *kg, ShaderData *sd,
#ifdef __INSTANCING__
if(instanced) {
- object_dir_transform(kg, sd->object, &sd->dPdu);
- object_dir_transform(kg, sd->object, &sd->dPdv);
+ object_dir_transform(kg, sd, &sd->dPdu);
+ object_dir_transform(kg, sd, &sd->dPdv);
}
#endif
}
@@ -229,7 +240,7 @@ __device void shader_setup_from_displace(KernelGlobals *kg, ShaderData *sd,
/* watch out: no instance transform currently */
- shader_setup_from_sample(kg, sd, P, Ng, I, shader, object, prim, u, v);
+ shader_setup_from_sample(kg, sd, P, Ng, I, shader, object, prim, u, v, TIME_INVALID);
}
/* ShaderData setup from ray into background */
@@ -243,6 +254,9 @@ __device_inline void shader_setup_from_background(KernelGlobals *kg, ShaderData
sd->I = -sd->P;
sd->shader = kernel_data.background.shader;
sd->flag = kernel_tex_fetch(__shader_flag, (sd->shader & SHADER_MASK)*2);
+#ifdef __MOTION__
+ sd->time = ray->time;
+#endif
#ifdef __INSTANCING__
sd->object = ~0;
diff --git a/intern/cycles/kernel/kernel_triangle.h b/intern/cycles/kernel/kernel_triangle.h
index 7eaf54d14bf..1b3956c1dd4 100644
--- a/intern/cycles/kernel/kernel_triangle.h
+++ b/intern/cycles/kernel/kernel_triangle.h
@@ -179,5 +179,68 @@ __device float3 triangle_attribute_float3(KernelGlobals *kg, const ShaderData *s
}
}
+/* motion */
+
+__device int triangle_find_attribute(KernelGlobals *kg, ShaderData *sd, uint id)
+{
+ /* find attribute by unique id */
+ uint attr_offset = sd->object*kernel_data.bvh.attributes_map_stride;
+ uint4 attr_map = kernel_tex_fetch(__attributes_map, attr_offset);
+
+ while(attr_map.x != id)
+ attr_map = kernel_tex_fetch(__attributes_map, ++attr_offset);
+
+ /* return result */
+ return (attr_map.y == ATTR_ELEMENT_NONE)? ATTR_STD_NOT_FOUND: attr_map.z;
+}
+
+__device float4 triangle_motion_vector(KernelGlobals *kg, ShaderData *sd)
+{
+ float3 motion_pre = sd->P, motion_post = sd->P;
+
+ /* deformation motion */
+ int offset_pre = triangle_find_attribute(kg, sd, ATTR_STD_MOTION_PRE);
+ int offset_post = triangle_find_attribute(kg, sd, ATTR_STD_MOTION_POST);
+
+ if(offset_pre != ATTR_STD_NOT_FOUND)
+ motion_pre = triangle_attribute_float3(kg, sd, ATTR_ELEMENT_VERTEX, offset_pre, NULL, NULL);
+ if(offset_post != ATTR_STD_NOT_FOUND)
+ motion_post = triangle_attribute_float3(kg, sd, ATTR_ELEMENT_VERTEX, offset_post, NULL, NULL);
+
+ /* object motion. note that depending on the mesh having motion vectors, this
+ transformation was set match the world/object space of motion_pre/post */
+ Transform tfm;
+
+ tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_TRANSFORM_MOTION_PRE);
+ motion_pre = transform_point(&tfm, motion_pre);
+
+ tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_TRANSFORM_MOTION_POST);
+ motion_post = transform_point(&tfm, motion_post);
+
+ /* camera motion */
+ tfm = kernel_data.cam.worldtoraster;
+ float3 P = transform_perspective(&tfm, sd->P);
+
+ tfm = kernel_data.cam.motion.pre;
+ motion_pre = transform_perspective(&tfm, motion_pre) - P;
+
+ tfm = kernel_data.cam.motion.post;
+ motion_post = P - transform_perspective(&tfm, motion_post);
+
+ return make_float4(motion_pre.x, motion_pre.y, motion_post.x, motion_post.y);
+}
+
+__device float3 triangle_uv(KernelGlobals *kg, ShaderData *sd)
+{
+ int offset_uv = triangle_find_attribute(kg, sd, ATTR_STD_UV);
+
+ if(offset_uv == ATTR_STD_NOT_FOUND)
+ return make_float3(0.0f, 0.0f, 0.0f);
+
+ float3 uv = triangle_attribute_float3(kg, sd, ATTR_ELEMENT_CORNER, offset_uv, NULL, NULL);
+ uv.z = 1.0f;
+ return uv;
+}
+
CCL_NAMESPACE_END
diff --git a/intern/cycles/kernel/kernel_types.h b/intern/cycles/kernel/kernel_types.h
index 102a2bb036d..e9103087025 100644
--- a/intern/cycles/kernel/kernel_types.h
+++ b/intern/cycles/kernel/kernel_types.h
@@ -20,9 +20,12 @@
#define __KERNEL_TYPES_H__
#include "kernel_math.h"
-
#include "svm/svm_types.h"
+#ifndef __KERNEL_GPU__
+#define __KERNEL_CPU__
+#endif
+
CCL_NAMESPACE_BEGIN
/* constants */
@@ -30,6 +33,7 @@ CCL_NAMESPACE_BEGIN
#define LIGHT_SIZE 4
#define FILTER_TABLE_SIZE 256
#define RAMP_TABLE_SIZE 256
+#define TIME_INVALID FLT_MAX
/* device capabilities */
#ifdef __KERNEL_CPU__
@@ -75,6 +79,7 @@ CCL_NAMESPACE_BEGIN
#define __PASSES__
#define __BACKGROUND_MIS__
#define __AO__
+//#define __MOTION__
#endif
//#define __MULTI_LIGHT__
@@ -90,14 +95,21 @@ enum ShaderEvalType {
SHADER_EVAL_BACKGROUND
};
-/* Path Tracing */
+/* Path Tracing
+ * note we need to keep the u/v pairs at even values */
enum PathTraceDimension {
PRNG_FILTER_U = 0,
PRNG_FILTER_V = 1,
PRNG_LENS_U = 2,
PRNG_LENS_V = 3,
+#ifdef __MOTION__
+ PRNG_TIME = 4,
+ PRNG_UNUSED = 5,
+ PRNG_BASE_NUM = 6,
+#else
PRNG_BASE_NUM = 4,
+#endif
PRNG_BSDF_U = 0,
PRNG_BSDF_V = 1,
@@ -177,7 +189,9 @@ typedef enum PassType {
PASS_EMISSION = 65536,
PASS_BACKGROUND = 131072,
PASS_AO = 262144,
- PASS_SHADOW = 524288
+ PASS_SHADOW = 524288,
+ PASS_MOTION = 1048576,
+ PASS_MOTION_WEIGHT = 2097152
} PassType;
#define PASS_ALL (~0)
@@ -275,6 +289,7 @@ typedef struct Ray {
float3 P;
float3 D;
float t;
+ float time;
#ifdef __RAY_DIFFERENTIALS__
differential3 dP;
@@ -300,6 +315,21 @@ typedef enum AttributeElement {
ATTR_ELEMENT_NONE
} AttributeElement;
+typedef enum AttributeStandard {
+ ATTR_STD_NONE = 0,
+ ATTR_STD_VERTEX_NORMAL,
+ ATTR_STD_FACE_NORMAL,
+ ATTR_STD_UV,
+ ATTR_STD_GENERATED,
+ ATTR_STD_POSITION_UNDEFORMED,
+ ATTR_STD_POSITION_UNDISPLACED,
+ ATTR_STD_MOTION_PRE,
+ ATTR_STD_MOTION_POST,
+ ATTR_STD_NUM,
+
+ ATTR_STD_NOT_FOUND = ~0
+} AttributeStandard;
+
/* Closure data */
#define MAX_CLOSURE 8
@@ -365,6 +395,16 @@ typedef struct ShaderData {
/* object id if there is one, ~0 otherwise */
int object;
+ /* motion blur sample time */
+ float time;
+
+#ifdef __MOTION__
+ /* object <-> world space transformations, cached to avoid
+ * re-interpolating them constantly for shading */
+ Transform ob_tfm;
+ Transform ob_itfm;
+#endif
+
#ifdef __RAY_DIFFERENTIALS__
/* differential of P. these are orthogonal to Ng, not N */
differential3 dP;
@@ -422,8 +462,8 @@ typedef struct KernelCamera {
float focaldistance;
/* motion blur */
- float shutteropen;
- float shutterclose;
+ float shuttertime;
+ float pad;
/* clipping */
float nearclip;
@@ -437,6 +477,8 @@ typedef struct KernelCamera {
Transform worldtoraster;
Transform worldtondc;
Transform worldtocamera;
+
+ MotionTransform motion;
} KernelCamera;
typedef struct KernelFilm {
@@ -448,27 +490,32 @@ typedef struct KernelFilm {
int pass_combined;
int pass_depth;
int pass_normal;
- int pass_pad;
+ int pass_motion;
+ int pass_motion_weight;
int pass_uv;
int pass_object_id;
int pass_material_id;
- int pass_diffuse_color;
+ int pass_diffuse_color;
int pass_glossy_color;
int pass_transmission_color;
int pass_diffuse_indirect;
- int pass_glossy_indirect;
+ int pass_glossy_indirect;
int pass_transmission_indirect;
int pass_diffuse_direct;
int pass_glossy_direct;
- int pass_transmission_direct;
+ int pass_transmission_direct;
int pass_emission;
int pass_background;
int pass_ao;
+
int pass_shadow;
+ int pass_pad1;
+ int pass_pad2;
+ int pass_pad3;
} KernelFilm;
typedef struct KernelBackground {
diff --git a/intern/cycles/kernel/svm/svm_tex_coord.h b/intern/cycles/kernel/svm/svm_tex_coord.h
index 98f8734aed2..5ecda795251 100644
--- a/intern/cycles/kernel/svm/svm_tex_coord.h
+++ b/intern/cycles/kernel/svm/svm_tex_coord.h
@@ -33,8 +33,8 @@ __device void svm_node_tex_coord(KernelGlobals *kg, ShaderData *sd, float *stack
switch(type) {
case NODE_TEXCO_OBJECT: {
if(sd->object != ~0) {
- Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
- data = transform_point(&tfm, sd->P);
+ data = sd->P;
+ object_position_transform(kg, sd, &data);
}
else
data = sd->P;
@@ -42,8 +42,8 @@ __device void svm_node_tex_coord(KernelGlobals *kg, ShaderData *sd, float *stack
}
case NODE_TEXCO_NORMAL: {
if(sd->object != ~0) {
- Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
- data = transform_direction(&tfm, sd->N);
+ data = sd->N;
+ object_normal_transform(kg, sd, &data);
}
else
data = sd->N;
@@ -87,8 +87,8 @@ __device void svm_node_tex_coord_bump_dx(KernelGlobals *kg, ShaderData *sd, floa
switch(type) {
case NODE_TEXCO_OBJECT: {
if(sd->object != ~0) {
- Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
- data = transform_point(&tfm, sd->P + sd->dP.dx);
+ data = sd->P + sd->dP.dx;
+ object_position_transform(kg, sd, &data);
}
else
data = sd->P + sd->dP.dx;
@@ -96,8 +96,8 @@ __device void svm_node_tex_coord_bump_dx(KernelGlobals *kg, ShaderData *sd, floa
}
case NODE_TEXCO_NORMAL: {
if(sd->object != ~0) {
- Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
- data = transform_direction(&tfm, sd->N);
+ data = sd->N;
+ object_normal_transform(kg, sd, &data);
}
else
data = sd->N;
@@ -144,8 +144,8 @@ __device void svm_node_tex_coord_bump_dy(KernelGlobals *kg, ShaderData *sd, floa
switch(type) {
case NODE_TEXCO_OBJECT: {
if(sd->object != ~0) {
- Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
- data = transform_point(&tfm, sd->P + sd->dP.dy);
+ data = sd->P + sd->dP.dy;
+ object_position_transform(kg, sd, &data);
}
else
data = sd->P + sd->dP.dy;
@@ -153,8 +153,8 @@ __device void svm_node_tex_coord_bump_dy(KernelGlobals *kg, ShaderData *sd, floa
}
case NODE_TEXCO_NORMAL: {
if(sd->object != ~0) {
- Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
- data = normalize(transform_direction(&tfm, sd->N));
+ data = sd->N;
+ object_normal_transform(kg, sd, &data);
}
else
data = sd->N;
diff --git a/intern/cycles/render/CMakeLists.txt b/intern/cycles/render/CMakeLists.txt
index db92cf4ef54..4d4fbfe6814 100644
--- a/intern/cycles/render/CMakeLists.txt
+++ b/intern/cycles/render/CMakeLists.txt
@@ -16,7 +16,7 @@ set(SRC
buffers.cpp
camera.cpp
film.cpp
- # film_response.cpp # XXX, why isn't this in?
+ # film_response.cpp (code unused)
filter.cpp
graph.cpp
image.cpp
@@ -41,7 +41,7 @@ set(SRC_HEADERS
buffers.h
camera.h
film.h
- # film_response.h # XXX, why isn't this in?
+ # film_response.h (code unused)
filter.h
graph.h
image.h
diff --git a/intern/cycles/render/attribute.cpp b/intern/cycles/render/attribute.cpp
index 9e90bf1b625..c1a089cc872 100644
--- a/intern/cycles/render/attribute.cpp
+++ b/intern/cycles/render/attribute.cpp
@@ -31,7 +31,7 @@ void Attribute::set(ustring name_, TypeDesc type_, Element element_)
name = name_;
type = type_;
element = element_;
- std = STD_NONE;
+ std = ATTR_STD_NONE;
/* string and matrix not supported! */
assert(type == TypeDesc::TypeFloat || type == TypeDesc::TypeColor ||
@@ -81,20 +81,24 @@ bool Attribute::same_storage(TypeDesc a, TypeDesc b)
return false;
}
-ustring Attribute::standard_name(Attribute::Standard std)
+ustring Attribute::standard_name(AttributeStandard std)
{
- if(std == Attribute::STD_VERTEX_NORMAL)
+ if(std == ATTR_STD_VERTEX_NORMAL)
return ustring("N");
- else if(std == Attribute::STD_FACE_NORMAL)
+ else if(std == ATTR_STD_FACE_NORMAL)
return ustring("Ng");
- else if(std == Attribute::STD_UV)
+ else if(std == ATTR_STD_UV)
return ustring("uv");
- else if(std == Attribute::STD_GENERATED)
+ else if(std == ATTR_STD_GENERATED)
return ustring("generated");
- else if(std == Attribute::STD_POSITION_UNDEFORMED)
+ else if(std == ATTR_STD_POSITION_UNDEFORMED)
return ustring("undeformed");
- else if(std == Attribute::STD_POSITION_UNDISPLACED)
+ else if(std == ATTR_STD_POSITION_UNDISPLACED)
return ustring("undisplaced");
+ else if(std == ATTR_STD_MOTION_PRE)
+ return ustring("motion_pre");
+ else if(std == ATTR_STD_MOTION_POST)
+ return ustring("motion_post");
return ustring();
}
@@ -164,24 +168,28 @@ void AttributeSet::remove(ustring name)
}
}
-Attribute *AttributeSet::add(Attribute::Standard std, ustring name)
+Attribute *AttributeSet::add(AttributeStandard std, ustring name)
{
Attribute *attr = NULL;
if(name == ustring())
name = Attribute::standard_name(std);
- if(std == Attribute::STD_VERTEX_NORMAL)
+ if(std == ATTR_STD_VERTEX_NORMAL)
attr = add(name, TypeDesc::TypeNormal, Attribute::VERTEX);
- else if(std == Attribute::STD_FACE_NORMAL)
+ else if(std == ATTR_STD_FACE_NORMAL)
attr = add(name, TypeDesc::TypeNormal, Attribute::FACE);
- else if(std == Attribute::STD_UV)
+ else if(std == ATTR_STD_UV)
attr = add(name, TypeDesc::TypePoint, Attribute::CORNER);
- else if(std == Attribute::STD_GENERATED)
+ else if(std == ATTR_STD_GENERATED)
attr = add(name, TypeDesc::TypePoint, Attribute::VERTEX);
- else if(std == Attribute::STD_POSITION_UNDEFORMED)
+ else if(std == ATTR_STD_POSITION_UNDEFORMED)
attr = add(name, TypeDesc::TypePoint, Attribute::VERTEX);
- else if(std == Attribute::STD_POSITION_UNDISPLACED)
+ else if(std == ATTR_STD_POSITION_UNDISPLACED)
+ attr = add(name, TypeDesc::TypePoint, Attribute::VERTEX);
+ else if(std == ATTR_STD_MOTION_PRE)
+ attr = add(name, TypeDesc::TypePoint, Attribute::VERTEX);
+ else if(std == ATTR_STD_MOTION_POST)
attr = add(name, TypeDesc::TypePoint, Attribute::VERTEX);
else
assert(0);
@@ -191,7 +199,7 @@ Attribute *AttributeSet::add(Attribute::Standard std, ustring name)
return attr;
}
-Attribute *AttributeSet::find(Attribute::Standard std)
+Attribute *AttributeSet::find(AttributeStandard std)
{
foreach(Attribute& attr, attributes)
if(attr.std == std)
@@ -200,7 +208,7 @@ Attribute *AttributeSet::find(Attribute::Standard std)
return NULL;
}
-void AttributeSet::remove(Attribute::Standard std)
+void AttributeSet::remove(AttributeStandard std)
{
Attribute *attr = find(std);
@@ -218,7 +226,7 @@ void AttributeSet::remove(Attribute::Standard std)
Attribute *AttributeSet::find(AttributeRequest& req)
{
- if(req.std == Attribute::STD_NONE)
+ if(req.std == ATTR_STD_NONE)
return find(req.name);
else
return find(req.std);
@@ -240,14 +248,14 @@ void AttributeSet::clear()
AttributeRequest::AttributeRequest(ustring name_)
{
name = name_;
- std = Attribute::STD_NONE;
+ std = ATTR_STD_NONE;
type = TypeDesc::TypeFloat;
element = ATTR_ELEMENT_NONE;
offset = 0;
}
-AttributeRequest::AttributeRequest(Attribute::Standard std_)
+AttributeRequest::AttributeRequest(AttributeStandard std_)
{
name = ustring();
std = std_;
@@ -296,7 +304,7 @@ void AttributeRequestSet::add(ustring name)
requests.push_back(AttributeRequest(name));
}
-void AttributeRequestSet::add(Attribute::Standard std)
+void AttributeRequestSet::add(AttributeStandard std)
{
foreach(AttributeRequest& req, requests)
if(req.std == std)
@@ -308,7 +316,7 @@ void AttributeRequestSet::add(Attribute::Standard std)
void AttributeRequestSet::add(AttributeRequestSet& reqs)
{
foreach(AttributeRequest& req, reqs.requests) {
- if(req.std == Attribute::STD_NONE)
+ if(req.std == ATTR_STD_NONE)
add(req.name);
else
add(req.std);
@@ -324,7 +332,7 @@ bool AttributeRequestSet::find(ustring name)
return false;
}
-bool AttributeRequestSet::find(Attribute::Standard std)
+bool AttributeRequestSet::find(AttributeStandard std)
{
foreach(AttributeRequest& req, requests)
if(req.std == std)
diff --git a/intern/cycles/render/attribute.h b/intern/cycles/render/attribute.h
index 7af4657daa3..707d558fc79 100644
--- a/intern/cycles/render/attribute.h
+++ b/intern/cycles/render/attribute.h
@@ -47,19 +47,8 @@ public:
CORNER
};
- enum Standard {
- STD_NONE = 0,
- STD_VERTEX_NORMAL,
- STD_FACE_NORMAL,
- STD_UV,
- STD_GENERATED,
- STD_POSITION_UNDEFORMED,
- STD_POSITION_UNDISPLACED,
- STD_NUM
- };
-
ustring name;
- Standard std;
+ AttributeStandard std;
TypeDesc type;
vector<char> buffer;
@@ -82,7 +71,7 @@ public:
const float *data_float() const { return (float*)data(); }
static bool same_storage(TypeDesc a, TypeDesc b);
- static ustring standard_name(Attribute::Standard std);
+ static ustring standard_name(AttributeStandard std);
};
/* Attribute Set
@@ -101,9 +90,9 @@ public:
Attribute *find(ustring name);
void remove(ustring name);
- Attribute *add(Attribute::Standard std, ustring name = ustring());
- Attribute *find(Attribute::Standard std);
- void remove(Attribute::Standard std);
+ Attribute *add(AttributeStandard std, ustring name = ustring());
+ Attribute *find(AttributeStandard std);
+ void remove(AttributeStandard std);
Attribute *find(AttributeRequest& req);
@@ -120,7 +109,7 @@ public:
class AttributeRequest {
public:
ustring name;
- Attribute::Standard std;
+ AttributeStandard std;
/* temporary variables used by MeshManager */
TypeDesc type;
@@ -128,7 +117,7 @@ public:
int offset;
AttributeRequest(ustring name_);
- AttributeRequest(Attribute::Standard std);
+ AttributeRequest(AttributeStandard std);
};
/* AttributeRequestSet
@@ -143,11 +132,11 @@ public:
~AttributeRequestSet();
void add(ustring name);
- void add(Attribute::Standard std);
+ void add(AttributeStandard std);
void add(AttributeRequestSet& reqs);
bool find(ustring name);
- bool find(Attribute::Standard std);
+ bool find(AttributeStandard std);
size_t size();
void clear();
diff --git a/intern/cycles/render/buffers.cpp b/intern/cycles/render/buffers.cpp
index bda20a8ab9d..a80851b945a 100644
--- a/intern/cycles/render/buffers.cpp
+++ b/intern/cycles/render/buffers.cpp
@@ -221,6 +221,28 @@ bool RenderBuffers::get_pass(PassType type, float exposure, int sample, int comp
pixels[3] = 1.0f;
}
}
+ else if(type == PASS_MOTION) {
+ /* need to normalize by number of samples accumulated for motion */
+ pass_offset = 0;
+ foreach(Pass& color_pass, params.passes) {
+ if(color_pass.type == PASS_MOTION_WEIGHT)
+ break;
+ pass_offset += color_pass.components;
+ }
+
+ float *in_weight = (float*)buffer.data_pointer + pass_offset;
+
+ for(int i = 0; i < size; i++, in += pass_stride, in_weight += pass_stride, pixels += 4) {
+ float4 f = make_float4(in[0], in[1], in[2], in[3]);
+ float w = in_weight[0];
+ float invw = (w > 0.0f)? 1.0f/w: 0.0f;
+
+ pixels[0] = f.x*invw;
+ pixels[1] = f.y*invw;
+ pixels[2] = f.z*invw;
+ pixels[3] = f.w*invw;
+ }
+ }
else {
for(int i = 0; i < size; i++, in += pass_stride, pixels += 4) {
float4 f = make_float4(in[0], in[1], in[2], in[3]);
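Illustrative note (not part of the patch): the PASS_MOTION branch above divides the accumulated motion by the accumulated PASS_MOTION_WEIGHT rather than by the plain sample count, so pixels where only some samples hit moving geometry still average to the right vector. A minimal standalone sketch of that normalization, with made-up numbers:

#include <cstdio>

int main()
{
	/* pretend 16 samples were taken; 10 of them produced a motion vector,
	 * so the accumulated weight is 10 and the vectors sum up in accum[] */
	float accum[4] = {5.0f, -2.5f, 20.0f, -10.0f};
	float weight = 10.0f;

	/* same guard as in RenderBuffers::get_pass(): avoid division by zero */
	float invw = (weight > 0.0f)? 1.0f/weight: 0.0f;

	float pixel[4];
	for(int c = 0; c < 4; c++)
		pixel[c] = accum[c]*invw;

	printf("motion: %f %f %f %f\n", pixel[0], pixel[1], pixel[2], pixel[3]);
	return 0;
}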
diff --git a/intern/cycles/render/camera.cpp b/intern/cycles/render/camera.cpp
index f9290dfc835..e9ca7c3a366 100644
--- a/intern/cycles/render/camera.cpp
+++ b/intern/cycles/render/camera.cpp
@@ -25,8 +25,7 @@ CCL_NAMESPACE_BEGIN
Camera::Camera()
{
- shutteropen = 0.0f;
- shutterclose = 1.0f;
+ shuttertime = 1.0f;
aperturesize = 0.0f;
focaldistance = 10.0f;
@@ -35,6 +34,10 @@ Camera::Camera()
matrix = transform_identity();
+ motion.pre = transform_identity();
+ motion.post = transform_identity();
+ use_motion = false;
+
type = CAMERA_PERSPECTIVE;
fov = M_PI_F/4.0f;
@@ -124,7 +127,7 @@ void Camera::update()
need_device_update = true;
}
-void Camera::device_update(Device *device, DeviceScene *dscene)
+void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
{
update();
@@ -140,10 +143,28 @@ void Camera::device_update(Device *device, DeviceScene *dscene)
kcam->rastertocamera = rastertocamera;
kcam->cameratoworld = cameratoworld;
kcam->worldtoscreen = transform_inverse(screentoworld);
- kcam->worldtoraster = transform_inverse(rastertoworld);
+ kcam->worldtoraster = worldtoraster;
kcam->worldtondc = transform_inverse(ndctoworld);
kcam->worldtocamera = transform_inverse(cameratoworld);
+ /* camera motion */
+ Scene::MotionType need_motion = scene->need_motion();
+
+ if(need_motion == Scene::MOTION_PASS) {
+ if(use_motion) {
+ kcam->motion.pre = transform_inverse(motion.pre * rastertocamera);
+ kcam->motion.post = transform_inverse(motion.post * rastertocamera);
+ }
+ else {
+ kcam->motion.pre = worldtoraster;
+ kcam->motion.post = worldtoraster;
+ }
+ }
+ else if(need_motion == Scene::MOTION_BLUR) {
+ /* todo: exact camera position will not be hit this way */
+ transform_motion_decompose(&kcam->motion, &motion);
+ }
+
/* depth of field */
kcam->aperturesize = aperturesize;
kcam->focaldistance = focaldistance;
@@ -151,8 +172,7 @@ void Camera::device_update(Device *device, DeviceScene *dscene)
kcam->bladesrotation = bladesrotation;
/* motion blur */
- kcam->shutteropen = shutteropen;
- kcam->shutterclose = shutterclose;
+ kcam->shuttertime = (need_motion == Scene::MOTION_BLUR)? shuttertime: 0.0f;
/* type */
kcam->type = type;
@@ -175,8 +195,7 @@ void Camera::device_free(Device *device, DeviceScene *dscene)
bool Camera::modified(const Camera& cam)
{
- return !((shutteropen == cam.shutteropen) &&
- (shutterclose == cam.shutterclose) &&
+ return !((shuttertime == cam.shuttertime) &&
(aperturesize == cam.aperturesize) &&
(blades == cam.blades) &&
(bladesrotation == cam.bladesrotation) &&
@@ -192,7 +211,9 @@ bool Camera::modified(const Camera& cam)
(right == cam.right) &&
(bottom == cam.bottom) &&
(top == cam.top) &&
- (matrix == cam.matrix));
+ (matrix == cam.matrix) &&
+ (motion == cam.motion) &&
+ (use_motion == cam.use_motion));
}
void Camera::tag_update()
diff --git a/intern/cycles/render/camera.h b/intern/cycles/render/camera.h
index cfcc5406ee3..935489711c8 100644
--- a/intern/cycles/render/camera.h
+++ b/intern/cycles/render/camera.h
@@ -28,6 +28,7 @@ CCL_NAMESPACE_BEGIN
class Device;
class DeviceScene;
+class Scene;
/* Camera
*
@@ -37,8 +38,7 @@ class DeviceScene;
class Camera {
public:
/* motion blur */
- float shutteropen;
- float shutterclose;
+ float shuttertime;
/* depth of field */
float focaldistance;
@@ -61,6 +61,10 @@ public:
/* transformation */
Transform matrix;
+ /* motion */
+ MotionTransform motion;
+ bool use_motion;
+
/* computed camera parameters */
Transform screentoworld;
Transform rastertoworld;
@@ -82,7 +86,7 @@ public:
void update();
- void device_update(Device *device, DeviceScene *dscene);
+ void device_update(Device *device, DeviceScene *dscene, Scene *scene);
void device_free(Device *device, DeviceScene *dscene);
bool modified(const Camera& cam);
diff --git a/intern/cycles/render/film.cpp b/intern/cycles/render/film.cpp
index cc17f86fcb6..55c89b7b1b2 100644
--- a/intern/cycles/render/film.cpp
+++ b/intern/cycles/render/film.cpp
@@ -67,6 +67,13 @@ void Pass::add(PassType type, vector<Pass>& passes)
case PASS_UV:
pass.components = 4;
break;
+ case PASS_MOTION:
+ pass.components = 4;
+ pass.divide_type = PASS_MOTION_WEIGHT;
+ break;
+ case PASS_MOTION_WEIGHT:
+ pass.components = 1;
+ break;
case PASS_OBJECT_ID:
pass.components = 1;
pass.filter = false;
@@ -154,6 +161,15 @@ bool Pass::equals(const vector<Pass>& A, const vector<Pass>& B)
return true;
}
+bool Pass::contains(const vector<Pass>& passes, PassType type)
+{
+ foreach(const Pass& pass, passes)
+ if(pass.type == type)
+ return true;
+
+ return false;
+}
+
/* Film */
Film::Film()
@@ -196,6 +212,12 @@ void Film::device_update(Device *device, DeviceScene *dscene)
case PASS_UV:
kfilm->pass_uv = kfilm->pass_stride;
break;
+ case PASS_MOTION:
+ kfilm->pass_motion = kfilm->pass_stride;
+ break;
+ case PASS_MOTION_WEIGHT:
+ kfilm->pass_motion_weight = kfilm->pass_stride;
+ break;
case PASS_OBJECT_ID:
kfilm->pass_object_id = kfilm->pass_stride;
break;
diff --git a/intern/cycles/render/film.h b/intern/cycles/render/film.h
index 8a3dbbf1b08..c7d2ee24388 100644
--- a/intern/cycles/render/film.h
+++ b/intern/cycles/render/film.h
@@ -40,6 +40,7 @@ public:
static void add(PassType type, vector<Pass>& passes);
static bool equals(const vector<Pass>& A, const vector<Pass>& B);
+ static bool contains(const vector<Pass>& passes, PassType);
};
class Film {
diff --git a/intern/cycles/render/graph.cpp b/intern/cycles/render/graph.cpp
index cc29047f048..d9486de47c9 100644
--- a/intern/cycles/render/graph.cpp
+++ b/intern/cycles/render/graph.cpp
@@ -120,9 +120,9 @@ void ShaderNode::attributes(AttributeRequestSet *attributes)
foreach(ShaderInput *input, inputs) {
if(!input->link) {
if(input->default_value == ShaderInput::TEXTURE_GENERATED)
- attributes->add(Attribute::STD_GENERATED);
+ attributes->add(ATTR_STD_GENERATED);
else if(input->default_value == ShaderInput::TEXTURE_UV)
- attributes->add(Attribute::STD_UV);
+ attributes->add(ATTR_STD_UV);
}
}
}
diff --git a/intern/cycles/render/integrator.cpp b/intern/cycles/render/integrator.cpp
index c1f066df10c..b26ebfd91e1 100644
--- a/intern/cycles/render/integrator.cpp
+++ b/intern/cycles/render/integrator.cpp
@@ -45,6 +45,7 @@ Integrator::Integrator()
seed = 0;
layer_flag = ~0;
sample_clamp = 0.0f;
+ motion_blur = false;
need_update = true;
}
@@ -125,7 +126,8 @@ bool Integrator::modified(const Integrator& integrator)
filter_glossy == integrator.filter_glossy &&
layer_flag == integrator.layer_flag &&
seed == integrator.seed &&
- sample_clamp == integrator.sample_clamp);
+ sample_clamp == integrator.sample_clamp &&
+ motion_blur == integrator.motion_blur);
}
void Integrator::tag_update(Scene *scene)
diff --git a/intern/cycles/render/integrator.h b/intern/cycles/render/integrator.h
index 0817fcaa457..afda41a857d 100644
--- a/intern/cycles/render/integrator.h
+++ b/intern/cycles/render/integrator.h
@@ -47,6 +47,7 @@ public:
int layer_flag;
float sample_clamp;
+ bool motion_blur;
bool need_update;
diff --git a/intern/cycles/render/mesh.cpp b/intern/cycles/render/mesh.cpp
index 0ce16e65621..5d96611ff26 100644
--- a/intern/cycles/render/mesh.cpp
+++ b/intern/cycles/render/mesh.cpp
@@ -113,11 +113,11 @@ void Mesh::compute_bounds()
void Mesh::add_face_normals()
{
/* don't compute if already there */
- if(attributes.find(Attribute::STD_FACE_NORMAL))
+ if(attributes.find(ATTR_STD_FACE_NORMAL))
return;
/* get attributes */
- Attribute *attr_fN = attributes.add(Attribute::STD_FACE_NORMAL);
+ Attribute *attr_fN = attributes.add(ATTR_STD_FACE_NORMAL);
float3 *fN = attr_fN->data_float3();
/* compute face normals */
@@ -145,12 +145,12 @@ void Mesh::add_face_normals()
void Mesh::add_vertex_normals()
{
/* don't compute if already there */
- if(attributes.find(Attribute::STD_VERTEX_NORMAL))
+ if(attributes.find(ATTR_STD_VERTEX_NORMAL))
return;
/* get attributes */
- Attribute *attr_fN = attributes.find(Attribute::STD_FACE_NORMAL);
- Attribute *attr_vN = attributes.add(Attribute::STD_VERTEX_NORMAL);
+ Attribute *attr_fN = attributes.find(ATTR_STD_FACE_NORMAL);
+ Attribute *attr_vN = attributes.add(ATTR_STD_VERTEX_NORMAL);
float3 *fN = attr_fN->data_float3();
float3 *vN = attr_vN->data_float3();
@@ -179,8 +179,8 @@ void Mesh::add_vertex_normals()
void Mesh::pack_normals(Scene *scene, float4 *normal, float4 *vnormal)
{
- Attribute *attr_fN = attributes.find(Attribute::STD_FACE_NORMAL);
- Attribute *attr_vN = attributes.find(Attribute::STD_VERTEX_NORMAL);
+ Attribute *attr_fN = attributes.find(ATTR_STD_FACE_NORMAL);
+ Attribute *attr_vN = attributes.find(ATTR_STD_VERTEX_NORMAL);
float3 *fN = attr_fN->data_float3();
float3 *vN = attr_vN->data_float3();
@@ -348,7 +348,7 @@ void MeshManager::update_osl_attributes(Device *device, Scene *scene, vector<Att
else
osl_attr.type = TypeDesc::TypeColor;
- if(req.std != Attribute::STD_NONE) {
+ if(req.std != ATTR_STD_NONE) {
/* if standard attribute, add lookup by std:: name convention */
ustring stdname = ustring(string("std::") + Attribute::standard_name(req.std).c_str());
og->attribute_map[i][stdname] = osl_attr;
@@ -371,7 +371,7 @@ void MeshManager::update_svm_attributes(Device *device, DeviceScene *dscene, Sce
int attr_map_stride = 0;
for(size_t i = 0; i < scene->meshes.size(); i++)
- attr_map_stride = max(attr_map_stride, mesh_attributes[i].size());
+ attr_map_stride = max(attr_map_stride, mesh_attributes[i].size()+1);
if(attr_map_stride == 0)
return;
@@ -393,13 +393,12 @@ void MeshManager::update_svm_attributes(Device *device, DeviceScene *dscene, Sce
AttributeRequestSet& attributes = mesh_attributes[j];
/* set object attributes */
- j = 0;
+ int index = i*attr_map_stride;
foreach(AttributeRequest& req, attributes.requests) {
- int index = i*attr_map_stride + j;
uint id;
- if(req.std == Attribute::STD_NONE)
+ if(req.std == ATTR_STD_NONE)
id = scene->shader_manager->get_attribute_id(req.name);
else
id = scene->shader_manager->get_attribute_id(req.std);
@@ -413,8 +412,14 @@ void MeshManager::update_svm_attributes(Device *device, DeviceScene *dscene, Sce
else
attr_map[index].w = NODE_ATTR_FLOAT3;
- j++;
+ index++;
}
+
+ /* terminator */
+ attr_map[index].x = ATTR_STD_NONE;
+ attr_map[index].y = 0;
+ attr_map[index].z = 0;
+ attr_map[index].w = 0;
}
/* copy to device */
@@ -434,6 +439,8 @@ void MeshManager::device_update_attributes(Device *device, DeviceScene *dscene,
for(size_t i = 0; i < scene->meshes.size(); i++) {
Mesh *mesh = scene->meshes[i];
+ scene->need_global_attributes(mesh_attributes[i]);
+
foreach(uint sindex, mesh->used_shaders) {
Shader *shader = scene->shaders[sindex];
mesh_attributes[i].add(shader->attributes);
@@ -456,8 +463,8 @@ void MeshManager::device_update_attributes(Device *device, DeviceScene *dscene,
Attribute *mattr = mesh->attributes.find(req);
/* todo: get rid of this exception */
- if(!mattr && req.std == Attribute::STD_GENERATED) {
- mattr = mesh->attributes.add(Attribute::STD_GENERATED);
+ if(!mattr && req.std == ATTR_STD_GENERATED) {
+ mattr = mesh->attributes.add(ATTR_STD_GENERATED);
if(mesh->verts.size())
memcpy(mattr->data_float3(), &mesh->verts[0], sizeof(float3)*mesh->verts.size());
}
@@ -489,19 +496,19 @@ void MeshManager::device_update_attributes(Device *device, DeviceScene *dscene,
float *data = mattr->data_float();
req.offset = attr_float.size();
+ attr_float.resize(attr_float.size() + size);
+
for(size_t k = 0; k < size; k++)
- attr_float.push_back(data[k]);
+ attr_float[req.offset+k] = data[k];
}
else {
float3 *data = mattr->data_float3();
req.offset = attr_float3.size();
- for(size_t k = 0; k < size; k++) {
- float3 f3 = data[k];
- float4 f4 = make_float4(f3.x, f3.y, f3.z, 0.0f);
+ attr_float3.resize(attr_float3.size() + size);
- attr_float3.push_back(f4);
- }
+ for(size_t k = 0; k < size; k++)
+ attr_float3[req.offset+k] = float3_to_float4(data[k]);
}
/* mesh vertex/triangle index is global, not per object, so we sneak
@@ -712,8 +719,10 @@ void MeshManager::device_update(Device *device, DeviceScene *dscene, Scene *scen
foreach(Shader *shader, scene->shaders)
shader->need_update_attributes = false;
+ bool motion_blur = scene->need_motion() == Scene::MOTION_BLUR;
+
foreach(Object *object, scene->objects)
- object->compute_bounds();
+ object->compute_bounds(motion_blur);
if(progress.get_cancel()) return;
@@ -759,5 +768,32 @@ void MeshManager::tag_update(Scene *scene)
scene->object_manager->need_update = true;
}
+bool Mesh::need_attribute(Scene *scene, AttributeStandard std)
+{
+ if(std == ATTR_STD_NONE)
+ return false;
+
+ if(scene->need_global_attribute(std))
+ return true;
+
+ foreach(uint shader, used_shaders)
+ if(scene->shaders[shader]->attributes.find(std))
+ return true;
+
+ return false;
+}
+
+bool Mesh::need_attribute(Scene *scene, ustring name)
+{
+ if(name == ustring())
+ return false;
+
+ foreach(uint shader, used_shaders)
+ if(scene->shaders[shader]->attributes.find(name))
+ return true;
+
+ return false;
+}
+
CCL_NAMESPACE_END
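Illustrative note (not part of the patch): update_svm_attributes() above now reserves one extra slot per object and writes an ATTR_STD_NONE terminator, so a lookup can scan an object's slice of the map until it either finds the requested id or hits the terminator. The real consumer lives in the kernel files, which are not in this excerpt; the field layout (id in x, offset in z) is an assumption for this standalone sketch with local stand-in types:

#include <cstdio>

struct uint4 { unsigned x, y, z, w; };

enum { ATTR_STD_NONE = 0, ATTR_STD_UV = 3 }; /* stand-in values */

/* scan one object's slice of the attribute map until either the requested
 * id is found or the ATTR_STD_NONE terminator written above is reached */
int find_attribute_offset(const uint4 *attr_map, int object, int stride, unsigned id)
{
	const uint4 *entry = attr_map + object*stride;

	while(entry->x != id) {
		if(entry->x == ATTR_STD_NONE)
			return -1;	/* terminator: attribute not present on this object */
		entry++;
	}

	return (int)entry->z;	/* assumed: offset stored by update_svm_attributes() */
}

int main()
{
	/* one object, stride 3: a UV attribute at offset 42, then the terminator */
	uint4 attr_map[3] = {{ATTR_STD_UV, 0, 42, 0}, {ATTR_STD_NONE, 0, 0, 0}, {0, 0, 0, 0}};

	printf("uv offset: %d\n", find_attribute_offset(attr_map, 0, 3, ATTR_STD_UV));
	printf("missing:   %d\n", find_attribute_offset(attr_map, 0, 3, 7));
	return 0;
}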
diff --git a/intern/cycles/render/mesh.h b/intern/cycles/render/mesh.h
index 585203484c7..047a2d2624d 100644
--- a/intern/cycles/render/mesh.h
+++ b/intern/cycles/render/mesh.h
@@ -98,6 +98,9 @@ public:
void pack_verts(float4 *tri_verts, float4 *tri_vindex, size_t vert_offset);
void compute_bvh(SceneParams *params, Progress& progress);
+ bool need_attribute(Scene *scene, AttributeStandard std);
+ bool need_attribute(Scene *scene, ustring name);
+
void tag_update(Scene *scene, bool rebuild);
};
diff --git a/intern/cycles/render/mesh_displace.cpp b/intern/cycles/render/mesh_displace.cpp
index a6f8e3f6be8..dea694a811e 100644
--- a/intern/cycles/render/mesh_displace.cpp
+++ b/intern/cycles/render/mesh_displace.cpp
@@ -140,11 +140,11 @@ bool MeshManager::displace(Device *device, Scene *scene, Mesh *mesh, Progress& p
* normals, as bump mapping in the shader will already alter the
* vertex normal, so we start from the non-displaced vertex normals
* to avoid applying the perturbation twice. */
- mesh->attributes.remove(Attribute::STD_FACE_NORMAL);
+ mesh->attributes.remove(ATTR_STD_FACE_NORMAL);
mesh->add_face_normals();
if(mesh->displacement_method == Mesh::DISPLACE_TRUE) {
- mesh->attributes.remove(Attribute::STD_VERTEX_NORMAL);
+ mesh->attributes.remove(ATTR_STD_VERTEX_NORMAL);
mesh->add_vertex_normals();
}
diff --git a/intern/cycles/render/nodes.cpp b/intern/cycles/render/nodes.cpp
index d71438ebae1..7039f5b6412 100644
--- a/intern/cycles/render/nodes.cpp
+++ b/intern/cycles/render/nodes.cpp
@@ -1514,9 +1514,9 @@ TextureCoordinateNode::TextureCoordinateNode()
void TextureCoordinateNode::attributes(AttributeRequestSet *attributes)
{
if(!output("Generated")->links.empty())
- attributes->add(Attribute::STD_GENERATED);
+ attributes->add(ATTR_STD_GENERATED);
if(!output("UV")->links.empty())
- attributes->add(Attribute::STD_UV);
+ attributes->add(ATTR_STD_UV);
ShaderNode::attributes(attributes);
}
@@ -1546,7 +1546,7 @@ void TextureCoordinateNode::compile(SVMCompiler& compiler)
compiler.add_node(geom_node, NODE_GEOM_P, out->stack_offset);
}
else {
- int attr = compiler.attribute(Attribute::STD_GENERATED);
+ int attr = compiler.attribute(ATTR_STD_GENERATED);
compiler.stack_assign(out);
compiler.add_node(attr_node, attr, out->stack_offset, NODE_ATTR_FLOAT3);
}
@@ -1560,7 +1560,7 @@ void TextureCoordinateNode::compile(SVMCompiler& compiler)
out = output("UV");
if(!out->links.empty()) {
- int attr = compiler.attribute(Attribute::STD_UV);
+ int attr = compiler.attribute(ATTR_STD_UV);
compiler.stack_assign(out);
compiler.add_node(attr_node, attr, out->stack_offset, NODE_ATTR_FLOAT3);
}
diff --git a/intern/cycles/render/object.cpp b/intern/cycles/render/object.cpp
index 28645d856a8..ccc654965f1 100644
--- a/intern/cycles/render/object.cpp
+++ b/intern/cycles/render/object.cpp
@@ -38,15 +38,37 @@ Object::Object()
visibility = ~0;
pass_id = 0;
bounds = BoundBox::empty;
+ motion.pre = transform_identity();
+ motion.post = transform_identity();
+ use_motion = false;
}
Object::~Object()
{
}
-void Object::compute_bounds()
+void Object::compute_bounds(bool motion_blur)
{
- bounds = mesh->bounds.transformed(&tfm);
+ BoundBox mbounds = mesh->bounds;
+
+ if(motion_blur && use_motion) {
+ MotionTransform decomp;
+ transform_motion_decompose(&decomp, &motion);
+
+ bounds = BoundBox::empty;
+
+ /* todo: this is really terrible. according to PBRT there is a better
+ * way to find this iteratively, but no implementation has been found
+ * or attempted yet */
+ for(float t = 0.0f; t < 1.0f; t += 1.0f/128.0f) {
+ Transform ttfm;
+
+ transform_motion_interpolate(&ttfm, &decomp, t);
+ bounds.grow(mbounds.transformed(&ttfm));
+ }
+ }
+ else
+ bounds = mbounds.transformed(&tfm);
}
void Object::apply_transform()
@@ -57,8 +79,8 @@ void Object::apply_transform()
for(size_t i = 0; i < mesh->verts.size(); i++)
mesh->verts[i] = transform_point(&tfm, mesh->verts[i]);
- Attribute *attr_fN = mesh->attributes.find(Attribute::STD_FACE_NORMAL);
- Attribute *attr_vN = mesh->attributes.find(Attribute::STD_VERTEX_NORMAL);
+ Attribute *attr_fN = mesh->attributes.find(ATTR_STD_FACE_NORMAL);
+ Attribute *attr_vN = mesh->attributes.find(ATTR_STD_VERTEX_NORMAL);
Transform ntfm = transform_transpose(transform_inverse(tfm));
@@ -83,7 +105,7 @@ void Object::apply_transform()
if(bounds.valid()) {
mesh->compute_bounds();
- compute_bounds();
+ compute_bounds(false);
}
tfm = transform_identity();
@@ -123,6 +145,7 @@ void ObjectManager::device_update_transforms(Device *device, DeviceScene *dscene
float4 *objects = dscene->objects.resize(OBJECT_SIZE*scene->objects.size());
int i = 0;
map<Mesh*, float> surface_area_map;
+ Scene::MotionType need_motion = scene->need_motion();
foreach(Object *ob, scene->objects) {
Mesh *mesh = ob->mesh;
@@ -130,7 +153,6 @@ void ObjectManager::device_update_transforms(Device *device, DeviceScene *dscene
/* compute transformations */
Transform tfm = ob->tfm;
Transform itfm = transform_inverse(tfm);
- Transform ntfm = transform_transpose(itfm);
/* compute surface area. for uniform scale we can avoid the many
transform calls and share computation for instances */
@@ -171,10 +193,38 @@ void ObjectManager::device_update_transforms(Device *device, DeviceScene *dscene
/* pack in texture */
int offset = i*OBJECT_SIZE;
- memcpy(&objects[offset], &tfm, sizeof(float4)*4);
- memcpy(&objects[offset+4], &itfm, sizeof(float4)*4);
- memcpy(&objects[offset+8], &ntfm, sizeof(float4)*4);
- objects[offset+12] = make_float4(surface_area, pass_id, 0.0f, 0.0f);
+ memcpy(&objects[offset], &tfm, sizeof(float4)*3);
+ memcpy(&objects[offset+3], &itfm, sizeof(float4)*3);
+ objects[offset+6] = make_float4(surface_area, pass_id, 0.0f, 0.0f);
+
+ if(need_motion == Scene::MOTION_PASS) {
+ /* motion transformations; these are in world or object space depending
+ on whether the mesh comes with deformed positions in object space, or
+ whether we transform the shading point in world space */
+ Transform mtfm_pre = ob->motion.pre;
+ Transform mtfm_post = ob->motion.post;
+
+ if(!mesh->attributes.find(ATTR_STD_MOTION_PRE))
+ mtfm_pre = mtfm_pre * itfm;
+ if(!mesh->attributes.find(ATTR_STD_MOTION_POST))
+ mtfm_post = mtfm_post * itfm;
+
+ memcpy(&objects[offset+8], &mtfm_pre, sizeof(float4)*4);
+ memcpy(&objects[offset+12], &mtfm_post, sizeof(float4)*4);
+ }
+ else if(need_motion == Scene::MOTION_BLUR) {
+ if(ob->use_motion) {
+ /* decompose transformations for interpolation */
+ MotionTransform decomp;
+
+ transform_motion_decompose(&decomp, &ob->motion);
+ memcpy(&objects[offset+8], &decomp, sizeof(float4)*8);
+ }
+ else {
+ float4 no_motion = make_float4(FLT_MAX);
+ memcpy(&objects[offset+8], &no_motion, sizeof(float4));
+ }
+ }
i++;
@@ -225,6 +275,7 @@ void ObjectManager::apply_static_transforms(Scene *scene, Progress& progress)
/* counter mesh users */
map<Mesh*, int> mesh_users;
+ bool motion_blur = scene->need_motion() == Scene::MOTION_BLUR;
foreach(Object *object, scene->objects) {
map<Mesh*, int>::iterator it = mesh_users.find(object->mesh);
@@ -240,12 +291,14 @@ void ObjectManager::apply_static_transforms(Scene *scene, Progress& progress)
/* apply transforms for objects with single user meshes */
foreach(Object *object, scene->objects) {
if(mesh_users[object->mesh] == 1) {
- if(!object->mesh->transform_applied) {
- object->apply_transform();
- object->mesh->transform_applied = true;
- }
+ if(!(motion_blur && object->use_motion)) {
+ if(!object->mesh->transform_applied) {
+ object->apply_transform();
+ object->mesh->transform_applied = true;
- if(progress.get_cancel()) return;
+ if(progress.get_cancel()) return;
+ }
+ }
}
}
}
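Illustrative note (not part of the patch): for MOTION_PASS the object's mtfm_pre/mtfm_post above, combined with the camera's pre/post world-to-raster transforms from camera.cpp, let the kernel project a shading point to its previous- and next-frame raster positions and store the offsets as the motion vector pass. The kernel side is not in this excerpt and the exact packing is an assumption; the standalone toy below only shows the idea, using trivial stand-in types instead of the real Transform:

#include <cstdio>

struct float3 { float x, y, z; };

/* a toy "world to raster" mapping (scale + offset), standing in for the
 * perspective transform the real kernel would use */
struct ToyRaster {
	float scale, offset_x, offset_y;
};

static float3 to_raster(const ToyRaster &t, const float3 &P)
{
	return {P.x*t.scale + t.offset_x, P.y*t.scale + t.offset_y, 0.0f};
}

int main()
{
	/* shading point at the current frame, and where the same point sits at
	 * the previous/next frame after applying the object's motion pre/post */
	float3 P      = {1.0f, 1.0f, 0.0f};
	float3 P_pre  = {0.8f, 1.0f, 0.0f};
	float3 P_post = {1.2f, 1.0f, 0.0f};

	/* camera world-to-raster at the previous, current and next frame */
	ToyRaster cam_pre  = {100.0f, 0.0f, 0.0f};
	ToyRaster cam_now  = {100.0f, 0.0f, 0.0f};
	ToyRaster cam_post = {100.0f, 0.0f, 0.0f};

	float3 r      = to_raster(cam_now,  P);
	float3 r_pre  = to_raster(cam_pre,  P_pre);
	float3 r_post = to_raster(cam_post, P_post);

	/* one possible packing of the 4-component motion pass: raster-space
	 * offsets from the current position to the previous/next positions */
	printf("motion: %f %f %f %f\n",
	       r_pre.x - r.x, r_pre.y - r.y, r_post.x - r.x, r_post.y - r.y);
	return 0;
}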
diff --git a/intern/cycles/render/object.h b/intern/cycles/render/object.h
index 14da2cfb35d..e84c4b26767 100644
--- a/intern/cycles/render/object.h
+++ b/intern/cycles/render/object.h
@@ -44,13 +44,15 @@ public:
int pass_id;
vector<ParamValue> attributes;
uint visibility;
+ MotionTransform motion;
+ bool use_motion;
Object();
~Object();
void tag_update(Scene *scene);
- void compute_bounds();
+ void compute_bounds(bool motion_blur);
void apply_transform();
};
diff --git a/intern/cycles/render/scene.cpp b/intern/cycles/render/scene.cpp
index 079f2744e73..b6453339d41 100644
--- a/intern/cycles/render/scene.cpp
+++ b/intern/cycles/render/scene.cpp
@@ -128,7 +128,7 @@ void Scene::device_update(Device *device_, Progress& progress)
if(progress.get_cancel()) return;
progress.set_status("Updating Camera");
- camera->device_update(device, &dscene);
+ camera->device_update(device, &dscene, this);
if(progress.get_cancel()) return;
@@ -166,6 +166,33 @@ void Scene::device_update(Device *device_, Progress& progress)
device->const_copy_to("__data", &dscene.data, sizeof(dscene.data));
}
+Scene::MotionType Scene::need_motion()
+{
+ if(integrator->motion_blur)
+ return MOTION_BLUR;
+ else if(Pass::contains(film->passes, PASS_MOTION))
+ return MOTION_PASS;
+ else
+ return MOTION_NONE;
+}
+
+bool Scene::need_global_attribute(AttributeStandard std)
+{
+ if(std == ATTR_STD_UV)
+ return Pass::contains(film->passes, PASS_UV);
+ if(std == ATTR_STD_MOTION_PRE || std == ATTR_STD_MOTION_POST)
+ return need_motion() == MOTION_PASS;
+
+ return false;
+}
+
+void Scene::need_global_attributes(AttributeRequestSet& attributes)
+{
+ for(int std = ATTR_STD_NONE; std < ATTR_STD_NUM; std++)
+ if(need_global_attribute((AttributeStandard)std))
+ attributes.add((AttributeStandard)std);
+}
+
bool Scene::need_update()
{
return (need_reset() || film->need_update);
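Illustrative note (not part of the patch): Scene::need_motion() above gives motion blur priority over the motion pass, so enabling both on the integrator and film still produces MOTION_BLUR. A minimal standalone sketch of that precedence, with stand-in flags instead of the real Integrator and Film:

#include <cstdio>

enum MotionType { MOTION_NONE = 0, MOTION_PASS, MOTION_BLUR };

static MotionType need_motion(bool integrator_motion_blur, bool film_has_motion_pass)
{
	if(integrator_motion_blur)
		return MOTION_BLUR;
	else if(film_has_motion_pass)
		return MOTION_PASS;
	else
		return MOTION_NONE;
}

int main()
{
	/* motion blur wins over the motion pass when both are enabled */
	printf("%d\n", need_motion(true, true));   /* MOTION_BLUR */
	printf("%d\n", need_motion(false, true));  /* MOTION_PASS */
	printf("%d\n", need_motion(false, false)); /* MOTION_NONE */
	return 0;
}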
diff --git a/intern/cycles/render/scene.h b/intern/cycles/render/scene.h
index af4301b1cd9..7d4acf369fd 100644
--- a/intern/cycles/render/scene.h
+++ b/intern/cycles/render/scene.h
@@ -33,6 +33,7 @@
CCL_NAMESPACE_BEGIN
+class AttributeRequestSet;
class Background;
class Camera;
class Device;
@@ -175,6 +176,12 @@ public:
void device_update(Device *device, Progress& progress);
+ bool need_global_attribute(AttributeStandard std);
+ void need_global_attributes(AttributeRequestSet& attributes);
+
+ enum MotionType { MOTION_NONE = 0, MOTION_PASS, MOTION_BLUR };
+ MotionType need_motion();
+
bool need_update();
bool need_reset();
};
diff --git a/intern/cycles/render/shader.cpp b/intern/cycles/render/shader.cpp
index c1f7b3518d2..f50709146ef 100644
--- a/intern/cycles/render/shader.cpp
+++ b/intern/cycles/render/shader.cpp
@@ -133,12 +133,12 @@ uint ShaderManager::get_attribute_id(ustring name)
if(it != unique_attribute_id.end())
return it->second;
- uint id = (uint)Attribute::STD_NUM + unique_attribute_id.size();
+ uint id = (uint)ATTR_STD_NUM + unique_attribute_id.size();
unique_attribute_id[name] = id;
return id;
}
-uint ShaderManager::get_attribute_id(Attribute::Standard std)
+uint ShaderManager::get_attribute_id(AttributeStandard std)
{
return (uint)std;
}
diff --git a/intern/cycles/render/shader.h b/intern/cycles/render/shader.h
index 35f3cfe27f5..48d517ce21a 100644
--- a/intern/cycles/render/shader.h
+++ b/intern/cycles/render/shader.h
@@ -103,7 +103,7 @@ public:
/* get globally unique id for a type of attribute */
uint get_attribute_id(ustring name);
- uint get_attribute_id(Attribute::Standard std);
+ uint get_attribute_id(AttributeStandard std);
/* get shader id for mesh faces */
int get_shader_id(uint shader, Mesh *mesh = NULL, bool smooth = false);
diff --git a/intern/cycles/render/svm.cpp b/intern/cycles/render/svm.cpp
index a52e30c6030..1ff3ac20d50 100644
--- a/intern/cycles/render/svm.cpp
+++ b/intern/cycles/render/svm.cpp
@@ -337,7 +337,7 @@ uint SVMCompiler::attribute(ustring name)
return shader_manager->get_attribute_id(name);
}
-uint SVMCompiler::attribute(Attribute::Standard std)
+uint SVMCompiler::attribute(AttributeStandard std)
{
return shader_manager->get_attribute_id(std);
}
diff --git a/intern/cycles/render/svm.h b/intern/cycles/render/svm.h
index 56c930f6217..0db68f400fc 100644
--- a/intern/cycles/render/svm.h
+++ b/intern/cycles/render/svm.h
@@ -69,7 +69,7 @@ public:
void add_node(const float4& f);
void add_array(float4 *f, int num);
uint attribute(ustring name);
- uint attribute(Attribute::Standard std);
+ uint attribute(AttributeStandard std);
uint encode_uchar4(uint x, uint y = 0, uint z = 0, uint w = 0);
uint closure_mix_weight_offset() { return mix_weight_offset; }
diff --git a/intern/cycles/subd/subd_dice.cpp b/intern/cycles/subd/subd_dice.cpp
index 6b29d1ca51a..6e24bb410b5 100644
--- a/intern/cycles/subd/subd_dice.cpp
+++ b/intern/cycles/subd/subd_dice.cpp
@@ -39,7 +39,7 @@ EdgeDice::EdgeDice(Mesh *mesh_, int shader_, bool smooth_, float dicing_rate_)
smooth = smooth_;
camera = NULL;
- mesh->attributes.add(Attribute::STD_VERTEX_NORMAL);
+ mesh->attributes.add(ATTR_STD_VERTEX_NORMAL);
}
void EdgeDice::reserve(int num_verts, int num_tris)
@@ -49,7 +49,7 @@ void EdgeDice::reserve(int num_verts, int num_tris)
mesh->reserve(vert_offset + num_verts, tri_offset + num_tris);
- Attribute *attr_vN = mesh->attributes.add(Attribute::STD_VERTEX_NORMAL);
+ Attribute *attr_vN = mesh->attributes.add(ATTR_STD_VERTEX_NORMAL);
mesh_P = &mesh->verts[0];
mesh_N = attr_vN->data_float3();
diff --git a/intern/cycles/util/util_math.h b/intern/cycles/util/util_math.h
index 53c1302b4a1..f09803d8b09 100644
--- a/intern/cycles/util/util_math.h
+++ b/intern/cycles/util/util_math.h
@@ -55,6 +55,10 @@ CCL_NAMESPACE_BEGIN
#ifndef M_2_PI_F
#define M_2_PI_F ((float)0.636619772367581343075535053490057448)
#endif
+#ifndef M_SQRT2_F
+#define M_SQRT2_F ((float)1.41421356237309504880)
+#endif
+
/* Scalar */
@@ -719,6 +723,45 @@ __device_inline float4 cross(const float4& a, const float4& b)
#endif
}
+__device_inline bool is_zero(const float4& a)
+{
+#ifdef __KERNEL_SSE__
+ return a == make_float4(0.0f);
+#else
+ return (a.x == 0.0f && a.y == 0.0f && a.z == 0.0f && a.w == 0.0f);
+#endif
+}
+
+__device_inline float reduce_add(const float4& a)
+{
+#ifdef __KERNEL_SSE__
+ float4 h = shuffle<1,0,3,2>(a) + a;
+ return _mm_cvtss_f32(shuffle<2,3,0,1>(h) + h); /* todo: efficiency? */
+#else
+ return ((a.x + a.y) + (a.z + a.w));
+#endif
+}
+
+__device_inline float average(const float4& a)
+{
+ return reduce_add(a) * 0.25f;
+}
+
+__device_inline float dot(const float4& a, const float4& b)
+{
+ return reduce_add(a * b);
+}
+
+__device_inline float len(const float4 a)
+{
+ return sqrtf(dot(a, a));
+}
+
+__device_inline float4 normalize(const float4 a)
+{
+ return a/len(a);
+}
+
__device_inline float4 min(float4 a, float4 b)
{
#ifdef __KERNEL_SSE__
@@ -790,39 +833,6 @@ __device_inline void print_float4(const char *label, const float4& a)
#endif
-#ifndef __KERNEL_OPENCL__
-
-__device_inline bool is_zero(const float4& a)
-{
-#ifdef __KERNEL_SSE__
- return a == make_float4(0.0f);
-#else
- return (a.x == 0.0f && a.y == 0.0f && a.z == 0.0f && a.w == 0.0f);
-#endif
-}
-
-__device_inline float reduce_add(const float4& a)
-{
-#ifdef __KERNEL_SSE__
- float4 h = shuffle<1,0,3,2>(a) + a;
- return _mm_cvtss_f32(shuffle<2,3,0,1>(h) + h); /* todo: efficiency? */
-#else
- return ((a.x + a.y) + (a.z + a.w));
-#endif
-}
-
-__device_inline float average(const float4& a)
-{
- return reduce_add(a) * 0.25f;
-}
-
-__device_inline float dot(const float4& a, const float4& b)
-{
- return reduce_add(a * b);
-}
-
-#endif
-
/* Int3 */
#ifndef __KERNEL_OPENCL__
diff --git a/intern/cycles/util/util_transform.cpp b/intern/cycles/util/util_transform.cpp
index 0fd26825911..1780994da27 100644
--- a/intern/cycles/util/util_transform.cpp
+++ b/intern/cycles/util/util_transform.cpp
@@ -53,6 +53,8 @@
CCL_NAMESPACE_BEGIN
+/* Transform Inverse */
+
static bool transform_matrix4_gj_inverse(float R[][4], float M[][4])
{
/* forward elimination */
@@ -151,5 +153,104 @@ Transform transform_inverse(const Transform& tfm)
return tfmR;
}
+/* Motion Transform */
+
+static float4 transform_to_quat(const Transform& tfm)
+{
+ double trace = tfm[0][0] + tfm[1][1] + tfm[2][2];
+ float4 qt;
+
+ if(trace > 0.0f) {
+ double s = sqrt(trace + 1.0);
+
+ qt.w = (float)(s/2.0);
+ s = 0.5/s;
+
+ qt.x = (float)((tfm[2][1] - tfm[1][2]) * s);
+ qt.y = (float)((tfm[0][2] - tfm[2][0]) * s);
+ qt.z = (float)((tfm[1][0] - tfm[0][1]) * s);
+ }
+ else {
+ int i = 0;
+
+ if(tfm[1][1] > tfm[i][i])
+ i = 1;
+ if(tfm[2][2] > tfm[i][i])
+ i = 2;
+
+ int j = (i + 1)%3;
+ int k = (j + 1)%3;
+
+ double s = sqrt((tfm[i][i] - (tfm[j][j] + tfm[k][k])) + 1.0);
+
+ double q[3];
+ q[i] = s * 0.5;
+ if(s != 0.0)
+ s = 0.5/s;
+
+ double w = (tfm[k][j] - tfm[j][k]) * s;
+ q[j] = (tfm[j][i] + tfm[i][j]) * s;
+ q[k] = (tfm[k][i] + tfm[i][k]) * s;
+
+ qt.x = (float)q[0];
+ qt.y = (float)q[1];
+ qt.z = (float)q[2];
+ qt.w = (float)w;
+ }
+
+ return qt;
+}
+
+static void transform_decompose(Transform *decomp, const Transform *tfm)
+{
+ /* extract translation */
+ decomp->y = make_float4(tfm->x.w, tfm->y.w, tfm->z.w, 0.0f);
+
+ /* extract rotation */
+ Transform M = *tfm;
+ M.x.w = 0.0f; M.y.w = 0.0f; M.z.w = 0.0f; M.w.w = 1.0f;
+
+ Transform R = M;
+ float norm;
+ int iteration = 0;
+
+ do {
+ Transform Rnext;
+ Transform Rit = transform_inverse(transform_transpose(R));
+
+ for(int i = 0; i < 4; i++)
+ for(int j = 0; j < 4; j++)
+ Rnext[i][j] = 0.5f * (R[i][j] + Rit[i][j]);
+
+ norm = 0.0f;
+ for(int i = 0; i < 3; i++) {
+ norm = max(norm,
+ fabsf(R[i][0] - Rnext[i][0]) +
+ fabsf(R[i][1] - Rnext[i][1]) +
+ fabsf(R[i][2] - Rnext[i][2]));
+ }
+
+ R = Rnext;
+ iteration++;
+ } while(iteration < 100 && norm > 1e-4f);
+
+ if(transform_negative_scale(R))
+ R = R * transform_scale(-1.0f, -1.0f, -1.0f); /* todo: test scale */
+
+ decomp->x = transform_to_quat(R);
+
+ /* extract scale and pack it */
+ Transform scale = transform_inverse(R) * M;
+ decomp->y.w = scale.x.x;
+ decomp->z = make_float4(scale.x.y, scale.x.z, scale.y.x, scale.y.y);
+ decomp->w = make_float4(scale.y.z, scale.z.x, scale.z.y, scale.z.z);
+}
+
+void transform_motion_decompose(MotionTransform *decomp, const MotionTransform *motion)
+{
+ transform_decompose(&decomp->pre, &motion->pre);
+ transform_decompose(&decomp->post, &motion->post);
+}
+
CCL_NAMESPACE_END
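Illustrative note (not part of the patch): the do/while loop in transform_decompose() above is a polar-decomposition iteration; repeatedly averaging R with the inverse of its transpose strips scale and shear out of the upper 3x3 block and converges to the pure rotation. A self-contained 3x3 sketch of the same iteration, with local helper types (not the Cycles Transform):

#include <math.h>
#include <stdio.h>

struct M3 { float m[3][3]; };

static M3 transpose(const M3 &a)
{
	M3 r;
	for(int i = 0; i < 3; i++)
		for(int j = 0; j < 3; j++)
			r.m[i][j] = a.m[j][i];
	return r;
}

static M3 inverse(const M3 &a)
{
	/* adjugate over determinant */
	M3 r;
	const float (*m)[3] = a.m;
	float det = m[0][0]*(m[1][1]*m[2][2] - m[1][2]*m[2][1])
	          - m[0][1]*(m[1][0]*m[2][2] - m[1][2]*m[2][0])
	          + m[0][2]*(m[1][0]*m[2][1] - m[1][1]*m[2][0]);
	float inv_det = (det != 0.0f)? 1.0f/det: 0.0f;

	r.m[0][0] =  (m[1][1]*m[2][2] - m[1][2]*m[2][1])*inv_det;
	r.m[0][1] = -(m[0][1]*m[2][2] - m[0][2]*m[2][1])*inv_det;
	r.m[0][2] =  (m[0][1]*m[1][2] - m[0][2]*m[1][1])*inv_det;
	r.m[1][0] = -(m[1][0]*m[2][2] - m[1][2]*m[2][0])*inv_det;
	r.m[1][1] =  (m[0][0]*m[2][2] - m[0][2]*m[2][0])*inv_det;
	r.m[1][2] = -(m[0][0]*m[1][2] - m[0][2]*m[1][0])*inv_det;
	r.m[2][0] =  (m[1][0]*m[2][1] - m[1][1]*m[2][0])*inv_det;
	r.m[2][1] = -(m[0][0]*m[2][1] - m[0][1]*m[2][0])*inv_det;
	r.m[2][2] =  (m[0][0]*m[1][1] - m[0][1]*m[1][0])*inv_det;
	return r;
}

int main()
{
	/* rotation by 30 degrees about Z, uniformly scaled by 2 */
	float c = cosf(0.5235988f), s = sinf(0.5235988f);
	M3 R = {{{2.0f*c, -2.0f*s, 0.0f}, {2.0f*s, 2.0f*c, 0.0f}, {0.0f, 0.0f, 2.0f}}};

	for(int iteration = 0; iteration < 100; iteration++) {
		M3 Rit = inverse(transpose(R)), Rnext;
		float norm = 0.0f;

		for(int i = 0; i < 3; i++)
			for(int j = 0; j < 3; j++) {
				Rnext.m[i][j] = 0.5f*(R.m[i][j] + Rit.m[i][j]);
				norm = fmaxf(norm, fabsf(R.m[i][j] - Rnext.m[i][j]));
			}

		R = Rnext;
		if(norm <= 1e-4f)
			break;
	}

	/* R now holds the pure rotation: cos(30) ~ 0.866, sin(30) = 0.5 */
	printf("%f %f\n%f %f\n", R.m[0][0], R.m[0][1], R.m[1][0], R.m[1][1]);
	return 0;
}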
diff --git a/intern/cycles/util/util_transform.h b/intern/cycles/util/util_transform.h
index aeaef7b0e21..03dfbaa441d 100644
--- a/intern/cycles/util/util_transform.h
+++ b/intern/cycles/util/util_transform.h
@@ -28,6 +28,8 @@
CCL_NAMESPACE_BEGIN
+/* Data Types */
+
typedef struct Transform {
float4 x, y, z, w; /* rows */
@@ -37,6 +39,17 @@ typedef struct Transform {
#endif
} Transform;
+typedef struct MotionTransform {
+ Transform pre;
+ Transform post;
+} MotionTransform;
+
+/* a transform decomposed into rotation/translation/scale. we reuse the same
+ * Transform data structure and tightly pack the decomposition into it: first
+ * the rotation (4 floats), then the translation (3), then the 3x3 scale (9) */
+
+/* Functions */
+
__device_inline float3 transform_perspective(const Transform *t, const float3 a)
{
float4 b = make_float4(a.x, a.y, a.z, 1.0f);
@@ -62,6 +75,15 @@ __device_inline float3 transform_direction(const Transform *t, const float3 a)
return c;
}
+__device_inline float3 transform_direction_transposed(const Transform *t, const float3 a)
+{
+ float3 x = make_float3(t->x.x, t->y.x, t->z.x);
+ float3 y = make_float3(t->x.y, t->y.y, t->z.y);
+ float3 z = make_float3(t->x.z, t->y.z, t->z.z);
+
+ return make_float3(dot(x, a), dot(y, a), dot(z, a));
+}
+
#ifndef __KERNEL_GPU__
__device_inline void print_transform(const char *label, const Transform& t)
@@ -272,6 +294,102 @@ __device_inline Transform transform_clear_scale(const Transform& tfm)
#endif
+/* Motion Transform */
+
+__device_inline float4 quat_interpolate(float4 q1, float4 q2, float t)
+{
+ float costheta = dot(q1, q2);
+
+ if(costheta > 0.9995f) {
+ return normalize((1.0f - t)*q1 + t*q2);
+ }
+ else {
+ float theta = acosf(clamp(costheta, -1.0f, 1.0f));
+ float thetap = theta * t;
+ float4 qperp = normalize(q2 - q1 * costheta);
+ return q1 * cosf(thetap) + qperp * sinf(thetap);
+ }
+}
+
+__device_inline Transform transform_quick_inverse(Transform M)
+{
+ Transform R;
+ float det = M.x.x*(M.z.z*M.y.y - M.z.y*M.y.z) - M.y.x*(M.z.z*M.x.y - M.z.y*M.x.z) + M.z.x*(M.y.z*M.x.y - M.y.y*M.x.z);
+
+ det = (det != 0.0f)? 1.0f/det: 0.0f;
+
+ float3 Rx = det*make_float3(M.z.z*M.y.y - M.z.y*M.y.z, M.z.y*M.x.z - M.z.z*M.x.y, M.y.z*M.x.y - M.y.y*M.x.z);
+ float3 Ry = det*make_float3(M.z.x*M.y.z - M.z.z*M.y.x, M.z.z*M.x.x - M.z.x*M.x.z, M.y.x*M.x.z - M.y.z*M.x.x);
+ float3 Rz = det*make_float3(M.z.y*M.y.x - M.z.x*M.y.y, M.z.x*M.x.y - M.z.y*M.x.x, M.y.y*M.x.x - M.y.x*M.x.y);
+ float3 T = -make_float3(M.x.w, M.y.w, M.z.w);
+
+ R.x = make_float4(Rx.x, Rx.y, Rx.z, dot(Rx, T));
+ R.y = make_float4(Ry.x, Ry.y, Ry.z, dot(Ry, T));
+ R.z = make_float4(Rz.x, Rz.y, Rz.z, dot(Rz, T));
+ R.w = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
+
+ return R;
+}
+
+__device_inline void transform_compose(Transform *tfm, const Transform *decomp)
+{
+ /* rotation */
+ float q0, q1, q2, q3, qda, qdb, qdc, qaa, qab, qac, qbb, qbc, qcc;
+
+ q0 = M_SQRT2_F * decomp->x.w;
+ q1 = M_SQRT2_F * decomp->x.x;
+ q2 = M_SQRT2_F * decomp->x.y;
+ q3 = M_SQRT2_F * decomp->x.z;
+
+ qda = q0*q1;
+ qdb = q0*q2;
+ qdc = q0*q3;
+ qaa = q1*q1;
+ qab = q1*q2;
+ qac = q1*q3;
+ qbb = q2*q2;
+ qbc = q2*q3;
+ qcc = q3*q3;
+
+ float3 rotation_x = make_float3(1.0f-qbb-qcc, -qdc+qab, qdb+qac);
+ float3 rotation_y = make_float3(qdc+qab, 1.0f-qaa-qcc, -qda+qbc);
+ float3 rotation_z = make_float3(-qdb+qac, qda+qbc, 1.0f-qaa-qbb);
+
+ /* scale */
+ float3 scale_x = make_float3(decomp->y.w, decomp->z.z, decomp->w.y);
+ float3 scale_y = make_float3(decomp->z.x, decomp->z.w, decomp->w.z);
+ float3 scale_z = make_float3(decomp->z.y, decomp->w.x, decomp->w.w);
+
+ /* compose with translation */
+ tfm->x = make_float4(dot(rotation_x, scale_x), dot(rotation_x, scale_y), dot(rotation_x, scale_z), decomp->y.x);
+ tfm->y = make_float4(dot(rotation_y, scale_x), dot(rotation_y, scale_y), dot(rotation_y, scale_z), decomp->y.y);
+ tfm->z = make_float4(dot(rotation_z, scale_x), dot(rotation_z, scale_y), dot(rotation_z, scale_z), decomp->y.z);
+ tfm->w = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
+}
+
+__device void transform_motion_interpolate(Transform *tfm, const MotionTransform *motion, float t)
+{
+ Transform decomp;
+
+ decomp.x = quat_interpolate(motion->pre.x, motion->post.x, t);
+ decomp.y = (1.0f - t)*motion->pre.y + t*motion->post.y;
+ decomp.z = (1.0f - t)*motion->pre.z + t*motion->post.z;
+ decomp.w = (1.0f - t)*motion->pre.w + t*motion->post.w;
+
+ transform_compose(tfm, &decomp);
+}
+
+#ifndef __KERNEL_GPU__
+
+__device_inline bool operator==(const MotionTransform& A, const MotionTransform& B)
+{
+ return (A.pre == B.pre && A.post == B.post);
+}
+
+void transform_motion_decompose(MotionTransform *decomp, const MotionTransform *motion);
+
+#endif
+
CCL_NAMESPACE_END
#endif /* __UTIL_TRANSFORM_H__ */
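Illustrative note (not part of the patch): transform_motion_interpolate() lerps the translation and scale parts of the decomposition but uses quat_interpolate() for the rotation, so halfway through a 90 degree turn the result is the 45 degree rotation rather than a squashed linear blend. A self-contained sketch mirroring that branch logic, with a local quaternion type instead of float4:

#include <math.h>
#include <stdio.h>

struct quat { float x, y, z, w; };

static quat scale(const quat &q, float s) { return {q.x*s, q.y*s, q.z*s, q.w*s}; }
static quat add(const quat &a, const quat &b) { return {a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w}; }
static float dot(const quat &a, const quat &b) { return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w; }
static quat normalize(const quat &q) { float l = sqrtf(dot(q, q)); return scale(q, 1.0f/l); }

/* same structure as quat_interpolate() above: lerp + renormalize for nearly
 * identical rotations, spherical interpolation otherwise */
static quat quat_interpolate(const quat &q1, const quat &q2, float t)
{
	float costheta = dot(q1, q2);

	if(costheta > 0.9995f) {
		return normalize(add(scale(q1, 1.0f - t), scale(q2, t)));
	}
	else {
		float theta = acosf(fminf(fmaxf(costheta, -1.0f), 1.0f));
		float thetap = theta*t;
		quat qperp = normalize(add(q2, scale(q1, -costheta)));
		return add(scale(q1, cosf(thetap)), scale(qperp, sinf(thetap)));
	}
}

int main()
{
	/* identity and a 90 degree rotation about Z, in (x, y, z, w) layout */
	quat pre  = {0.0f, 0.0f, 0.0f, 1.0f};
	quat post = {0.0f, 0.0f, sinf(0.785398f), cosf(0.785398f)};

	/* halfway through the shutter this should be the 45 degree rotation:
	 * z = sin(pi/8) ~ 0.3827, w = cos(pi/8) ~ 0.9239 */
	quat mid = quat_interpolate(pre, post, 0.5f);
	printf("%f %f %f %f\n", mid.x, mid.y, mid.z, mid.w);
	return 0;
}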