git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenkernel/intern/constraint.c'):
-rw-r--r--  source/blender/blenkernel/intern/constraint.c  537
1 file changed, 355 insertions, 182 deletions
diff --git a/source/blender/blenkernel/intern/constraint.c b/source/blender/blenkernel/intern/constraint.c
index 45e2ff10ba4..050e8d434ae 100644
--- a/source/blender/blenkernel/intern/constraint.c
+++ b/source/blender/blenkernel/intern/constraint.c
@@ -52,7 +52,7 @@
#include "DNA_tracking_types.h"
#include "BKE_action.h"
-#include "BKE_anim.h" /* for the curve calculation part */
+#include "BKE_anim_path.h"
#include "BKE_armature.h"
#include "BKE_bvhutils.h"
#include "BKE_cachefile.h"
@@ -62,7 +62,7 @@
#include "BKE_deform.h"
#include "BKE_displist.h"
#include "BKE_editmesh.h"
-#include "BKE_fcurve.h"
+#include "BKE_fcurve_driver.h"
#include "BKE_global.h"
#include "BKE_idprop.h"
#include "BKE_lib_id.h"
@@ -472,9 +472,9 @@ static void contarget_get_mesh_mat(Object *ob, const char *substring, float mat[
/* derive the rotation from the average normal:
* - code taken from transform_gizmo.c,
- * calc_gizmo_stats, V3D_ORIENT_NORMAL case
- */
- /* we need the transpose of the inverse for a normal... */
+ * calc_gizmo_stats, V3D_ORIENT_NORMAL case */
+
+ /* We need the transpose of the inverse for a normal. */
copy_m3_m4(imat, ob->obmat);
invert_m3_m3(tmat, imat);
@@ -577,7 +577,7 @@ static void constraint_target_to_mat4(Object *ob,
copy_m4_m4(mat, ob->obmat);
BKE_constraint_mat_convertspace(ob, NULL, mat, from, to, false);
}
- /* Case VERTEXGROUP */
+ /* Case VERTEXGROUP */
/* Current method just takes the average location of all the points in the
* VertexGroup, and uses that as the location value of the targets. Where
* possible, the orientation will also be calculated, by calculating an
@@ -894,55 +894,68 @@ static void childof_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar
}
float parmat[4][4];
+ float inverse_matrix[4][4];
/* Simple matrix parenting. */
if ((data->flag & CHILDOF_ALL) == CHILDOF_ALL) {
copy_m4_m4(parmat, ct->matrix);
+ copy_m4_m4(inverse_matrix, data->invmat);
}
/* Filter the parent matrix by channel. */
else {
float loc[3], eul[3], size[3];
+ float loco[3], eulo[3], sizeo[3];
/* extract components of both matrices */
copy_v3_v3(loc, ct->matrix[3]);
mat4_to_eulO(eul, ct->rotOrder, ct->matrix);
mat4_to_size(size, ct->matrix);
- /* disable channels not enabled */
+ copy_v3_v3(loco, data->invmat[3]);
+ mat4_to_eulO(eulo, cob->rotOrder, data->invmat);
+ mat4_to_size(sizeo, data->invmat);
+
+ /* Reset the locked channels to their no-op values. */
if (!(data->flag & CHILDOF_LOCX)) {
- loc[0] = 0.0f;
+ loc[0] = loco[0] = 0.0f;
}
if (!(data->flag & CHILDOF_LOCY)) {
- loc[1] = 0.0f;
+ loc[1] = loco[1] = 0.0f;
}
if (!(data->flag & CHILDOF_LOCZ)) {
- loc[2] = 0.0f;
+ loc[2] = loco[2] = 0.0f;
}
if (!(data->flag & CHILDOF_ROTX)) {
- eul[0] = 0.0f;
+ eul[0] = eulo[0] = 0.0f;
}
if (!(data->flag & CHILDOF_ROTY)) {
- eul[1] = 0.0f;
+ eul[1] = eulo[1] = 0.0f;
}
if (!(data->flag & CHILDOF_ROTZ)) {
- eul[2] = 0.0f;
+ eul[2] = eulo[2] = 0.0f;
}
if (!(data->flag & CHILDOF_SIZEX)) {
- size[0] = 1.0f;
+ size[0] = sizeo[0] = 1.0f;
}
if (!(data->flag & CHILDOF_SIZEY)) {
- size[1] = 1.0f;
+ size[1] = sizeo[1] = 1.0f;
}
if (!(data->flag & CHILDOF_SIZEZ)) {
- size[2] = 1.0f;
+ size[2] = sizeo[2] = 1.0f;
}
- /* make new target mat and offset mat */
+ /* Construct the new matrices given the disabled channels. */
loc_eulO_size_to_mat4(parmat, loc, eul, size, ct->rotOrder);
+ loc_eulO_size_to_mat4(inverse_matrix, loco, eulo, sizeo, cob->rotOrder);
}
- /* Compute the inverse matrix if requested. */
+ /* If requested, compute the inverse matrix from the computed parent matrix. */
if (data->flag & CHILDOF_SET_INVERSE) {
invert_m4_m4(data->invmat, parmat);
+ if (cob->pchan != NULL) {
+ mul_m4_series(data->invmat, data->invmat, cob->ob->obmat);
+ }
+
+ copy_m4_m4(inverse_matrix, data->invmat);
data->flag &= ~CHILDOF_SET_INVERSE;
@@ -962,7 +975,7 @@ static void childof_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar
* (i.e. owner is 'parented' to parent). */
float orig_cob_matrix[4][4];
copy_m4_m4(orig_cob_matrix, cob->matrix);
- mul_m4_series(cob->matrix, parmat, data->invmat, orig_cob_matrix);
+ mul_m4_series(cob->matrix, parmat, inverse_matrix, orig_cob_matrix);
/* Without this, changes to scale and rotation can change location
* of a parentless bone or a disconnected bone. Even though its set
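
The substance of the Child Of change above is that the stored inverse matrix is now decomposed and channel-filtered alongside the parent matrix, so a disabled channel is neutralized in both operands of the final parmat * inverse_matrix * original composition. A minimal standalone sketch of the masking step, using plain C and hypothetical channel flags rather than Blender's DNA flags or BLI math types:

/* Locked location channels are reset to 0.0 (scale would use 1.0) in BOTH the
 * decomposed parent matrix and the decomposed inverse matrix, so the recomposed
 * matrices agree on which channels the constraint may affect.
 * Hypothetical flags, for illustration only. */
#include <stdio.h>

enum { CH_LOCX = 1 << 0, CH_LOCY = 1 << 1, CH_LOCZ = 1 << 2 };

static void mask_locked_location(int flag, float loc[3], float loc_inverse[3])
{
  const int channel_bit[3] = {CH_LOCX, CH_LOCY, CH_LOCZ};
  for (int i = 0; i < 3; i++) {
    if (!(flag & channel_bit[i])) {
      loc[i] = loc_inverse[i] = 0.0f; /* No-op value for a disabled channel. */
    }
  }
}

int main(void)
{
  float loc[3] = {1.0f, 2.0f, 3.0f};
  float loc_inv[3] = {-1.0f, -2.0f, -3.0f};
  mask_locked_location(CH_LOCX | CH_LOCZ, loc, loc_inv); /* Y is locked. */
  printf("loc: %.1f %.1f %.1f  inv: %.1f %.1f %.1f\n",
         loc[0], loc[1], loc[2], loc_inv[0], loc_inv[1], loc_inv[2]);
  return 0;
}
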
@@ -1000,8 +1013,8 @@ static void trackto_new_data(void *cdata)
{
bTrackToConstraint *data = (bTrackToConstraint *)cdata;
- data->reserved1 = TRACK_Y;
- data->reserved2 = UP_Z;
+ data->reserved1 = TRACK_nZ;
+ data->reserved2 = UP_Y;
}
static void trackto_id_looper(bConstraint *con, ConstraintIDFunc func, void *userdata)
@@ -2513,7 +2526,7 @@ static void armdef_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *targ
/* Process all targets. This can't use ct->matrix, as armdef_get_tarmat is not
* called in solve for efficiency because the constraint needs bone data anyway. */
- for (bConstraintTarget *ct = targets->first; ct; ct = ct->next) {
+ LISTBASE_FOREACH (bConstraintTarget *, ct, targets) {
if (ct->weight <= 0.0f) {
continue;
}
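
The hand-rolled first/next pointer walk is replaced by the LISTBASE_FOREACH macro from BLI_listbase.h. A standalone sketch of the equivalent iteration pattern, with simplified stand-ins for Blender's Link/ListBase types (the real definitions live in DNA_listBase.h):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for Blender's intrusive list types; illustration only. */
typedef struct Link { struct Link *next, *prev; } Link;
typedef struct ListBase { void *first, *last; } ListBase;

/* Same shape as the macro used above: typed iteration until NULL. */
#define LISTBASE_FOREACH(type, var, list) \
  for (type var = (type)((list)->first); var != NULL; var = (type)(((Link *)(var))->next))

typedef struct Node { Link link; float weight; } Node;

int main(void)
{
  Node a = {{NULL, NULL}, 0.0f}, b = {{NULL, NULL}, 0.75f};
  a.link.next = (Link *)&b;
  ListBase targets = {&a, &b};

  LISTBASE_FOREACH (Node *, node, &targets) {
    if (node->weight <= 0.0f) {
      continue; /* Mirrors the weight check in armdef_evaluate(). */
    }
    printf("weight: %.2f\n", node->weight);
  }
  return 0;
}
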
@@ -4581,230 +4594,390 @@ static void followtrack_id_looper(bConstraint *con, ConstraintIDFunc func, void
func(con, (ID **)&data->depth_ob, false, userdata);
}
-static void followtrack_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *UNUSED(targets))
+static MovieClip *followtrack_tracking_clip_get(bConstraint *con, bConstraintOb *cob)
{
- Depsgraph *depsgraph = cob->depsgraph;
- Scene *scene = cob->scene;
bFollowTrackConstraint *data = con->data;
- MovieClip *clip = data->clip;
+
+ if (data->flag & FOLLOWTRACK_ACTIVECLIP) {
+ Scene *scene = cob->scene;
+ return scene->clip;
+ }
+
+ return data->clip;
+}
+
+static MovieTrackingObject *followtrack_tracking_object_get(bConstraint *con, bConstraintOb *cob)
+{
+ MovieClip *clip = followtrack_tracking_clip_get(con, cob);
+ MovieTracking *tracking = &clip->tracking;
+ bFollowTrackConstraint *data = con->data;
+
+ if (data->object[0]) {
+ return BKE_tracking_object_get_named(tracking, data->object);
+ }
+ return BKE_tracking_object_get_camera(tracking);
+}
+
+static Object *followtrack_camera_object_get(bConstraint *con, bConstraintOb *cob)
+{
+ bFollowTrackConstraint *data = con->data;
+
+ if (data->camera == NULL) {
+ Scene *scene = cob->scene;
+ return scene->camera;
+ }
+
+ return data->camera;
+}
+
+typedef struct FollowTrackContext {
+ int flag;
+ int frame_method;
+
+ Depsgraph *depsgraph;
+ Scene *scene;
+
+ MovieClip *clip;
+ Object *camera_object;
+ Object *depth_object;
+
MovieTracking *tracking;
- MovieTrackingTrack *track;
MovieTrackingObject *tracking_object;
- Object *camob = data->camera ? data->camera : scene->camera;
+ MovieTrackingTrack *track;
- float ctime = DEG_get_ctime(depsgraph);
- float framenr;
+ float depsgraph_time;
+ float clip_frame;
+} FollowTrackContext;
- if (data->flag & FOLLOWTRACK_ACTIVECLIP) {
- clip = scene->clip;
- }
+static bool followtrack_context_init(FollowTrackContext *context,
+ bConstraint *con,
+ bConstraintOb *cob)
+{
+ bFollowTrackConstraint *data = con->data;
- if (!clip || !data->track[0] || !camob) {
- return;
+ context->flag = data->flag;
+ context->frame_method = data->frame_method;
+
+ context->depsgraph = cob->depsgraph;
+ context->scene = cob->scene;
+
+ context->clip = followtrack_tracking_clip_get(con, cob);
+ context->camera_object = followtrack_camera_object_get(con, cob);
+ if (context->clip == NULL || context->camera_object == NULL) {
+ return false;
}
+ context->depth_object = data->depth_ob;
- tracking = &clip->tracking;
+ context->tracking = &context->clip->tracking;
+ context->tracking_object = followtrack_tracking_object_get(con, cob);
+ if (context->tracking_object == NULL) {
+ return false;
+ }
- if (data->object[0]) {
- tracking_object = BKE_tracking_object_get_named(tracking, data->object);
+ context->track = BKE_tracking_track_get_named(
+ context->tracking, context->tracking_object, data->track);
+ if (context->track == NULL) {
+ return false;
}
- else {
- tracking_object = BKE_tracking_object_get_camera(tracking);
+
+ context->depsgraph_time = DEG_get_ctime(context->depsgraph);
+ context->clip_frame = BKE_movieclip_remap_scene_to_clip_frame(context->clip,
+ context->depsgraph_time);
+
+ return true;
+}
+
+static void followtrack_evaluate_using_3d_position_object(FollowTrackContext *context,
+ bConstraintOb *cob)
+{
+ Object *camera_object = context->camera_object;
+ MovieTracking *tracking = context->tracking;
+ MovieTrackingTrack *track = context->track;
+ MovieTrackingObject *tracking_object = context->tracking_object;
+
+ /* Matrix of the object which is being solved prior to this constraint. */
+ float obmat[4][4];
+ copy_m4_m4(obmat, cob->matrix);
+
+ /* Object matrix of the camera. */
+ float camera_obmat[4][4];
+ copy_m4_m4(camera_obmat, camera_object->obmat);
+
+ /* Calculate inverted matrix of the solved camera at the current time. */
+ float reconstructed_camera_mat[4][4];
+ BKE_tracking_camera_get_reconstructed_interpolate(
+ tracking, tracking_object, context->clip_frame, reconstructed_camera_mat);
+ float reconstructed_camera_mat_inv[4][4];
+ invert_m4_m4(reconstructed_camera_mat_inv, reconstructed_camera_mat);
+
+ mul_m4_series(cob->matrix, obmat, camera_obmat, reconstructed_camera_mat_inv);
+ translate_m4(cob->matrix, track->bundle_pos[0], track->bundle_pos[1], track->bundle_pos[2]);
+}
+
+static void followtrack_evaluate_using_3d_position_camera(FollowTrackContext *context,
+ bConstraintOb *cob)
+{
+ Object *camera_object = context->camera_object;
+ MovieTrackingTrack *track = context->track;
+
+ /* Matrix of the object which is being solved prior to this constraint. */
+ float obmat[4][4];
+ copy_m4_m4(obmat, cob->matrix);
+
+ float reconstructed_camera_mat[4][4];
+ BKE_tracking_get_camera_object_matrix(camera_object, reconstructed_camera_mat);
+
+ mul_m4_m4m4(cob->matrix, obmat, reconstructed_camera_mat);
+ translate_m4(cob->matrix, track->bundle_pos[0], track->bundle_pos[1], track->bundle_pos[2]);
+}
+
+static void followtrack_evaluate_using_3d_position(FollowTrackContext *context, bConstraintOb *cob)
+{
+ MovieTrackingTrack *track = context->track;
+ if ((track->flag & TRACK_HAS_BUNDLE) == 0) {
+ return;
}
- if (!tracking_object) {
+ if ((context->tracking_object->flag & TRACKING_OBJECT_CAMERA) == 0) {
+ followtrack_evaluate_using_3d_position_object(context, cob);
return;
}
- track = BKE_tracking_track_get_named(tracking, tracking_object, data->track);
+ followtrack_evaluate_using_3d_position_camera(context, cob);
+}
- if (!track) {
+/* Apply undistortion if it is enabled in constraint settings. */
+static void followtrack_undistort_if_needed(FollowTrackContext *context,
+ const int clip_width,
+ const int clip_height,
+ float marker_position[2])
+{
+ if ((context->flag & FOLLOWTRACK_USE_UNDISTORTION) == 0) {
return;
}
- framenr = BKE_movieclip_remap_scene_to_clip_frame(clip, ctime);
+ /* Undistortion needs to happen in pixel space. */
+ marker_position[0] *= clip_width;
+ marker_position[1] *= clip_height;
- if (data->flag & FOLLOWTRACK_USE_3D_POSITION) {
- if (track->flag & TRACK_HAS_BUNDLE) {
- float obmat[4][4], mat[4][4];
+ BKE_tracking_undistort_v2(
+ context->tracking, clip_width, clip_height, marker_position, marker_position);
- copy_m4_m4(obmat, cob->matrix);
+ /* Normalize pixel coordinates back. */
+ marker_position[0] /= clip_width;
+ marker_position[1] /= clip_height;
+}
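
As the comments above note, the undistortion call operates in pixel space, so the normalized marker position is scaled to pixels, undistorted, and normalized again. A standalone sketch of that wrap/unwrap pattern, with a dummy correction standing in for Blender's real distortion model:

#include <stdio.h>

/* Placeholder for the real lens undistortion; illustration only. */
static void undistort_pixel(float co[2])
{
  co[0] += 0.5f;
  co[1] -= 0.25f;
}

static void undistort_normalized(float marker[2], int width, int height)
{
  /* Normalized -> pixels, since undistortion happens in pixel space. */
  marker[0] *= width;
  marker[1] *= height;

  undistort_pixel(marker);

  /* Pixels -> normalized. */
  marker[0] /= width;
  marker[1] /= height;
}

int main(void)
{
  float marker[2] = {0.5f, 0.5f};
  undistort_normalized(marker, 1920, 1080);
  printf("%.5f %.5f\n", marker[0], marker[1]);
  return 0;
}
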
- if ((tracking_object->flag & TRACKING_OBJECT_CAMERA) == 0) {
- float imat[4][4];
+/* Modify the marker position to match the frame fitting method. */
+/* Modify the marker position to match the frame fitting method. */
+static void followtrack_fit_frame(FollowTrackContext *context,
+ const int clip_width,
+ const int clip_height,
+ float marker_position[2])
+{
+ if (context->frame_method == FOLLOWTRACK_FRAME_STRETCH) {
+ return;
+ }
- copy_m4_m4(mat, camob->obmat);
+ Scene *scene = context->scene;
+ MovieClip *clip = context->clip;
- BKE_tracking_camera_get_reconstructed_interpolate(
- tracking, tracking_object, framenr, imat);
- invert_m4(imat);
+ /* apply clip display aspect */
+ const float w_src = clip_width * clip->aspx;
+ const float h_src = clip_height * clip->aspy;
- mul_m4_series(cob->matrix, obmat, mat, imat);
- translate_m4(
- cob->matrix, track->bundle_pos[0], track->bundle_pos[1], track->bundle_pos[2]);
- }
- else {
- BKE_tracking_get_camera_object_matrix(camob, mat);
+ const float w_dst = scene->r.xsch * scene->r.xasp;
+ const float h_dst = scene->r.ysch * scene->r.yasp;
- mul_m4_m4m4(cob->matrix, obmat, mat);
- translate_m4(
- cob->matrix, track->bundle_pos[0], track->bundle_pos[1], track->bundle_pos[2]);
- }
- }
+ const float asp_src = w_src / h_src;
+ const float asp_dst = w_dst / h_dst;
+
+ if (fabsf(asp_src - asp_dst) < FLT_EPSILON) {
+ return;
+ }
+
+ if ((asp_src > asp_dst) == (context->frame_method == FOLLOWTRACK_FRAME_CROP)) {
+ /* fit X */
+ float div = asp_src / asp_dst;
+ float cent = (float)clip_width / 2.0f;
+
+ marker_position[0] = (((marker_position[0] * clip_width - cent) * div) + cent) / clip_width;
}
else {
- float vec[3], disp[3], axis[3], mat[4][4];
- float aspect = (scene->r.xsch * scene->r.xasp) / (scene->r.ysch * scene->r.yasp);
- float len, d;
+ /* fit Y */
+ float div = asp_dst / asp_src;
+ float cent = (float)clip_height / 2.0f;
- BKE_object_where_is_calc_mat4(camob, mat);
+ marker_position[1] = (((marker_position[1] * clip_height - cent) * div) + cent) / clip_height;
+ }
+}
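
Both fit branches remap the marker coordinate about the frame center by the ratio of the source and destination aspect ratios. A standalone numeric sketch of the X-fit remap (hypothetical helper, plain C):

#include <stdio.h>

/* Remap a normalized X coordinate about the frame center by div = asp_src / asp_dst,
 * matching the "fit X" branch above. */
static float fit_x(float x_normalized, int clip_width, float asp_src, float asp_dst)
{
  const float div = asp_src / asp_dst;
  const float cent = (float)clip_width / 2.0f;
  return (((x_normalized * clip_width - cent) * div) + cent) / clip_width;
}

int main(void)
{
  /* A 2.35:1 clip shown in a 16:9 scene: off-center points move outward,
   * while the center (0.5) stays fixed. */
  const float asp_src = 2.35f;
  const float asp_dst = 16.0f / 9.0f;
  printf("0.25 -> %.4f\n", fit_x(0.25f, 1920, asp_src, asp_dst));
  printf("0.50 -> %.4f\n", fit_x(0.50f, 1920, asp_src, asp_dst));
  return 0;
}
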
- /* camera axis */
- vec[0] = 0.0f;
- vec[1] = 0.0f;
- vec[2] = 1.0f;
- mul_v3_m4v3(axis, mat, vec);
+/* Effectively this is the Z-depth of the object from the movie clip camera.
+ * The idea is to preserve this depth while moving the object in 2D. */
+static float followtrack_distance_from_viewplane_get(FollowTrackContext *context,
+ bConstraintOb *cob)
+{
+ Object *camera_object = context->camera_object;
- /* distance to projection plane */
- copy_v3_v3(vec, cob->matrix[3]);
- sub_v3_v3(vec, mat[3]);
- project_v3_v3v3(disp, vec, axis);
+ float camera_matrix[4][4];
+ BKE_object_where_is_calc_mat4(camera_object, camera_matrix);
- len = len_v3(disp);
+ const float z_axis[3] = {0.0f, 0.0f, 1.0f};
- if (len > FLT_EPSILON) {
- CameraParams params;
- int width, height;
- float pos[2], rmat[4][4];
+ /* Direction of camera's local Z axis in the world space. */
+ float camera_axis[3];
+ mul_v3_mat3_m4v3(camera_axis, camera_matrix, z_axis);
- BKE_movieclip_get_size(clip, NULL, &width, &height);
- BKE_tracking_marker_get_subframe_position(track, framenr, pos);
+ /* Distance to projection plane. */
+ float vec[3];
+ copy_v3_v3(vec, cob->matrix[3]);
+ sub_v3_v3(vec, camera_matrix[3]);
- if (data->flag & FOLLOWTRACK_USE_UNDISTORTION) {
- /* Undistortion need to happen in pixel space. */
- pos[0] *= width;
- pos[1] *= height;
+ float projection[3];
+ project_v3_v3v3(projection, vec, camera_axis);
- BKE_tracking_undistort_v2(tracking, pos, pos);
+ return len_v3(projection);
+}
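
The preserved depth is the length of the object-to-camera offset projected onto the camera's local Z axis. A standalone sketch of that projection, using plain vector math instead of Blender's BLI_math calls:

#include <math.h>
#include <stdio.h>

static float dot_v3(const float a[3], const float b[3])
{
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

/* Length of the component of (object - camera) along the camera's Z axis,
 * i.e. the depth that the 2D placement must preserve. */
static float distance_from_viewplane(const float object_co[3],
                                     const float camera_co[3],
                                     const float camera_z_axis[3])
{
  const float offset[3] = {
      object_co[0] - camera_co[0],
      object_co[1] - camera_co[1],
      object_co[2] - camera_co[2],
  };
  /* project_v3_v3v3() equivalent: scale the axis by (offset . axis) / |axis|^2,
   * then take the length of that projection. */
  const float scale = dot_v3(offset, camera_z_axis) / dot_v3(camera_z_axis, camera_z_axis);
  return fabsf(scale) * sqrtf(dot_v3(camera_z_axis, camera_z_axis));
}

int main(void)
{
  const float object_co[3] = {1.0f, 2.0f, -5.0f};
  const float camera_co[3] = {0.0f, 0.0f, 0.0f};
  const float camera_z[3] = {0.0f, 0.0f, 1.0f};
  printf("depth: %.2f\n", distance_from_viewplane(object_co, camera_co, camera_z));
  return 0;
}
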
- /* Normalize pixel coordinates back. */
- pos[0] /= width;
- pos[1] /= height;
- }
+/* Project the evaluated constraint object onto the surface of the depth object. */
+static void followtrack_project_to_depth_object_if_needed(FollowTrackContext *context,
+ bConstraintOb *cob)
+{
+ if (context->depth_object == NULL) {
+ return;
+ }
- /* aspect correction */
- if (data->frame_method != FOLLOWTRACK_FRAME_STRETCH) {
- float w_src, h_src, w_dst, h_dst, asp_src, asp_dst;
+ Object *depth_object = context->depth_object;
+ Mesh *depth_mesh = BKE_object_get_evaluated_mesh(depth_object);
+ if (depth_mesh == NULL) {
+ return;
+ }
- /* apply clip display aspect */
- w_src = width * clip->aspx;
- h_src = height * clip->aspy;
+ float depth_object_mat_inv[4][4];
+ invert_m4_m4(depth_object_mat_inv, depth_object->obmat);
- w_dst = scene->r.xsch * scene->r.xasp;
- h_dst = scene->r.ysch * scene->r.yasp;
+ float ray_start[3], ray_end[3];
+ mul_v3_m4v3(ray_start, depth_object_mat_inv, context->camera_object->obmat[3]);
+ mul_v3_m4v3(ray_end, depth_object_mat_inv, cob->matrix[3]);
- asp_src = w_src / h_src;
- asp_dst = w_dst / h_dst;
+ float ray_direction[3];
+ sub_v3_v3v3(ray_direction, ray_end, ray_start);
+ normalize_v3(ray_direction);
- if (fabsf(asp_src - asp_dst) >= FLT_EPSILON) {
- if ((asp_src > asp_dst) == (data->frame_method == FOLLOWTRACK_FRAME_CROP)) {
- /* fit X */
- float div = asp_src / asp_dst;
- float cent = (float)width / 2.0f;
+ BVHTreeFromMesh tree_data = NULL_BVHTreeFromMesh;
+ BKE_bvhtree_from_mesh_get(&tree_data, depth_mesh, BVHTREE_FROM_LOOPTRI, 4);
- pos[0] = (((pos[0] * width - cent) * div) + cent) / width;
- }
- else {
- /* fit Y */
- float div = asp_dst / asp_src;
- float cent = (float)height / 2.0f;
+ BVHTreeRayHit hit;
+ hit.dist = BVH_RAYCAST_DIST_MAX;
+ hit.index = -1;
- pos[1] = (((pos[1] * height - cent) * div) + cent) / height;
- }
- }
- }
+ const int result = BLI_bvhtree_ray_cast(tree_data.tree,
+ ray_start,
+ ray_direction,
+ 0.0f,
+ &hit,
+ tree_data.raycast_callback,
+ &tree_data);
- BKE_camera_params_init(&params);
- BKE_camera_params_from_object(&params, camob);
+ if (result != -1) {
+ mul_v3_m4v3(cob->matrix[3], depth_object->obmat, hit.co);
+ }
- if (params.is_ortho) {
- vec[0] = params.ortho_scale * (pos[0] - 0.5f + params.shiftx);
- vec[1] = params.ortho_scale * (pos[1] - 0.5f + params.shifty);
- vec[2] = -len;
+ free_bvhtree_from_mesh(&tree_data);
+}
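
The ray is set up in the depth object's local space: the camera position and the constrained object's position are both transformed by the inverse of the depth object's matrix, and a successful hit is transformed back. A standalone sketch of the same ray construction, cast against a single ground plane instead of a BVH tree:

#include <math.h>
#include <stdbool.h>
#include <stdio.h>

/* Intersect a ray (start, direction) with the plane z = 0; stands in for the
 * BVH ray-cast against the depth object's evaluated mesh. */
static bool ray_hit_ground_plane(const float start[3], const float dir[3], float r_hit[3])
{
  if (fabsf(dir[2]) < 1e-6f) {
    return false; /* Ray parallel to the plane. */
  }
  const float t = -start[2] / dir[2];
  if (t < 0.0f) {
    return false; /* Plane is behind the ray origin. */
  }
  r_hit[0] = start[0] + t * dir[0];
  r_hit[1] = start[1] + t * dir[1];
  r_hit[2] = 0.0f;
  return true;
}

int main(void)
{
  /* Ray from the camera through the constrained object, as in the helper above. */
  const float camera_co[3] = {0.0f, 0.0f, 10.0f};
  const float object_co[3] = {1.0f, 1.0f, 5.0f};

  float dir[3] = {object_co[0] - camera_co[0],
                  object_co[1] - camera_co[1],
                  object_co[2] - camera_co[2]};
  const float len = sqrtf(dir[0] * dir[0] + dir[1] * dir[1] + dir[2] * dir[2]);
  dir[0] /= len;
  dir[1] /= len;
  dir[2] /= len;

  float hit[3];
  if (ray_hit_ground_plane(camera_co, dir, hit)) {
    printf("snapped to: %.2f %.2f %.2f\n", hit[0], hit[1], hit[2]);
  }
  return 0;
}
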
- if (aspect > 1.0f) {
- vec[1] /= aspect;
- }
- else {
- vec[0] *= aspect;
- }
+static void followtrack_evaluate_using_2d_position(FollowTrackContext *context, bConstraintOb *cob)
+{
+ Scene *scene = context->scene;
+ MovieClip *clip = context->clip;
+ MovieTrackingTrack *track = context->track;
+ Object *camera_object = context->camera_object;
+ const float clip_frame = context->clip_frame;
+ const float aspect = (scene->r.xsch * scene->r.xasp) / (scene->r.ysch * scene->r.yasp);
- mul_v3_m4v3(disp, camob->obmat, vec);
+ const float object_depth = followtrack_distance_from_viewplane_get(context, cob);
+ if (object_depth < FLT_EPSILON) {
+ return;
+ }
- copy_m4_m4(rmat, camob->obmat);
- zero_v3(rmat[3]);
- mul_m4_m4m4(cob->matrix, cob->matrix, rmat);
+ int clip_width, clip_height;
+ BKE_movieclip_get_size(clip, NULL, &clip_width, &clip_height);
- copy_v3_v3(cob->matrix[3], disp);
- }
- else {
- d = (len * params.sensor_x) / (2.0f * params.lens);
+ float marker_position[2];
+ BKE_tracking_marker_get_subframe_position(track, clip_frame, marker_position);
- vec[0] = d * (2.0f * (pos[0] + params.shiftx) - 1.0f);
- vec[1] = d * (2.0f * (pos[1] + params.shifty) - 1.0f);
- vec[2] = -len;
+ followtrack_undistort_if_needed(context, clip_width, clip_height, marker_position);
+ followtrack_fit_frame(context, clip_width, clip_height, marker_position);
- if (aspect > 1.0f) {
- vec[1] /= aspect;
- }
- else {
- vec[0] *= aspect;
- }
+ float rmat[4][4];
+ CameraParams params;
+ BKE_camera_params_init(&params);
+ BKE_camera_params_from_object(&params, camera_object);
- mul_v3_m4v3(disp, camob->obmat, vec);
+ if (params.is_ortho) {
+ float vec[3];
+ vec[0] = params.ortho_scale * (marker_position[0] - 0.5f + params.shiftx);
+ vec[1] = params.ortho_scale * (marker_position[1] - 0.5f + params.shifty);
+ vec[2] = -object_depth;
- /* apply camera rotation so Z-axis would be co-linear */
- copy_m4_m4(rmat, camob->obmat);
- zero_v3(rmat[3]);
- mul_m4_m4m4(cob->matrix, cob->matrix, rmat);
+ if (aspect > 1.0f) {
+ vec[1] /= aspect;
+ }
+ else {
+ vec[0] *= aspect;
+ }
- copy_v3_v3(cob->matrix[3], disp);
- }
+ float disp[3];
+ mul_v3_m4v3(disp, camera_object->obmat, vec);
- if (data->depth_ob) {
- Object *depth_ob = data->depth_ob;
- Mesh *target_eval = BKE_object_get_evaluated_mesh(depth_ob);
- if (target_eval) {
- BVHTreeFromMesh treeData = NULL_BVHTreeFromMesh;
- BVHTreeRayHit hit;
- float ray_start[3], ray_end[3], ray_nor[3], imat[4][4];
- int result;
+ copy_m4_m4(rmat, camera_object->obmat);
+ zero_v3(rmat[3]);
+ mul_m4_m4m4(cob->matrix, cob->matrix, rmat);
- invert_m4_m4(imat, depth_ob->obmat);
+ copy_v3_v3(cob->matrix[3], disp);
+ }
+ else {
+ const float d = (object_depth * params.sensor_x) / (2.0f * params.lens);
- mul_v3_m4v3(ray_start, imat, camob->obmat[3]);
- mul_v3_m4v3(ray_end, imat, cob->matrix[3]);
+ float vec[3];
+ vec[0] = d * (2.0f * (marker_position[0] + params.shiftx) - 1.0f);
+ vec[1] = d * (2.0f * (marker_position[1] + params.shifty) - 1.0f);
+ vec[2] = -object_depth;
- sub_v3_v3v3(ray_nor, ray_end, ray_start);
- normalize_v3(ray_nor);
+ if (aspect > 1.0f) {
+ vec[1] /= aspect;
+ }
+ else {
+ vec[0] *= aspect;
+ }
- BKE_bvhtree_from_mesh_get(&treeData, target_eval, BVHTREE_FROM_LOOPTRI, 4);
+ float disp[3];
+ mul_v3_m4v3(disp, camera_object->obmat, vec);
- hit.dist = BVH_RAYCAST_DIST_MAX;
- hit.index = -1;
+ /* apply camera rotation so Z-axis would be co-linear */
+ copy_m4_m4(rmat, camera_object->obmat);
+ zero_v3(rmat[3]);
+ mul_m4_m4m4(cob->matrix, cob->matrix, rmat);
- result = BLI_bvhtree_ray_cast(
- treeData.tree, ray_start, ray_nor, 0.0f, &hit, treeData.raycast_callback, &treeData);
+ copy_v3_v3(cob->matrix[3], disp);
+ }
- if (result != -1) {
- mul_v3_m4v3(cob->matrix[3], depth_ob->obmat, hit.co);
- }
+ followtrack_project_to_depth_object_if_needed(context, cob);
+}
- free_bvhtree_from_mesh(&treeData);
- }
- }
- }
+static void followtrack_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *UNUSED(targets))
+{
+ FollowTrackContext context;
+ if (!followtrack_context_init(&context, con, cob)) {
+ return;
+ }
+
+ bFollowTrackConstraint *data = con->data;
+ if (data->flag & FOLLOWTRACK_USE_3D_POSITION) {
+ followtrack_evaluate_using_3d_position(&context, cob);
+ return;
}
+
+ followtrack_evaluate_using_2d_position(&context, cob);
}
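
In the perspective branch the marker is lifted back into camera space at the preserved depth: the view-plane half-extent at that depth is d = (depth * sensor_x) / (2 * lens), the normalized coordinates are remapped from [0, 1] to [-d, d] with the lens shift applied, and Z is set to -depth because the camera looks down its negative Z axis. A standalone sketch of that reconstruction, using a hypothetical SimpleCamera struct rather than Blender's CameraParams:

#include <stdio.h>

typedef struct SimpleCamera {
  float sensor_x; /* Sensor width in millimeters. */
  float lens;     /* Focal length in millimeters. */
  float shiftx, shifty;
} SimpleCamera;

/* Camera-space position of a normalized marker at a given depth, mirroring the
 * perspective branch above (render aspect handling omitted for brevity). */
static void marker_to_camera_space(const SimpleCamera *cam,
                                   const float marker[2],
                                   float depth,
                                   float r_co[3])
{
  const float d = (depth * cam->sensor_x) / (2.0f * cam->lens);
  r_co[0] = d * (2.0f * (marker[0] + cam->shiftx) - 1.0f);
  r_co[1] = d * (2.0f * (marker[1] + cam->shifty) - 1.0f);
  r_co[2] = -depth; /* Camera looks down its negative Z axis. */
}

int main(void)
{
  const SimpleCamera cam = {36.0f, 50.0f, 0.0f, 0.0f};
  const float marker[2] = {0.75f, 0.5f};
  float co[3];
  marker_to_camera_space(&cam, marker, 10.0f, co);
  printf("%.3f %.3f %.3f\n", co[0], co[1], co[2]);
  return 0;
}

The resulting point is then transformed by the camera's object matrix and combined with the camera rotation, as in the code above, before the optional depth-object projection.
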
static bConstraintTypeInfo CTI_FOLLOWTRACK = {
@@ -5497,7 +5670,7 @@ void BKE_constraints_active_set(ListBase *list, bConstraint *con)
static bConstraint *constraint_list_find_from_target(ListBase *constraints, bConstraintTarget *tgt)
{
- for (bConstraint *con = constraints->first; con; con = con->next) {
+ LISTBASE_FOREACH (bConstraint *, con, constraints) {
ListBase *targets = NULL;
if (con->type == CONSTRAINT_TYPE_PYTHON) {
@@ -5531,7 +5704,7 @@ bConstraint *BKE_constraint_find_from_target(Object *ob,
}
if (ob->pose != NULL) {
- for (bPoseChannel *pchan = ob->pose->chanbase.first; pchan; pchan = pchan->next) {
+ LISTBASE_FOREACH (bPoseChannel *, pchan, &ob->pose->chanbase) {
result = constraint_list_find_from_target(&pchan->constraints, tgt);
if (result != NULL) {