/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2021 Blender Foundation. */

/** \file
 * \ingroup eevee
 *
 * The velocity pass outputs motion vectors to use for either
 * temporal re-projection or motion blur.
 *
 * It is the module that tracks objects between frame updates.
 */

#include "BKE_duplilist.h"
#include "BKE_object.h"
#include "BLI_map.hh"
#include "BLI_vector.hh"
#include "DEG_depsgraph_query.h"
#include "DNA_rigidbody_types.h"

#include "eevee_instance.hh"
#include "eevee_renderpasses.hh"
#include "eevee_shader.hh"
#include "eevee_shader_shared.hh"
#include "eevee_velocity.hh"
#include "eevee_wrapper.hh"

namespace blender::eevee {

/* -------------------------------------------------------------------- */
/** \name VelocityModule
 *
 * \{ */

void VelocityModule::init(void)
{
  if (inst_.is_viewport()) {
    /* For the viewport we sync when the object is evaluated and swap at init time.
     * Use the next step to store the current position. It becomes the previous step after
     * the next swap. */
    step_ = STEP_NEXT;
    step_swap();
  }

  if (inst_.render && (inst_.render_passes.vector != nullptr)) {
    /* No motion blur and the vector pass was requested. Do the step sync here. */
    const Scene *scene = inst_.scene;
    float initial_time = scene->r.cfra + scene->r.subframe;
    step_sync(STEP_PREVIOUS, initial_time - 1.0f);
    step_sync(STEP_NEXT, initial_time + 1.0f);
    inst_.set_time(initial_time);
  }
}
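/* NOTE: Callback for DRW_render_object_iter(). Forwards each evaluated object to
 * VelocityModule::step_object_sync() so that its matrix gets recorded for the time step
 * currently being synced by step_sync(). */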
static void step_object_sync_render(void *velocity,
                                    Object *ob,
                                    RenderEngine *UNUSED(engine),
                                    Depsgraph *UNUSED(depsgraph))
{
  ObjectKey object_key(ob);
  reinterpret_cast<VelocityModule *>(velocity)->step_object_sync(ob, object_key);
}

void VelocityModule::step_sync(eStep step, float time)
{
  inst_.set_time(time);
  step_ = step;
  step_camera_sync();
  DRW_render_object_iter(this, inst_.render, inst_.depsgraph, step_object_sync_render);
}

void VelocityModule::step_camera_sync()
{
  if (!inst_.is_viewport()) {
    inst_.camera.sync();
  }
  if (step_ == STEP_NEXT) {
    camera_step.next = inst_.camera.data_get();
  }
  else if (step_ == STEP_PREVIOUS) {
    camera_step.prev = inst_.camera.data_get();
  }
}

/* Gather motion data from all objects in the scene. */
void VelocityModule::step_object_sync(Object *ob, ObjectKey &object_key)
{
  if (!object_has_velocity(ob) && !object_is_deform(ob)) {
    return;
  }

  auto add_cb = [&]() {
    inst_.sampling.reset();
    return new VelocityObjectBuf();
  };

  auto data = objects_steps.lookup_or_add_cb(object_key, add_cb);

  if (step_ == STEP_NEXT) {
    data->next_object_mat = ob->obmat;
  }
  else if (step_ == STEP_PREVIOUS) {
    data->prev_object_mat = ob->obmat;
  }
}

/* Move next frame data to previous frame data and nullify the next frame data. */
void VelocityModule::step_swap(void)
{
  for (VelocityObjectBuf *data : objects_steps.values()) {
    data->prev_object_mat = data->next_object_mat;
    /* Important: This lets us know if the object is missing from the next time step. */
    zero_m4(data->next_object_mat.ptr());
  }
  camera_step.prev = static_cast<CameraData>(camera_step.next);
}

void VelocityModule::begin_sync(void)
{
  if (inst_.is_viewport()) {
    step_camera_sync();
  }
}

/* This is the end of the current frame sync, not of step_sync(). */
void VelocityModule::end_sync(void)
{
  Vector<ObjectKey> deleted_keys;
  for (auto item : objects_steps.items()) {
    /* Detect object deletion. Only do this in the viewport, as STEP_NEXT means the current
     * step. */
    if (inst_.is_viewport() && is_zero_m4(item.value->next_object_mat.ptr())) {
      deleted_keys.append(item.key);
      delete item.value;
    }
    else {
      item.value->push_update();
    }
  }

  if (deleted_keys.size() > 0) {
    inst_.sampling.reset();
  }

  for (auto key : deleted_keys) {
    objects_steps.remove(key);
  }

  camera_step.prev.push_update();
  camera_step.next.push_update();
}

bool VelocityModule::object_has_velocity(const Object *ob)
{
#if 0
  RigidBodyOb *rbo = ob->rigidbody_object;
  /* Active rigidbody objects only, as only those are affected by sim. */
  const bool has_rigidbody = (rbo && (rbo->type == RBO_TYPE_ACTIVE));
  /* For now we assume dupli objects are moving. */
  const bool is_dupli = (ob->base_flag & BASE_FROM_DUPLI) != 0;
  const bool object_moves = is_dupli || has_rigidbody || BKE_object_moves_in_time(ob, true);
#else
  UNUSED_VARS(ob);
  /* BKE_object_moves_in_time does not work in some cases.
   * Better to detect non-moving objects after evaluation. */
  const bool object_moves = true;
#endif
  return object_moves;
}

bool VelocityModule::object_is_deform(const Object *ob)
{
  RigidBodyOb *rbo = ob->rigidbody_object;
  /* Active rigidbody objects only, as only those are affected by sim. */
  const bool has_rigidbody = (rbo && (rbo->type == RBO_TYPE_ACTIVE));
  const bool is_deform = BKE_object_is_deform_modified(inst_.scene, (Object *)ob) ||
                         (has_rigidbody && (rbo->flag & RBO_FLAG_USE_DEFORM) != 0);
  return is_deform;
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name VelocityPass
 *
 * Draws velocity data from the VelocityModule to a framebuffer / texture.
 * \{ */

void VelocityPass::sync(void)
{
  VelocityModule &velocity = inst_.velocity;
  {
    /* Outputs camera motion vector. */
    /* TODO(fclem) Ideally, we should run this only where the motion vectors were not written.
     * But without imageLoadStore, we cannot do that without another buffer. */
    DRWState state = DRW_STATE_WRITE_COLOR;
    DRW_PASS_CREATE(camera_ps_, state);
    GPUShader *sh = inst_.shaders.static_shader_get(VELOCITY_CAMERA);
    DRWShadingGroup *grp = DRW_shgroup_create(sh, camera_ps_);
    DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &input_depth_tx_);
    DRW_shgroup_uniform_block(grp, "cam_prev", velocity.camera_step.prev);
    DRW_shgroup_uniform_block(grp, "cam_next", velocity.camera_step.next);
    DRW_shgroup_uniform_block(grp, "cam_curr", inst_.camera.ubo_get());
    DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
  }
  {
    /* Animated objects are rendered and output the correct motion vector. */
    DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL;
    DRW_PASS_CREATE(object_ps_, state);
    {
      GPUShader *sh = inst_.shaders.static_shader_get(VELOCITY_MESH);
      DRWShadingGroup *grp = mesh_grp_ = DRW_shgroup_create(sh, object_ps_);
      DRW_shgroup_uniform_block(grp, "cam_prev", velocity.camera_step.prev);
      DRW_shgroup_uniform_block(grp, "cam_next", velocity.camera_step.next);
      DRW_shgroup_uniform_block(grp, "cam_curr", inst_.camera.ubo_get());
    }
  }
}
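/* Register an object in the object velocity pass.
 * Matrices missing because the object was hidden in the previous or next step are filled with
 * the current matrix, and objects whose matrices did not change are skipped here, since
 * object_has_velocity() currently reports every object as moving. */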
void VelocityPass::mesh_add(Object *ob, ObjectHandle &handle)
{
  if (inst_.is_viewport()) {
    /* FIXME(fclem) As we are using original object pointers, there is a chance that the previous
     * object key matches a totally different object if the scene was changed by the user or a
     * python callback. In this case, we cannot correctly match objects between updates.
     * This means that these objects will have incorrect motion vectors.
     * We live with that until we have a correct way of identifying new objects. */
    if (handle.recalc & ID_RECALC_TRANSFORM) {
      inst_.velocity.step_object_sync(ob, handle.object_key);
    }
  }

  VelocityObjectBuf **data_ptr = inst_.velocity.objects_steps.lookup_ptr(handle.object_key);

  if (data_ptr == nullptr) {
    return;
  }

  VelocityObjectBuf *data = *data_ptr;

  GPUBatch *geom = DRW_cache_object_surface_get(ob);
  if (geom == nullptr) {
    return;
  }

  if (!inst_.is_viewport()) {
    /* Fill missing matrices if the object was hidden in previous or next frame. */
    if (is_zero_m4(data->prev_object_mat.ptr())) {
      copy_m4_m4(data->prev_object_mat.ptr(), ob->obmat);
    }
    /* Avoid drawing objects that have no motion since object_moves is always true. */
    if (/* !mb_geom->use_deform && */ /* Object deformation can happen without transform. */
        equals_m4m4(data->prev_object_mat.ptr(), ob->obmat)) {
      return;
    }
  }
  else {
    /* Fill missing matrices if the object was hidden in previous or next frame. */
    if (is_zero_m4(data->prev_object_mat.ptr())) {
      data->prev_object_mat = ob->obmat;
    }
    if (is_zero_m4(data->next_object_mat.ptr())) {
      data->next_object_mat = ob->obmat;
    }

    // if (mb_geom->use_deform) {
    //   /* Keep to modify later (after init). */
    //   mb_geom->batch = geom;
    // }

    /* Avoid drawing objects that have no motion since object_moves is always true. */
    if (/* !mb_geom->use_deform && */ /* Object deformation can happen without transform. */
        equals_m4m4(data->prev_object_mat.ptr(), ob->obmat) &&
        equals_m4m4(data->next_object_mat.ptr(), ob->obmat)) {
      return;
    }
  }

  /* TODO(fclem) Use the same layout as modelBlock from draw so we can reuse the same offset
   * and avoid the overhead of 1 shading group and one UBO per object. */
  DRWShadingGroup *grp = DRW_shgroup_create_sub(mesh_grp_);
  DRW_shgroup_uniform_block(grp, "velocity", *data);
  DRW_shgroup_call(grp, geom, ob);
}

void VelocityPass::render_objects(void)
{
  DRW_draw_pass(object_ps_);
}

void VelocityPass::resolve_camera_motion(GPUTexture *depth_tx)
{
  input_depth_tx_ = depth_tx;
  DRW_draw_pass(camera_ps_);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Velocity
 *
 * Owns the per-view velocity textures / framebuffers and runs the velocity passes.
 * \{ */

void Velocity::sync(int extent[2])
{
  /* HACK: View name should be unique and static.
   * With this, we can reuse the same texture across views. */
  DrawEngineType *owner = (DrawEngineType *)view_name_.c_str();

  /* TODO(fclem) Only allocate if needed. RG16F when only doing reprojection. */
  velocity_camera_tx_ = DRW_texture_pool_query_2d(UNPACK2(extent), GPU_RGBA16F, owner);
  /* TODO(fclem) Only allocate if needed. RG16F when only doing motion blur post fx in
   * panoramic camera. */
  velocity_view_tx_ = DRW_texture_pool_query_2d(UNPACK2(extent), GPU_RGBA16F, owner);

  velocity_only_fb_.ensure(GPU_ATTACHMENT_NONE,
                           GPU_ATTACHMENT_TEXTURE(velocity_camera_tx_),
                           GPU_ATTACHMENT_TEXTURE(velocity_view_tx_));
}

void Velocity::render(GPUTexture *depth_tx)
{
  DRW_stats_group_start("Velocity");

  /* Resolve camera motion for every pixel (no depth attachment, so no depth test). */
  GPU_framebuffer_bind(velocity_only_fb_);
  inst_.shading_passes.velocity.resolve_camera_motion(depth_tx);

  /* Then overwrite with per-object motion where the object pass matches the depth buffer. */
  velocity_fb_.ensure(GPU_ATTACHMENT_TEXTURE(depth_tx),
                      GPU_ATTACHMENT_TEXTURE(velocity_camera_tx_),
                      GPU_ATTACHMENT_TEXTURE(velocity_view_tx_));
  GPU_framebuffer_bind(velocity_fb_);
  inst_.shading_passes.velocity.render_objects();

  DRW_stats_group_end();
}

/** \} */

}  // namespace blender::eevee