git.blender.org/blender.git
-rw-r--r-- source/blender/draw/CMakeLists.txt                                              |   1
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_camera.cc                          |  18
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_camera.hh                          |  18
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_film.cc                            |  44
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_film.hh                            |  11
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_instance.cc                        |   2
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc                   |   7
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_sampling.cc                        |  29
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_sampling.hh                        |  25
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_shader.cc                          |   2
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_shader.hh                          |   2
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_shader_shared.hh                   |  10
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_velocity.cc                        |  89
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_velocity.hh                        |  61
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_view.cc                            |  15
-rw-r--r-- source/blender/draw/engines/eevee_next/eevee_view.hh                            |   3
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl            |  20
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl             |   2
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl              | 373
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl       |  11
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl          |  87
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl |  58
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh         |   8
-rw-r--r-- source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh     |  21
24 files changed, 517 insertions(+), 400 deletions(-)
diff --git a/source/blender/draw/CMakeLists.txt b/source/blender/draw/CMakeLists.txt
index 55d789f64b0..d20745f28c0 100644
--- a/source/blender/draw/CMakeLists.txt
+++ b/source/blender/draw/CMakeLists.txt
@@ -375,7 +375,6 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_surf_lib.glsl
engines/eevee_next/shaders/eevee_surf_world_frag.glsl
engines/eevee_next/shaders/eevee_velocity_lib.glsl
- engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
engines/eevee_next/eevee_defines.hh
engines/eevee_next/eevee_shader_shared.hh
diff --git a/source/blender/draw/engines/eevee_next/eevee_camera.cc b/source/blender/draw/engines/eevee_next/eevee_camera.cc
index 1f65f887d46..b9040f0f3ab 100644
--- a/source/blender/draw/engines/eevee_next/eevee_camera.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_camera.cc
@@ -29,10 +29,8 @@ namespace blender::eevee {
void Camera::init()
{
const Object *camera_eval = inst_.camera_eval_object;
- synced_ = false;
- data_.swap();
- CameraData &data = data_.current();
+ CameraData &data = data_;
if (camera_eval) {
const ::Camera *cam = reinterpret_cast<const ::Camera *>(camera_eval->data);
@@ -78,9 +76,7 @@ void Camera::sync()
{
const Object *camera_eval = inst_.camera_eval_object;
- data_.swap();
-
- CameraData &data = data_.current();
+ CameraData &data = data_;
if (inst_.drw_view) {
DRW_view_viewmat_get(inst_.drw_view, data.viewmat.ptr(), false);
@@ -142,14 +138,8 @@ void Camera::sync()
data.equirect_scale = float2(0.0f);
}
- data_.current().push_update();
-
- synced_ = true;
-
- /* Detect changes in parameters. */
- if (data_.current() != data_.previous()) {
- inst_.sampling.reset();
- }
+ data_.initialized = true;
+ data_.push_update();
}
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/eevee_camera.hh b/source/blender/draw/engines/eevee_next/eevee_camera.hh
index 3b3586190c4..8bf64199246 100644
--- a/source/blender/draw/engines/eevee_next/eevee_camera.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_camera.hh
@@ -83,9 +83,7 @@ class Camera {
Instance &inst_;
/** Double buffered to detect changes and have history for re-projection. */
- SwapChain<CameraDataBuf, 2> data_;
- /** Detects wrong usage. */
- bool synced_ = false;
+ CameraDataBuf data_;
public:
Camera(Instance &inst) : inst_(inst){};
@@ -99,28 +97,28 @@ class Camera {
**/
const CameraData &data_get() const
{
- BLI_assert(synced_);
- return data_.current();
+ BLI_assert(data_.initialized);
+ return data_;
}
const GPUUniformBuf *ubo_get() const
{
- return data_.current();
+ return data_;
}
bool is_panoramic() const
{
- return eevee::is_panoramic(data_.current().type);
+ return eevee::is_panoramic(data_.type);
}
bool is_orthographic() const
{
- return data_.current().type == CAMERA_ORTHO;
+ return data_.type == CAMERA_ORTHO;
}
const float3 &position() const
{
- return *reinterpret_cast<const float3 *>(data_.current().viewinv[3]);
+ return *reinterpret_cast<const float3 *>(data_.viewinv[3]);
}
const float3 &forward() const
{
- return *reinterpret_cast<const float3 *>(data_.current().viewinv[2]);
+ return *reinterpret_cast<const float3 *>(data_.viewinv[2]);
}
};
diff --git a/source/blender/draw/engines/eevee_next/eevee_film.cc b/source/blender/draw/engines/eevee_next/eevee_film.cc
index 1fd4c278c88..a1becaed9c4 100644
--- a/source/blender/draw/engines/eevee_next/eevee_film.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_film.cc
@@ -163,11 +163,13 @@ inline bool operator!=(const FilmData &a, const FilmData &b)
void Film::init(const int2 &extent, const rcti *output_rect)
{
+ Sampling &sampling = inst_.sampling;
+
init_aovs();
{
/* Enable passes that need to be rendered. */
- eViewLayerEEVEEPassType render_passes;
+ eViewLayerEEVEEPassType render_passes = eViewLayerEEVEEPassType(0);
if (inst_.is_viewport()) {
/* Viewport Case. */
@@ -178,6 +180,8 @@ void Film::init(const int2 &extent, const rcti *output_rect)
* Using the render pass ensure we store the center depth. */
render_passes |= EEVEE_RENDER_PASS_Z;
}
+ /* TEST */
+ render_passes |= EEVEE_RENDER_PASS_VECTOR;
}
else {
/* Render Case. */
@@ -211,7 +215,7 @@ void Film::init(const int2 &extent, const rcti *output_rect)
/* TODO(@fclem): Can't we rely on depsgraph update notification? */
if (assign_if_different(enabled_passes_, render_passes)) {
- inst_.sampling.reset();
+ sampling.reset();
}
}
{
@@ -224,14 +228,18 @@ void Film::init(const int2 &extent, const rcti *output_rect)
FilmData data = data_;
data.extent = int2(BLI_rcti_size_x(output_rect), BLI_rcti_size_y(output_rect));
data.offset = int2(output_rect->xmin, output_rect->ymin);
- data.filter_size = clamp_f(inst_.scene->r.gauss, 0.0f, 100.0f);
+ data.extent_inv = 1.0f / float2(data.extent);
+ /* Disable filtering if sample count is 1. */
+ data.filter_size = (sampling.sample_count() == 1) ?
+ 0.0f :
+ clamp_f(inst_.scene->r.gauss, 0.0f, 100.0f);
/* TODO(fclem): parameter hidden in experimental.
* We need to figure out LOD bias first in order to preserve texture crispiness. */
data.scaling_factor = 1;
FilmData &data_prev_ = data_;
if (assign_if_different(data_prev_, data)) {
- inst_.sampling.reset();
+ sampling.reset();
}
const eViewLayerEEVEEPassType data_passes = EEVEE_RENDER_PASS_Z | EEVEE_RENDER_PASS_NORMAL |
@@ -325,7 +333,7 @@ void Film::init(const int2 &extent, const rcti *output_rect)
(data_.value_len > 0) ? data_.value_len : 1);
if (reset > 0) {
- inst_.sampling.reset();
+ sampling.reset();
data_.use_history = 0;
data_.use_reprojection = 0;
@@ -349,12 +357,22 @@ void Film::sync()
/* TODO(fclem): Shader variation for panoramic & scaled resolution. */
RenderBuffers &rbuffers = inst_.render_buffers;
+ VelocityModule &velocity = inst_.velocity;
+
+ eGPUSamplerState filter = GPU_SAMPLER_FILTER;
+
+ /* For viewport, only previous motion is supported.
+ * Still bind the previous step in place of the next one to avoid undefined behavior. */
+ eVelocityStep step_next = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS;
accumulate_ps_ = DRW_pass_create("Film.Accumulate", state);
GPUShader *sh = inst_.shaders.static_shader_get(shader);
DRWShadingGroup *grp = DRW_shgroup_create(sh, accumulate_ps_);
DRW_shgroup_uniform_block_ref(grp, "film_buf", &data_);
+ DRW_shgroup_uniform_block_ref(grp, "camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
+ DRW_shgroup_uniform_block_ref(grp, "camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
+ DRW_shgroup_uniform_block_ref(grp, "camera_next", &(*velocity.camera_steps[step_next]));
DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &rbuffers.depth_tx);
DRW_shgroup_uniform_texture_ref(grp, "combined_tx", &rbuffers.combined_tx);
DRW_shgroup_uniform_texture_ref(grp, "normal_tx", &rbuffers.normal_tx);
@@ -375,7 +393,7 @@ void Film::sync()
* use image binding instead. */
DRW_shgroup_uniform_image_ref(grp, "in_weight_img", &weight_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "out_weight_img", &weight_tx_.next());
- DRW_shgroup_uniform_image_ref(grp, "in_combined_img", &combined_tx_.current());
+ DRW_shgroup_uniform_texture_ref_ex(grp, "in_combined_tx", &combined_tx_.current(), filter);
DRW_shgroup_uniform_image_ref(grp, "out_combined_img", &combined_tx_.next());
DRW_shgroup_uniform_image_ref(grp, "depth_img", &depth_tx_);
DRW_shgroup_uniform_image_ref(grp, "color_accum_img", &color_accum_tx_);
@@ -395,13 +413,7 @@ void Film::sync()
void Film::end_sync()
{
- if (inst_.sampling.is_reset()) {
- data_.use_history = 0;
- }
-
- // if (camera.changed_type) {
- // data_.use_reprojection = false;
- // }
+ data_.use_reprojection = inst_.sampling.interactive_mode();
aovs_info.push_update();
@@ -429,6 +441,11 @@ float2 Film::pixel_jitter_get() const
return jitter;
}
+eViewLayerEEVEEPassType Film::enabled_passes_get() const
+{
+ return enabled_passes_;
+}
+
void Film::update_sample_table()
{
data_.subpixel_offset = pixel_jitter_get();
@@ -528,7 +545,6 @@ void Film::accumulate(const DRWView *view)
/* Use history after first sample. */
if (data_.use_history == 0) {
data_.use_history = 1;
- data_.use_reprojection = 1;
}
}
diff --git a/source/blender/draw/engines/eevee_next/eevee_film.hh b/source/blender/draw/engines/eevee_next/eevee_film.hh
index 7ffbd4e45c6..c8ffa0e62c9 100644
--- a/source/blender/draw/engines/eevee_next/eevee_film.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_film.hh
@@ -8,6 +8,12 @@
* The film class handles accumulation of samples with any distorted camera_type
* using a pixel filter. Inputs needs to be jittered so that the filter converges to the right
* result.
+ *
+ * In the viewport, we switch between 2 accumulation modes depending on the scene state.
+ * - For a static scene, we use a classic weighted accumulation.
+ * - For a dynamic scene (if an update is detected), we use a more temporally stable accumulation
+ * following the Temporal Anti-Aliasing method (a.k.a. Temporal Super-Sampling). This does
+ * history reprojection and rectification to avoid most of the flickering.
*/
#pragma once
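
The two modes map onto two different running averages (a sketch of the math on our part, not
from the patch):

  static:      c_n = (sum_i w_i * s_i) / (sum_i w_i)       (weighted mean over all samples)
  interactive: c_n = mix(reproject(c_{n-1}), s_n, blend)   (exponential blend)

The default blend factor of roughly 5% of the incoming color comes from
film_history_blend_factor() further down in this patch.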
@@ -75,10 +81,7 @@ class Film {
float2 pixel_jitter_get() const;
- eViewLayerEEVEEPassType enabled_passes_get() const
- {
- return enabled_passes_;
- }
+ eViewLayerEEVEEPassType enabled_passes_get() const;
static bool pass_is_value(eViewLayerEEVEEPassType pass_type)
{
diff --git a/source/blender/draw/engines/eevee_next/eevee_instance.cc b/source/blender/draw/engines/eevee_next/eevee_instance.cc
index 6098d78b81c..9f8cf6dc6ba 100644
--- a/source/blender/draw/engines/eevee_next/eevee_instance.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_instance.cc
@@ -199,7 +199,7 @@ void Instance::render_sync()
**/
void Instance::render_sample()
{
- if (sampling.finished()) {
+ if (sampling.finished_viewport()) {
film.display();
return;
}
diff --git a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc
index 6e30ba989df..c60054496c1 100644
--- a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc
@@ -56,8 +56,13 @@ void RenderBuffers::acquire(int2 extent, void *owner)
depth_tx.acquire(extent, GPU_DEPTH24_STENCIL8, owner);
combined_tx.acquire(extent, color_format, owner);
+ bool do_vector_render_pass = inst_.film.enabled_passes_get() & EEVEE_RENDER_PASS_VECTOR;
+ /* Use RG16F when doing only reprojection or motion blur. */
+ eGPUTextureFormat vector_format = do_vector_render_pass ? GPU_RGBA16F : GPU_RG16F;
+ /* TODO(fclem): Make vector pass allocation optional if no TAA or motion blur is needed. */
+ vector_tx.acquire(extent, vector_format, owner);
+
normal_tx.acquire(pass_extent(EEVEE_RENDER_PASS_NORMAL), color_format, owner);
- vector_tx.acquire(pass_extent(EEVEE_RENDER_PASS_VECTOR), color_format, owner);
diffuse_light_tx.acquire(pass_extent(EEVEE_RENDER_PASS_DIFFUSE_LIGHT), color_format, owner);
diffuse_color_tx.acquire(pass_extent(EEVEE_RENDER_PASS_DIFFUSE_COLOR), color_format, owner);
specular_light_tx.acquire(pass_extent(EEVEE_RENDER_PASS_SPECULAR_LIGHT), color_format, owner);
diff --git a/source/blender/draw/engines/eevee_next/eevee_sampling.cc b/source/blender/draw/engines/eevee_next/eevee_sampling.cc
index 2f180e58a0b..ef2469647ef 100644
--- a/source/blender/draw/engines/eevee_next/eevee_sampling.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_sampling.cc
@@ -57,22 +57,21 @@ void Sampling::end_sync()
{
if (reset_) {
viewport_sample_ = 0;
- if (inst_.is_viewport()) {
- interactive_mode_ = true;
- }
}
- if (interactive_mode_) {
- int interactive_sample_count = min_ii(interactive_sample_max_, sample_count_);
+ if (inst_.is_viewport()) {
+ interactive_mode_ = viewport_sample_ < interactive_mode_threshold;
+ if (interactive_mode_) {
+ int interactive_sample_count = min_ii(interactive_sample_max_, sample_count_);
- if (viewport_sample_ < interactive_sample_count) {
- /* Loop over the same starting samples. */
- sample_ = sample_ % interactive_sample_count;
- }
- else {
- /* Break out of the loop and resume normal pattern. */
- sample_ = interactive_sample_count;
- interactive_mode_ = false;
+ if (viewport_sample_ < interactive_sample_count) {
+ /* Loop over the same starting samples. */
+ sample_ = sample_ % interactive_sample_count;
+ }
+ else {
+ /* Break out of the loop and resume normal pattern. */
+ sample_ = interactive_sample_count;
+ }
}
}
}
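
A minimal standalone sketch (a hypothetical driver loop, not part of the patch) of what the
branch above does during continuous interaction: sample_ keeps advancing but is wrapped modulo
interactive_sample_max_, so the viewport cycles over the same starting samples until the scene
settles.

#include <cstdint>
#include <iostream>

int main()
{
  const uint64_t interactive_sample_max = 8;
  uint64_t sample = 0;
  /* The scene keeps changing: each redraw resets viewport_sample_ to 0, so
   * interactive mode stays on and the same 8 starting samples are reused. */
  for (int redraw = 0; redraw < 12; redraw++) {
    const int64_t viewport_sample = 0;                 /* reset_ fired this redraw. */
    const bool interactive_mode = viewport_sample < 3; /* interactive_mode_threshold */
    if (interactive_mode) {
      sample = sample % interactive_sample_max; /* Loop over the starting samples. */
    }
    std::cout << "redraw " << redraw << " renders sample " << sample << "\n";
    sample++; /* Mirrors Sampling::step(). */
  }
  return 0;
}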
@@ -138,8 +137,6 @@ void Sampling::step()
viewport_sample_++;
sample_++;
- std::cout << sample_ << " " << viewport_sample_ << std::endl;
-
reset_ = false;
}
@@ -218,7 +215,7 @@ void Sampling::dof_disk_sample_get(float *r_radius, float *r_theta) const
/** \} */
/* -------------------------------------------------------------------- */
-/** \name Sampling patterns
+/** \name Cumulative Distribution Function (CDF)
* \{ */
/* Creates a discrete cumulative distribution function table from a given curvemapping.
diff --git a/source/blender/draw/engines/eevee_next/eevee_sampling.hh b/source/blender/draw/engines/eevee_next/eevee_sampling.hh
index 11daa21629a..c604ecef40b 100644
--- a/source/blender/draw/engines/eevee_next/eevee_sampling.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_sampling.hh
@@ -33,7 +33,7 @@ class Sampling {
/* During interactive rendering, loop over the first few samples. */
constexpr static uint64_t interactive_sample_max_ = 8;
- /** 0 based current sample. */
+ /** 0 based current sample. Might not increase sequentially in viewport. */
uint64_t sample_ = 0;
/** Target sample count. */
uint64_t sample_count_ = 64;
@@ -43,7 +43,7 @@ class Sampling {
uint64_t dof_sample_count_ = 1;
/** Motion blur steps. */
uint64_t motion_blur_steps_ = 1;
- /** Increases if the view and the scene is static. */
+ /** Increases if the view and the scene is static. Does increase sequentially. */
int64_t viewport_sample_ = 0;
/** Tag to reset sampling for the next sample. */
bool reset_ = false;
@@ -52,6 +52,12 @@ class Sampling {
* In interactive mode, image stability is prioritized over quality.
*/
bool interactive_mode_ = false;
+ /**
+ * Sample count after which we use the static accumulation.
+ * Interactive sampling from sample 0 to (interactive_mode_threshold - 1).
+ * Accumulation sampling from sample interactive_mode_threshold to sample_count_.
+ */
+ static constexpr int interactive_mode_threshold = 3;
SamplingDataBuf data_;
@@ -102,13 +108,24 @@ class Sampling {
/* Returns true if rendering has finished. */
bool finished() const
{
- return (sample_ >= sample_count_ - 1);
+ return (sample_ >= sample_count_);
}
/* Returns true if viewport smoothing and sampling has finished. */
bool finished_viewport() const
{
- return finished() && (viewport_sample_ >= interactive_sample_max_);
+ return (viewport_sample_ >= sample_count_) && !interactive_mode_;
+ }
+
+ /* Returns true if viewport renderer is in interactive mode and should use TAA. */
+ bool interactive_mode() const
+ {
+ return interactive_mode_;
+ }
+
+ uint64_t sample_count() const
+ {
+ return sample_count_;
}
/* Return true if we are starting a new motion blur step. We need to run sync again since
diff --git a/source/blender/draw/engines/eevee_next/eevee_shader.cc b/source/blender/draw/engines/eevee_next/eevee_shader.cc
index f5d4af2914e..7db9692783a 100644
--- a/source/blender/draw/engines/eevee_next/eevee_shader.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_shader.cc
@@ -82,8 +82,6 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_
return "eevee_film_frag";
case FILM_COMP:
return "eevee_film_comp";
- case VELOCITY_RESOLVE:
- return "eevee_velocity_resolve";
/* To avoid compiler warning about missing case. */
case MAX_SHADER_TYPE:
return "";
diff --git a/source/blender/draw/engines/eevee_next/eevee_shader.hh b/source/blender/draw/engines/eevee_next/eevee_shader.hh
index 7a0867820af..280aaab4e1c 100644
--- a/source/blender/draw/engines/eevee_next/eevee_shader.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_shader.hh
@@ -29,8 +29,6 @@ enum eShaderType {
FILM_FRAG = 0,
FILM_COMP,
- VELOCITY_RESOLVE,
-
MAX_SHADER_TYPE,
};
diff --git a/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh b/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh
index 62eb5a2b965..9cf7f75b2c3 100644
--- a/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh
@@ -124,7 +124,12 @@ struct CameraData {
float clip_far;
eCameraType type;
- int _pad0;
+ bool initialized;
+
+#ifdef __cplusplus
+ /* Small constructor to allow detecting new buffers. */
+ CameraData() : initialized(false){};
+#endif
};
BLI_STATIC_ASSERT_ALIGN(CameraData, 16)
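
The #ifdef __cplusplus guard works because this header is compiled by both C++ and GLSL. A
minimal illustration of the pattern (hypothetical, simplified layout; bool1 as a 4-byte alias
is our assumption to match the std140-style packing):

#include <cstdint>

using bool1 = int32_t; /* Assumed 4-byte boolean so the C++ layout matches the shader's. */

struct CameraData {
  float clip_near;
  float clip_far;
  int32_t type;
  bool1 initialized;
#ifdef __cplusplus
  /* Compiled only on the CPU side; GLSL never sees the constructor, so
   * freshly allocated UBOs start out flagged as uninitialized. */
  CameraData() : initialized(false) {}
#endif
};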
@@ -156,6 +161,8 @@ struct FilmData {
* pixel if using scaled resolution rendering.
*/
float2 subpixel_offset;
+ /** Scaling factor to convert texel to uvs. */
+ float2 extent_inv;
/** Is true if history is valid and can be sampled. Bypass history to resets accumulation. */
bool1 use_history;
/** Is true if combined buffer is valid and can be re-projected to reduce variance. */
@@ -165,7 +172,6 @@ struct FilmData {
/** Is true if accumulation of filtered passes is needed. */
bool1 any_render_pass_1;
bool1 any_render_pass_2;
- int _pad0, _pad1;
/** Output counts per type. */
int color_len, value_len;
/** Index in color_accum_img or value_accum_img of each pass. -1 if pass is not enabled. */
diff --git a/source/blender/draw/engines/eevee_next/eevee_velocity.cc b/source/blender/draw/engines/eevee_next/eevee_velocity.cc
index 4bd0af8204e..048daf1b2db 100644
--- a/source/blender/draw/engines/eevee_next/eevee_velocity.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_velocity.cc
@@ -9,10 +9,6 @@
* temporal re-projection or motion blur.
*
* It is the module that tracks the objects between frames updates.
- *
- * #VelocityModule contains all motion steps data and logic.
- * #VelocityPass contains the resolve pass for static geometry.
- * #VelocityView is a per view instance that contain the velocity buffer.
*/
#include "BKE_duplilist.h"
@@ -36,8 +32,7 @@ namespace blender::eevee {
void VelocityModule::init()
{
-#if 0 /* TODO renderpasses */
- if (inst_.render && (inst_.render_passes.vector != nullptr)) {
+ if (inst_.render && (inst_.film.enabled_passes_get() & EEVEE_RENDER_PASS_VECTOR)) {
/* No motion blur and the vector pass was requested. Do the step sync here. */
const Scene *scene = inst_.scene;
float initial_time = scene->r.cfra + scene->r.subframe;
@@ -45,7 +40,6 @@ void VelocityModule::init()
step_sync(STEP_NEXT, initial_time + 1.0f);
inst_.set_time(initial_time);
}
-#endif
}
static void step_object_sync_render(void *velocity,
@@ -70,6 +64,11 @@ void VelocityModule::step_camera_sync()
{
inst_.camera.sync();
*camera_steps[step_] = inst_.camera.data_get();
+ /* Fix undefined camera steps when rendering is starting. */
+ if ((step_ == STEP_CURRENT) && (camera_steps[STEP_PREVIOUS]->initialized == false)) {
+ *camera_steps[STEP_PREVIOUS] = *static_cast<CameraData *>(camera_steps[step_]);
+ camera_steps[STEP_PREVIOUS]->initialized = true;
+ }
}
bool VelocityModule::step_object_sync(Object *ob,
@@ -267,6 +266,10 @@ void VelocityModule::end_sync()
inst_.sampling.reset();
}
+ if (inst_.is_viewport() && camera_has_motion()) {
+ inst_.sampling.reset();
+ }
+
for (auto key : deleted_obj) {
velocity_map.remove(key);
}
@@ -300,19 +303,6 @@ void VelocityModule::end_sync()
camera_steps[STEP_CURRENT]->push_update();
camera_steps[STEP_NEXT]->push_update();
indirection_buf.push_update();
-
- {
- resolve_ps_ = DRW_pass_create("Velocity.Resolve", (DRWState)0);
- GPUShader *sh = inst_.shaders.static_shader_get(VELOCITY_RESOLVE);
- DRWShadingGroup *grp = DRW_shgroup_create(sh, resolve_ps_);
- DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &input_depth_tx_);
- DRW_shgroup_uniform_image_ref(grp, "velocity_view_img", &velocity_view_tx_);
- DRW_shgroup_uniform_image_ref(grp, "velocity_camera_img", &velocity_camera_tx_);
- DRW_shgroup_uniform_block(grp, "camera_prev", *camera_steps[STEP_PREVIOUS]);
- DRW_shgroup_uniform_block(grp, "camera_curr", *camera_steps[STEP_CURRENT]);
- DRW_shgroup_uniform_block(grp, "camera_next", *camera_steps[STEP_NEXT]);
- DRW_shgroup_call_compute_ref(grp, resolve_dispatch_size_);
- }
}
bool VelocityModule::object_has_velocity(const Object *ob)
@@ -359,60 +349,15 @@ void VelocityModule::bind_resources(DRWShadingGroup *grp)
DRW_shgroup_storage_block_ref(grp, "velocity_indirection_buf", &indirection_buf);
}
-/* Resolve pass for static geometry and to camera space projection. */
-void VelocityModule::resolve_camera_motion(GPUTexture *depth_tx,
- GPUTexture *velocity_view_tx,
- GPUTexture *velocity_camera_tx)
+bool VelocityModule::camera_has_motion() const
{
- input_depth_tx_ = depth_tx;
- velocity_view_tx_ = velocity_view_tx;
- velocity_camera_tx_ = velocity_camera_tx;
-
- resolve_dispatch_size_.x = divide_ceil_u(GPU_texture_width(depth_tx), 8);
- resolve_dispatch_size_.y = divide_ceil_u(GPU_texture_height(depth_tx), 8);
-
- DRW_draw_pass(resolve_ps_);
-}
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Velocity View
- * \{ */
-
-void VelocityView::sync()
-{
- /* TODO: Remove. */
- velocity_view_tx_.sync();
- velocity_camera_tx_.sync();
-}
-
-void VelocityView::acquire(int2 extent)
-{
- /* WORKAROUND: View name should be unique and static.
- * With this, we can reuse the same texture across views. */
- DrawEngineType *owner = (DrawEngineType *)view_name_.c_str();
-
- /* Only RG16F when only doing only reprojection or motion blur. */
- eGPUTextureFormat format = inst_.is_viewport() ? GPU_RG16F : GPU_RGBA16F;
- velocity_view_tx_.acquire(extent, format, owner);
- if (false /* TODO(fclem): Panoramic camera. */) {
- velocity_camera_tx_.acquire(extent, format, owner);
- }
- else {
- velocity_camera_tx_.acquire(int2(1), format, owner);
+ /* Only valid after sync. */
+ if (inst_.is_viewport()) {
+ /* Viewport has no next step. */
+ return *camera_steps[STEP_PREVIOUS] != *camera_steps[STEP_CURRENT];
}
-}
-
-void VelocityView::resolve(GPUTexture *depth_tx)
-{
- inst_.velocity.resolve_camera_motion(depth_tx, velocity_view_tx_, velocity_camera_tx_);
-}
-
-void VelocityView::release()
-{
- velocity_view_tx_.release();
- velocity_camera_tx_.release();
+ return *camera_steps[STEP_PREVIOUS] != *camera_steps[STEP_CURRENT] &&
+ *camera_steps[STEP_NEXT] != *camera_steps[STEP_CURRENT];
}
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/eevee_velocity.hh b/source/blender/draw/engines/eevee_next/eevee_velocity.hh
index e2606c061e1..826cd631a96 100644
--- a/source/blender/draw/engines/eevee_next/eevee_velocity.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_velocity.hh
@@ -27,8 +27,6 @@ namespace blender::eevee {
/** Container for scene velocity data. */
class VelocityModule {
- friend class VelocityView;
-
public:
struct VelocityObjectData : public VelocityIndex {
/** ID to retrieve the corresponding #VelocityGeometryData after copy. */
@@ -69,15 +67,6 @@ class VelocityModule {
eVelocityStep step_ = STEP_CURRENT;
- DRWPass *resolve_ps_ = nullptr;
-
- /** Reference only. Not owned. */
- GPUTexture *input_depth_tx_;
- GPUTexture *velocity_view_tx_;
- GPUTexture *velocity_camera_tx_;
-
- int3 resolve_dispatch_size_ = int3(1, 1, 1);
-
public:
VelocityModule(Instance &inst) : inst_(inst)
{
@@ -89,6 +78,7 @@ class VelocityModule {
}
for (CameraDataBuf *&step_buf : camera_steps) {
step_buf = new CameraDataBuf();
}
};
@@ -121,56 +111,11 @@ class VelocityModule {
void bind_resources(DRWShadingGroup *grp);
+ bool camera_has_motion() const;
+
private:
bool object_has_velocity(const Object *ob);
bool object_is_deform(const Object *ob);
-
- void resolve_camera_motion(GPUTexture *depth_tx,
- GPUTexture *velocity_view_tx,
- GPUTexture *velocity_camera_tx);
-};
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Velocity
- *
- * \{ */
-
-/**
- * Per view module.
- */
-class VelocityView {
- private:
- Instance &inst_;
-
- StringRefNull view_name_;
-
- TextureFromPool velocity_camera_tx_ = {"velocity_camera_tx_"};
- TextureFromPool velocity_view_tx_ = {"velocity_view_tx_"};
-
- public:
- VelocityView(Instance &inst, const char *name) : inst_(inst), view_name_(name){};
- ~VelocityView(){};
-
- void sync();
-
- void acquire(int2 extent);
- void release();
-
- void resolve(GPUTexture *depth_tx);
-
- /**
- * Getters
- **/
- GPUTexture *view_vectors_get() const
- {
- return velocity_view_tx_;
- }
- GPUTexture *camera_vectors_get() const
- {
- return (velocity_camera_tx_.is_valid()) ? velocity_camera_tx_ : velocity_view_tx_;
- }
};
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/eevee_view.cc b/source/blender/draw/engines/eevee_next/eevee_view.cc
index f4dba47721d..1a222dc4ebd 100644
--- a/source/blender/draw/engines/eevee_next/eevee_view.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_view.cc
@@ -80,7 +80,6 @@ void ShadingView::sync()
// dof_.sync(winmat_p, extent_);
// mb_.sync(extent_);
- velocity_.sync();
// rt_buffer_opaque_.sync(extent_);
// rt_buffer_refract_.sync(extent_);
// inst_.hiz_back.view_sync(extent_);
@@ -103,18 +102,20 @@ void ShadingView::render()
RenderBuffers &rbufs = inst_.render_buffers;
rbufs.acquire(extent_, owner);
- velocity_.acquire(extent_);
combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
GPU_ATTACHMENT_TEXTURE(rbufs.combined_tx));
prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
- GPU_ATTACHMENT_TEXTURE(velocity_.view_vectors_get()));
+ GPU_ATTACHMENT_TEXTURE(rbufs.vector_tx));
update_view();
DRW_stats_group_start(name_);
DRW_view_set_active(render_view_);
- float4 clear_velocity(VELOCITY_INVALID);
+ /* If the camera has any motion, compute motion vectors in the film pass. Otherwise, avoid float
+ * precision issues by setting the motion of all static geometry to 0. */
+ float4 clear_velocity = float4(inst_.velocity.camera_has_motion() ? VELOCITY_INVALID : 0.0f);
+
GPU_framebuffer_bind(prepass_fb_);
GPU_framebuffer_clear_color(prepass_fb_, clear_velocity);
/* Alpha stores transmittance. So start at 1. */
@@ -137,18 +138,14 @@ void ShadingView::render()
// inst_.lights.debug_draw(view_fb_);
// inst_.shadows.debug_draw(view_fb_);
- velocity_.resolve(rbufs.depth_tx);
-
// GPUTexture *final_radiance_tx = render_post(combined_tx_);
inst_.film.accumulate(sub_view_);
rbufs.release();
+ postfx_tx_.release();
DRW_stats_group_end();
-
- postfx_tx_.release();
- velocity_.release();
}
GPUTexture *ShadingView::render_post(GPUTexture *input_tx)
diff --git a/source/blender/draw/engines/eevee_next/eevee_view.hh b/source/blender/draw/engines/eevee_next/eevee_view.hh
index 30e06df9716..c6faebdd0e5 100644
--- a/source/blender/draw/engines/eevee_next/eevee_view.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_view.hh
@@ -44,7 +44,6 @@ class ShadingView {
/** Post-FX modules. */
// DepthOfField dof_;
// MotionBlur mb_;
- VelocityView velocity_;
/** Raytracing persistent buffers. Only opaque and refraction can have surface tracing. */
// RaytraceBuffer rt_buffer_opaque_;
@@ -69,7 +68,7 @@ class ShadingView {
public:
ShadingView(Instance &inst, const char *name, const float (*face_matrix)[4])
- : inst_(inst), name_(name), face_matrix_(face_matrix), velocity_(inst, name){};
+ : inst_(inst), name_(name), face_matrix_(face_matrix){};
~ShadingView(){};
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
index f79e9102d76..2611f714b59 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
@@ -143,24 +143,10 @@ vec2 camera_uv_from_view(CameraData cam, vec3 vV)
}
}
-vec2 camera_uv_from_world(CameraData cam, vec3 V)
+vec2 camera_uv_from_world(CameraData cam, vec3 P)
{
- vec3 vV = transform_point(cam.viewmat, V);
- switch (cam.type) {
- default:
- case CAMERA_ORTHO:
- return camera_uv_from_view(cam.persmat, false, V);
- case CAMERA_PERSP:
- return camera_uv_from_view(cam.persmat, true, V);
- case CAMERA_PANO_EQUIRECT:
- return camera_equirectangular_from_direction(cam, vV);
- case CAMERA_PANO_EQUISOLID:
- /* ATTR_FALLTHROUGH; */
- case CAMERA_PANO_EQUIDISTANT:
- return camera_fisheye_from_direction(cam, vV);
- case CAMERA_PANO_MIRROR:
- return camera_mirror_ball_from_direction(cam, vV);
- }
+ vec3 vV = transform_direction(cam.viewmat, normalize(P));
+ return camera_uv_from_view(cam, vV);
}
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl
index 6716c0f126e..454c835673b 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl
@@ -11,7 +11,7 @@ void main()
out_depth = imageLoad(depth_img, texel_film).r;
if (film_buf.display_id == -1) {
- out_color = imageLoad(in_combined_img, texel_film);
+ out_color = texelFetch(in_combined_tx, texel_film, 0);
}
else if (film_buf.display_is_value) {
out_color.rgb = imageLoad(value_accum_img, ivec3(texel_film, film_buf.display_id)).rrr;
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl
index 03af34f27ef..1bafa26924e 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl
@@ -4,7 +4,9 @@
**/
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+#pragma BLENDER_REQUIRE(common_math_geom_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_camera_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
/* Return scene linear Z depth from the camera or radial depth for panoramic cameras. */
float film_depth_convert_to_scene(float depth)
@@ -16,6 +18,54 @@ float film_depth_convert_to_scene(float depth)
return abs(get_view_z_from_depth(depth));
}
+vec3 film_YCoCg_from_scene_linear(vec3 rgb_color)
+{
+ const mat3 colorspace_tx = transpose(mat3(vec3(1, 2, 1), /* Y */
+ vec3(2, 0, -2), /* Co */
+ vec3(-1, 2, -1))); /* Cg */
+ return colorspace_tx * rgb_color;
+}
+
+vec4 film_YCoCg_from_scene_linear(vec4 rgba_color)
+{
+ return vec4(film_YCoCg_from_scene_linear(rgba_color.rgb), rgba_color.a);
+}
+
+vec3 film_scene_linear_from_YCoCg(vec3 ycocg_color)
+{
+ float Y = ycocg_color.x;
+ float Co = ycocg_color.y;
+ float Cg = ycocg_color.z;
+
+ vec3 rgb_color;
+ rgb_color.r = Y + Co - Cg;
+ rgb_color.g = Y + Cg;
+ rgb_color.b = Y - Co - Cg;
+ return rgb_color * 0.25;
+}
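
A quick check (ours) that this inverse matches the forward transform above, including the
deferred 1/4 factor. The forward rows give:

  Y = r + 2g + b,  Co = 2r - 2b,  Cg = -r + 2g - b

so

  Y + Co - Cg = 4r,  Y + Cg = 4g,  Y - Co - Cg = 4b

and multiplying the reconstructed vector by 0.25 recovers (r, g, b) exactly.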
+
+/* Load a texture sample in a specific format. The combined pass needs to use this. */
+vec4 film_texelfetch_as_YCoCg_opacity(sampler2D tx, ivec2 texel)
+{
+ vec4 color = texelFetch(tx, texel, 0);
+ /* Can we assume safe color from earlier pass? */
+ // color = safe_color(color);
+ /* Convert transmittance to opacity. */
+ color.a = saturate(1.0 - color.a);
+ /* Transform to YCoCg for accumulation. */
+ color.rgb = film_YCoCg_from_scene_linear(color.rgb);
+ return color;
+}
+
+/* Returns a weight based on luma to reduce the flickering introduced by high-energy pixels. */
+float film_luma_weight(float luma)
+{
+ /* Slide 20 of "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014. */
+ /* To preserve more details in dark areas, we use a bigger bias. */
+ /* TODO(fclem): exposure weighting. */
+ return 1.0 / (4.0 + luma);
+}
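
Plugging numbers into w(Y) = 1 / (4 + Y): a firefly sample with luma 100 gets weight
1/104 ~= 0.0096, while a mid-grey sample with luma 0.5 gets 1/4.5 ~= 0.22, so a single hot
sample can no longer dominate the filtered average.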
+
/* -------------------------------------------------------------------- */
/** \name Filter
* \{ */
@@ -116,18 +166,18 @@ void film_sample_accum_mist(FilmSample samp, inout float accum)
accum += mist * samp.weight;
}
-void film_sample_accum_combined(FilmSample samp, inout vec4 accum)
+void film_sample_accum_combined(FilmSample samp, inout vec4 accum, inout float weight_accum)
{
if (film_buf.combined_id == -1) {
return;
}
- vec4 color = texelFetch(combined_tx, samp.texel, 0);
- /* Convert transmittance to opacity. */
- color.a = saturate(1.0 - color.a);
- /* TODO(fclem) Pre-expose. */
- color.rgb = log2(1.0 + color.rgb);
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, samp.texel);
+
+ /* Weight by luma to remove fireflies. */
+ float weight = film_luma_weight(color.x) * samp.weight;
- accum += color * samp.weight;
+ accum += color * weight;
+ weight_accum += weight;
}
/** \} */
@@ -156,46 +206,281 @@ float film_weight_load(ivec2 texel)
/* Repeat texture coordinates as the weight can be optimized to a small portion of the film. */
texel = texel % imageSize(in_weight_img).xy;
- if (film_buf.use_history == false) {
+ if (!film_buf.use_history || film_buf.use_reprojection) {
return 0.0;
}
return imageLoad(in_weight_img, ivec3(texel, WEIGHT_lAYER_ACCUMULATION)).x;
}
-/* Return the motion in pixels. */
-void film_motion_load()
+/* Returns motion in pixel space to retrieve the pixel history. */
+vec2 film_pixel_history_motion_vector(ivec2 texel_sample)
+{
+ /**
+ * Dilate velocity by using the nearest pixel in a cross pattern.
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014 (Slide 27)
+ */
+ const ivec2 corners[4] = ivec2[4](ivec2(-2, -2), ivec2(2, -2), ivec2(-2, 2), ivec2(2, 2));
+ float min_depth = texelFetch(depth_tx, texel_sample, 0).x;
+ ivec2 nearest_texel = texel_sample;
+ for (int i = 0; i < 4; i++) {
+ ivec2 texel = clamp(texel_sample + corners[i], ivec2(0), textureSize(depth_tx, 0).xy);
+ float depth = texelFetch(depth_tx, texel, 0).x;
+ if (min_depth > depth) {
+ min_depth = depth;
+ nearest_texel = texel;
+ }
+ }
+
+ vec4 vector = velocity_resolve(vector_tx, nearest_texel, min_depth);
+
+ /* Transform to pixel space. */
+ vector.xy *= vec2(film_buf.extent);
+
+ return vector.xy;
+}
+
+/* \a t is the inter-pixel position. 0 means perfectly on a pixel center.
+ * Returns weights in both dimensions.
+ * Multiply the weights of each dimension to get the final pixel weights. */
+void film_get_catmull_rom_weights(vec2 t, out vec2 weights[4])
{
- // ivec2 texel_sample = film_sample_get(0, texel_film, distance_sample);
- // vec4 vector = texelFetch(vector_tx, texel_sample);
+ vec2 t2 = t * t;
+ vec2 t3 = t2 * t;
+ float fc = 0.5; /* Catmull-Rom. */
+
+ vec2 fct = t * fc;
+ vec2 fct2 = t2 * fc;
+ vec2 fct3 = t3 * fc;
+ weights[0] = (fct2 * 2.0 - fct3) - fct;
+ weights[1] = (t3 * 2.0 - fct3) + (-t2 * 3.0 + fct2) + 1.0;
+ weights[2] = (-t3 * 2.0 + fct3) + (t2 * 3.0 - (2.0 * fct2)) + fct;
+ weights[3] = fct3 - fct2;
+}
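
Expanded (our notation), these are the standard Catmull-Rom basis functions with fc = 0.5:

  w0(t) = 0.5 * (-t^3 + 2t^2 - t)
  w1(t) = 0.5 * (3t^3 - 5t^2 + 2)
  w2(t) = 0.5 * (-3t^3 + 4t^2 + t)
  w3(t) = 0.5 * (t^3 - t^2)

They sum to 1 for any t, so the filter preserves overall energy.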
- // vector.xy *= film_buf.extent;
+/* Load color using a special filter to avoid losing detail.
+ * \a input_texel is the sample position with subpixel accuracy. */
+vec4 film_sample_catmull_rom(sampler2D color_tx, vec2 input_texel)
+{
+ vec2 center_texel;
+ vec2 inter_texel = modf(input_texel, center_texel);
+ vec2 weights[4];
+ film_get_catmull_rom_weights(inter_texel, weights);
+
+#if 0 /* Reference. 16 Taps. */
+ vec4 color = vec4(0.0);
+ for (int y = 0; y < 4; y++) {
+ for (int x = 0; x < 4; x++) {
+ ivec2 texel = ivec2(center_texel) + ivec2(x, y) - 1;
+ texel = clamp(texel, ivec2(0), textureSize(color_tx, 0).xy - 1);
+ color += texelFetch(color_tx, texel, 0) * weights[x].x * weights[y].y;
+ }
+ }
+ return color;
+
+#elif 1 /* Optimized version. 5 Bilinear Taps. */
+ /**
+ * Use optimized version by leveraging bilinear filtering from hardware sampler and by removing
+ * corner taps.
+ * From "Filmic SMAA" by Jorge Jimenez at Siggraph 2016
+ * http://advances.realtimerendering.com/s2016/Filmic%20SMAA%20v7.pptx
+ */
+ center_texel += 0.5;
+
+ /* Slide 92. */
+ vec2 weight_12 = weights[1] + weights[2];
+ vec2 uv_12 = (center_texel + weights[2] / weight_12) * film_buf.extent_inv;
+ vec2 uv_0 = (center_texel - 1.0) * film_buf.extent_inv;
+ vec2 uv_3 = (center_texel + 2.0) * film_buf.extent_inv;
+
+ vec4 color;
+ vec4 weight_cross = weight_12.xyyx * vec4(weights[0].yx, weights[3].xy);
+ float weight_center = weight_12.x * weight_12.y;
+
+ color = textureLod(color_tx, uv_12, 0.0) * weight_center;
+ color += textureLod(color_tx, vec2(uv_12.x, uv_0.y), 0.0) * weight_cross.x;
+ color += textureLod(color_tx, vec2(uv_0.x, uv_12.y), 0.0) * weight_cross.y;
+ color += textureLod(color_tx, vec2(uv_3.x, uv_12.y), 0.0) * weight_cross.z;
+ color += textureLod(color_tx, vec2(uv_12.x, uv_3.y), 0.0) * weight_cross.w;
+ /* Re-normalize for the removed corners. */
+ return color / (weight_center + sum(weight_cross));
+
+#else /* Nearest interpolation for debugging. 1 Tap. */
+ ivec2 texel = ivec2(center_texel) + ivec2(greaterThan(inter_texel, vec2(0.5)));
+ texel = clamp(texel, ivec2(0), textureSize(color_tx, 0).xy - 1);
+ return texelFetch(color_tx, texel, 0);
+#endif
+}
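
The 5-tap trick relies on bilinear filtering evaluating a weighted pair of texels in a single
fetch (our derivation): for two adjacent texels x1, x2 with weights w1, w2,

  w1 * f(x1) + w2 * f(x2) = (w1 + w2) * lerp(f(x1), f(x2), w2 / (w1 + w2))

which is exactly one hardware sample taken at u = x1 + w2 / (w1 + w2); this is the uv_12
computation above. The four corner taps are dropped and the result re-normalized.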
+
+/* Return history clipping bounding box in YCoCg color space. */
+void film_combined_neighbor_boundbox(ivec2 texel, out vec4 min_c, out vec4 max_c)
+{
+ /* Plus (+) shape offsets. */
+ const ivec2 plus_offsets[5] = ivec2[5](ivec2(0, 0), /* Center */
+ ivec2(-1, 0),
+ ivec2(0, -1),
+ ivec2(1, 0),
+ ivec2(0, 1));
+#if 0
+ /**
+ * Compute Variance of neighborhood as described in:
+ * "An Excursion in Temporal Supersampling" by Marco Salvi at GDC 2016.
+ * and:
+ * "A Survey of Temporal Antialiasing Techniques" by Yang et al.
+ */
+
+ /* First 2 moments. */
+ vec4 mu1 = vec4(0), mu2 = vec4(0);
+ for (int i = 0; i < 5; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + plus_offsets[i]);
+ mu1 += color;
+ mu2 += sqr(color);
+ }
+ mu1 *= (1.0 / 5.0);
+ mu2 *= (1.0 / 5.0);
+
+ /* Extent scaling. Range [0.75..1.25].
+ * Balance between more flickering (0.75) or more ghosting (1.25). */
+ const float gamma = 1.25;
+ /* Standard deviation. */
+ vec4 sigma = sqrt(abs(mu2 - sqr(mu1)));
+ /* eq. 6 in "A Survey of Temporal Antialiasing Techniques". */
+ min_c = mu1 - gamma * sigma;
+ max_c = mu1 + gamma * sigma;
+#else
+ /**
+ * Simple bounding box calculation in YCoCg as described in:
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014
+ */
+ min_c = vec4(1e16);
+ max_c = vec4(-1e16);
+ for (int i = 0; i < 5; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + plus_offsets[i]);
+ min_c = min(min_c, color);
+ max_c = max(max_c, color);
+ }
+ /* (Slide 32) Simple clamp to min/max of 8 neighbors results in 3x3 box artifacts.
+ * Round bbox shape by averaging 2 different min/max from 2 different neighborhoods. */
+ vec4 min_c_3x3 = min_c;
+ vec4 max_c_3x3 = max_c;
+ const ivec2 corners[4] = ivec2[4](ivec2(-1, -1), ivec2(1, -1), ivec2(-1, 1), ivec2(1, 1));
+ for (int i = 0; i < 4; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + corners[i]);
+ min_c_3x3 = min(min_c_3x3, color);
+ max_c_3x3 = max(max_c_3x3, color);
+ }
+ min_c = (min_c + min_c_3x3) * 0.5;
+ max_c = (max_c + max_c_3x3) * 0.5;
+#endif
+}
+
+/* 1D equivalent of line_aabb_clipping_dist(). */
+float film_aabb_clipping_dist_alpha(float origin, float direction, float aabb_min, float aabb_max)
+{
+ if (abs(direction) < 1e-5) {
+ return 0.0;
+ }
+ float nearest_plane = (direction > 0.0) ? aabb_min : aabb_max;
+ return (nearest_plane - origin) / direction;
+}
+
+/* Modulate the history color to avoid ghosting artifact. */
+vec4 film_amend_combined_history(vec4 color_history, vec4 src_color, ivec2 src_texel)
+{
+ /* Get local color bounding box of source neighborhood. */
+ vec4 min_color, max_color;
+ film_combined_neighbor_boundbox(src_texel, min_color, max_color);
+
+ /* Clip instead of clamping to avoid color accumulating in the AABB corners. */
+ vec4 clip_dir = src_color - color_history;
+
+ float t = line_aabb_clipping_dist(color_history.rgb, clip_dir.rgb, min_color.rgb, max_color.rgb);
+ color_history.rgb += clip_dir.rgb * saturate(t);
+
+ /* Clip alpha on its own to avoid interference with other channels. */
+ float t_a = film_aabb_clipping_dist_alpha(color_history.a, clip_dir.a, min_color.a, max_color.a);
+ color_history.a += clip_dir.a * saturate(t_a);
+
+ return color_history;
+}
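
Per axis, the clip parameter solves origin + t * direction = nearest_plane, i.e.

  t = (nearest_plane - origin) / direction

and saturate(t) moves the history color only as far as the box surface. Unlike a
component-wise clamp, this keeps the clipped color on the segment between history and source,
which is why it does not accumulate colors in the AABB corners, as the comment above notes.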
+
+float film_history_blend_factor(float velocity,
+ vec2 texel,
+ float luma_incoming,
+ float luma_history)
+{
+ /* 5% of incoming color by default. */
+ float blend = 0.05;
+ /* Blend less history if the pixel has substantial velocity. */
+ blend = mix(blend, 0.20, saturate(velocity * 0.02));
+ /* Weight by luma. */
+ blend = max(blend, saturate(0.01 * luma_history / abs(luma_history - luma_incoming)));
+ /* Discard out of view history. */
+ if (any(lessThan(texel, vec2(0))) || any(greaterThanEqual(texel, vec2(film_buf.extent)))) {
+ blend = 1.0;
+ }
+ /* Discard history if invalid. */
+ if (film_buf.use_history == false) {
+ blend = 1.0;
+ }
+ return blend;
}
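
Plugging values into the function above: a pixel moving 10 px/frame gives
saturate(10 * 0.02) = 0.2 and blend = mix(0.05, 0.20, 0.2) = 0.08, i.e. 8% of the incoming
color; off-screen or invalid history forces blend = 1.0, discarding the history entirely.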
/* Returns resolved final color. */
-void film_store_combined(FilmSample dst, vec4 color, inout vec4 display)
+void film_store_combined(
+ FilmSample dst, ivec2 src_texel, vec4 color, float color_weight, inout vec4 display)
{
if (film_buf.combined_id == -1) {
return;
}
- /* Could we assume safe color from earlier pass? */
- color = safe_color(color);
- if (false) {
- /* Re-projection using motion vectors. */
- // ivec2 texel_combined = texel_film + film_motion_load(texel_film);
- // float weight_combined = film_weight_load(texel_combined);
- }
-#ifdef USE_NEIGHBORHOOD_CLAMPING
- /* Only do that for combined pass as it has a non-negligeable performance impact. */
- // color = clamp_bbox(color, min, max);
-#endif
+ vec4 color_src, color_dst;
+ float weight_src, weight_dst;
+
+ /* Undo the weighting to get the final spatially-filtered color. */
+ color_src = color / color_weight;
+
+ if (film_buf.use_reprojection) {
+ /* Interactive accumulation. Do reprojection and Temporal Anti-Aliasing. */
- vec4 dst_color = imageLoad(in_combined_img, dst.texel);
+ /* Reproject by finding where this pixel was in the previous frame. */
+ vec2 motion = film_pixel_history_motion_vector(dst.texel);
+ vec2 history_texel = vec2(dst.texel) + motion;
- color = (dst_color * dst.weight + color) * dst.weight_sum_inv;
+ float velocity = length(motion);
- /* TODO(fclem) undo Pre-expose. */
- // color.rgb = exp2(color.rgb) - 1.0;
+ /* Load weight if it is not uniform across the whole buffer (i.e. upsampling, panoramic). */
+ // dst.weight = film_weight_load(texel_combined);
+
+ color_dst = film_sample_catmull_rom(in_combined_tx, history_texel);
+ color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb);
+
+ float blend = film_history_blend_factor(velocity, history_texel, color_src.x, color_dst.x);
+
+ color_dst = film_amend_combined_history(color_dst, color_src, src_texel);
+
+ /* Luma weighted blend to avoid flickering. */
+ weight_dst = film_luma_weight(color_dst.x) * (1.0 - blend);
+ weight_src = film_luma_weight(color_src.x) * (blend);
+ }
+ else {
+ /* Everything is static. Use render accumulation. */
+ color_dst = texelFetch(in_combined_tx, dst.texel, 0);
+ color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb);
+
+ /* Luma weighted blend to avoid flickering. */
+ weight_dst = film_luma_weight(color_dst.x) * dst.weight;
+ weight_src = color_weight;
+ }
+ /* Weighted blend. */
+ color = color_dst * weight_dst + color_src * weight_src;
+ color /= weight_src + weight_dst;
+
+ color.rgb = film_scene_linear_from_YCoCg(color.rgb);
+
+ /* Fix alpha not accumulating to 1 because of float imprecision. */
+ if (color.a > 0.995) {
+ color.a = 1.0;
+ }
if (film_buf.display_id == -1) {
display = color;
@@ -290,16 +575,28 @@ void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth
/* NOTE: We split the accumulations into separate loops to avoid using too much registers and
* maximize occupancy. */
+ if (film_buf.combined_id != -1) {
+ /* NOTE: Do weight accumulation again since we use custom weights. */
+ float weight_accum = 0.0;
+ vec4 combined_accum = vec4(0.0);
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum_combined(src, combined_accum, weight_accum);
+ }
+ film_store_combined(dst, texel_film, combined_accum, weight_accum, out_color);
+ }
+
if (film_buf.has_data) {
- float film_weight = film_distance_load(texel_film);
+ float film_distance = film_distance_load(texel_film);
/* Get sample closest to target texel. It is always sample 0. */
FilmSample film_sample = film_sample_get(0, texel_film);
- if (film_sample.weight < film_weight) {
- float depth = texelFetch(depth_tx, film_sample.texel, 0).x;
+ if (film_buf.use_reprojection || film_sample.weight < film_distance) {
vec4 normal = texelFetch(normal_tx, film_sample.texel, 0);
- vec4 vector = texelFetch(vector_tx, film_sample.texel, 0);
+ float depth = texelFetch(depth_tx, film_sample.texel, 0).x;
+ vec4 vector = velocity_resolve(vector_tx, film_sample.texel, depth);
film_store_depth(texel_film, depth, out_depth);
film_store_data(texel_film, film_buf.normal_id, normal, out_color);
@@ -311,16 +608,6 @@ void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth
}
}
- if (film_buf.combined_id != -1) {
- vec4 combined_accum = vec4(0.0);
-
- for (int i = 0; i < film_buf.samples_len; i++) {
- FilmSample src = film_sample_get(i, texel_film);
- film_sample_accum_combined(src, combined_accum);
- }
- film_store_combined(dst, combined_accum, out_color);
- }
-
if (film_buf.any_render_pass_1) {
vec4 diffuse_light_accum = vec4(0.0);
vec4 specular_light_accum = vec4(0.0);
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
index 7ddf941df7c..f19b6038a6a 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
@@ -72,14 +72,7 @@ void main()
#endif
#ifdef MAT_VELOCITY
- vec4 out_velocity_camera; /* TODO(fclem): Panoramic cameras. */
- velocity_camera(interp.P + motion.prev,
- interp.P,
- interp.P - motion.next,
- out_velocity_camera,
- out_velocity_view);
-
- /* For testing in viewport. */
- out_velocity_view.zw = vec2(0.0);
+ out_velocity = velocity_surface(interp.P + motion.prev, interp.P, interp.P - motion.next);
+ out_velocity = velocity_pack(out_velocity);
#endif
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
index 435ae6658c9..fb9c9faaca2 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
@@ -4,21 +4,28 @@
#ifdef VELOCITY_CAMERA
+vec4 velocity_pack(vec4 data)
+{
+ return data * 0.01;
+}
+
+vec4 velocity_unpack(vec4 data)
+{
+ return data * 100.0;
+}
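
The pack/unpack pair is symmetric, so the round trip is exact up to the precision of the
intermediate storage format (our note; the 0.01 scale presumably keeps large motions in a
comfortable range for the 16-bit float render targets used for the vector pass):

  velocity_unpack(velocity_pack(v)) = (v * 0.01) * 100 = v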
+
/**
* Given a triple of position, compute the previous and next motion vectors.
- * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy)
+ * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy).
*/
-vec4 velocity_view(vec3 P_prev, vec3 P, vec3 P_next)
+vec4 velocity_surface(vec3 P_prv, vec3 P, vec3 P_nxt)
{
- vec2 prev_uv, curr_uv, next_uv;
-
- prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
- curr_uv = transform_point(ViewProjectionMatrix, P).xy;
- next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
+ /* NOTE: We don't use the drw_view.persmat to avoid adding the TAA jitter to the velocity. */
+ vec2 prev_uv = project_point(camera_prev.persmat, P_prv).xy;
+ vec2 curr_uv = project_point(camera_curr.persmat, P).xy;
+ vec2 next_uv = project_point(camera_next.persmat, P_nxt).xy;
- vec4 motion;
- motion.xy = prev_uv - curr_uv;
- motion.zw = curr_uv - next_uv;
+ vec4 motion = vec4(prev_uv - curr_uv, curr_uv - next_uv);
/* Convert NDC velocity to UV velocity */
motion *= 0.5;
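
Since uv = 0.5 * ndc + 0.5, a displacement in NDC maps to delta_uv = 0.5 * delta_ndc, hence
the single scale factor above.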
@@ -26,37 +33,41 @@ vec4 velocity_view(vec3 P_prev, vec3 P, vec3 P_next)
}
/**
- * Given a triple of position, compute the previous and next motion vectors.
- * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy)
- * \a velocity_camera is the motion in film UV space after camera projection.
- * \a velocity_view is the motion in ShadingView UV space. It is different
- * from velocity_camera for multi-view rendering.
+ * Given a view space view vector \a vV, compute the previous and next motion vectors for
+ * background pixels.
+ * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy).
*/
-void velocity_camera(vec3 P_prev, vec3 P, vec3 P_next, out vec4 vel_camera, out vec4 vel_view)
+vec4 velocity_background(vec3 vV)
{
- vec2 prev_uv, curr_uv, next_uv;
- prev_uv = camera_uv_from_world(camera_prev, P_prev);
- curr_uv = camera_uv_from_world(camera_curr, P);
- next_uv = camera_uv_from_world(camera_next, P_next);
-
- vel_camera.xy = prev_uv - curr_uv;
- vel_camera.zw = curr_uv - next_uv;
-
- if (is_panoramic(camera_curr.type)) {
- /* This path is only used if using using panoramic projections. Since the views always have
- * the same 45° aperture angle, we can safely reuse the projection matrix. */
- prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
- curr_uv = transform_point(ViewProjectionMatrix, P).xy;
- next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
-
- vel_view.xy = prev_uv - curr_uv;
- vel_view.zw = curr_uv - next_uv;
- /* Convert NDC velocity to UV velocity */
- vel_view *= 0.5;
- }
- else {
- vel_view = vel_camera;
+ /* Only transform the direction to avoid losing precision. */
+ vec3 V = transform_direction(camera_curr.viewinv, vV);
+
+ return velocity_surface(V, V, V);
+}
+
+/**
+ * Load and resolve the correct velocity, as some pixels might still not have valid
+ * motion data, for performance reasons.
+ */
+vec4 velocity_resolve(sampler2D vector_tx, ivec2 texel, float depth)
+{
+ vec2 uv = (vec2(texel) + 0.5) / vec2(textureSize(vector_tx, 0).xy);
+ vec4 vector = texelFetch(vector_tx, texel, 0);
+
+ if (vector.x == VELOCITY_INVALID) {
+ bool is_background = (depth == 1.0);
+ if (is_background) {
+ /* NOTE: Use viewCameraVec to avoid imprecision if camera is far from origin. */
+ vec3 vV = viewCameraVec(get_view_space_from_depth(uv, 1.0));
+ return velocity_background(vV);
+ }
+ else {
+ /* Static geometry. No translation in world space. */
+ vec3 P = get_world_space_from_depth(uv, depth);
+ return velocity_surface(P, P, P);
+ }
}
+ return velocity_unpack(vector);
}
#endif
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
deleted file mode 100644
index b68b2eaf117..00000000000
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
+++ /dev/null
@@ -1,58 +0,0 @@
-
-/**
- * Fullscreen pass that compute motion vector for static geometry.
- * Animated geometry has already written correct motion vectors.
- */
-
-#pragma BLENDER_REQUIRE(common_view_lib.glsl)
-#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
-
-#define is_valid_output(img_) (imageSize(img_).x > 1)
-
-void main()
-{
- ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
- vec4 motion = imageLoad(velocity_view_img, texel);
-
- bool pixel_has_valid_motion = (motion.x != VELOCITY_INVALID);
- float depth = texelFetch(depth_tx, texel, 0).r;
- bool is_background = (depth == 1.0f);
-
- vec2 uv = vec2(texel) * drw_view.viewport_size_inverse;
- vec3 P_next, P_prev, P_curr;
-
- if (pixel_has_valid_motion) {
- /* Animated geometry. View motion already computed during prepass. Convert only to camera. */
- // P_prev = get_world_space_from_depth(uv + motion.xy, 0.5);
- // P_curr = get_world_space_from_depth(uv, 0.5);
- // P_next = get_world_space_from_depth(uv + motion.zw, 0.5);
- return;
- }
- else if (is_background) {
- /* NOTE: Use viewCameraVec to avoid imprecision if camera is far from origin. */
- vec3 vV = viewCameraVec(get_view_space_from_depth(uv, 1.0));
- vec3 V = transform_direction(ViewMatrixInverse, vV);
- /* Background has no motion under camera translation. Translate view vector with the camera. */
- /* WATCH(fclem): Might create precision issues. */
- P_next = camera_next.viewinv[3].xyz + V;
- P_curr = camera_curr.viewinv[3].xyz + V;
- P_prev = camera_prev.viewinv[3].xyz + V;
- }
- else {
- /* Static geometry. No translation in world space. */
- P_curr = get_world_space_from_depth(uv, depth);
- P_prev = P_curr;
- P_next = P_curr;
- }
-
- vec4 vel_camera, vel_view;
- velocity_camera(P_prev, P_curr, P_next, vel_camera, vel_view);
-
- if (in_texture_range(texel, depth_tx)) {
- imageStore(velocity_view_img, texel, vel_view);
-
- if (is_valid_output(velocity_camera_img)) {
- imageStore(velocity_camera_img, texel, vel_camera);
- }
- }
-}
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh
index eec7b8ae615..a5baaca51f9 100644
--- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh
@@ -4,7 +4,7 @@
#include "gpu_shader_create_info.hh"
GPU_SHADER_CREATE_INFO(eevee_film)
- .uniform_buf(1, "FilmData", "film_buf")
+ .uniform_buf(4, "FilmData", "film_buf")
.sampler(0, ImageType::DEPTH_2D, "depth_tx")
.sampler(1, ImageType::FLOAT_2D, "combined_tx")
.sampler(2, ImageType::FLOAT_2D, "normal_tx")
@@ -20,15 +20,19 @@ GPU_SHADER_CREATE_INFO(eevee_film)
.sampler(12, ImageType::FLOAT_2D, "ambient_occlusion_tx")
.sampler(13, ImageType::FLOAT_2D_ARRAY, "aov_color_tx")
.sampler(14, ImageType::FLOAT_2D_ARRAY, "aov_value_tx")
+ /* Color history for TAA needs to be a sampler to leverage bilinear sampling. */
+ .sampler(15, ImageType::FLOAT_2D, "in_combined_tx")
// .sampler(15, ImageType::FLOAT_2D, "cryptomatte_tx") /* TODO */
.image(0, GPU_R32F, Qualifier::READ, ImageType::FLOAT_2D_ARRAY, "in_weight_img")
.image(1, GPU_R32F, Qualifier::WRITE, ImageType::FLOAT_2D_ARRAY, "out_weight_img")
- .image(2, GPU_RGBA16F, Qualifier::READ, ImageType::FLOAT_2D, "in_combined_img")
+ /* Color history for TAA needs to be a sampler to leverage bilinear sampling. */
+ //.image(2, GPU_RGBA16F, Qualifier::READ, ImageType::FLOAT_2D, "in_combined_img")
.image(3, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_combined_img")
.image(4, GPU_R32F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "depth_img")
.image(5, GPU_RGBA16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "color_accum_img")
.image(6, GPU_R16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "value_accum_img")
.additional_info("eevee_shared")
+ .additional_info("eevee_velocity_camera")
.additional_info("draw_view");
GPU_SHADER_CREATE_INFO(eevee_film_frag)
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
index c6cbf9b1456..6e8e8fb020a 100644
--- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
@@ -31,26 +31,7 @@ GPU_SHADER_CREATE_INFO(eevee_velocity_geom)
.storage_buf(
7, Qualifier::READ, "VelocityIndex", "velocity_indirection_buf[]", Frequency::PASS)
.vertex_out(eevee_velocity_surface_iface)
- .fragment_out(0, Type::VEC4, "out_velocity_view")
+ .fragment_out(0, Type::VEC4, "out_velocity")
.additional_info("eevee_velocity_camera");
/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Velocity Resolve
- *
- * Computes velocity for static objects.
- * Also converts motion to camera space (as opposed to view space) if needed.
- * \{ */
-
-GPU_SHADER_CREATE_INFO(eevee_velocity_resolve)
- .do_static_compilation(true)
- .local_group_size(8, 8)
- .sampler(0, ImageType::DEPTH_2D, "depth_tx")
- .image(0, GPU_RG16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "velocity_view_img")
- .image(1, GPU_RG16F, Qualifier::WRITE, ImageType::FLOAT_2D, "velocity_camera_img")
- .additional_info("eevee_shared")
- .compute_source("eevee_velocity_resolve_comp.glsl")
- .additional_info("draw_view", "eevee_velocity_camera");
-
-/** \} */