git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/engines/eevee_next')
 source/blender/draw/engines/eevee_next/eevee_camera.cc | 21
 source/blender/draw/engines/eevee_next/eevee_camera.hh | 21
 source/blender/draw/engines/eevee_next/eevee_defines.hh | 2
 source/blender/draw/engines/eevee_next/eevee_engine.cc | 22
 source/blender/draw/engines/eevee_next/eevee_film.cc | 637
 source/blender/draw/engines/eevee_next/eevee_film.hh | 220
 source/blender/draw/engines/eevee_next/eevee_instance.cc | 83
 source/blender/draw/engines/eevee_next/eevee_instance.hh | 29
 source/blender/draw/engines/eevee_next/eevee_material.cc | 2
 source/blender/draw/engines/eevee_next/eevee_pipeline.cc | 29
 source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc | 101
 source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh | 57
 source/blender/draw/engines/eevee_next/eevee_sampling.cc | 268
 source/blender/draw/engines/eevee_next/eevee_sampling.hh | 189
 source/blender/draw/engines/eevee_next/eevee_shader.cc | 7
 source/blender/draw/engines/eevee_next/eevee_shader.hh | 3
 source/blender/draw/engines/eevee_next/eevee_shader_shared.hh | 201
 source/blender/draw/engines/eevee_next/eevee_sync.cc | 16
 source/blender/draw/engines/eevee_next/eevee_velocity.cc | 93
 source/blender/draw/engines/eevee_next/eevee_velocity.hh | 61
 source/blender/draw/engines/eevee_next/eevee_view.cc | 91
 source/blender/draw/engines/eevee_next/eevee_view.hh | 22
 source/blender/draw/engines/eevee_next/eevee_world.cc | 2
 source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl | 42
 source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl | 20
 source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl | 13
 source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl | 31
 source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl | 713
 source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl | 58
 source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl | 23
 source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl | 44
 source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl | 11
 source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl | 93
 source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl | 58
 source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh | 48
 source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh | 58
 source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh | 22
37 files changed, 2954 insertions, 457 deletions
diff --git a/source/blender/draw/engines/eevee_next/eevee_camera.cc b/source/blender/draw/engines/eevee_next/eevee_camera.cc
index e6d2e2db764..b9040f0f3ab 100644
--- a/source/blender/draw/engines/eevee_next/eevee_camera.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_camera.cc
@@ -29,10 +29,8 @@ namespace blender::eevee {
void Camera::init()
{
const Object *camera_eval = inst_.camera_eval_object;
- synced_ = false;
- data_.swap();
- CameraData &data = data_.current();
+ CameraData &data = data_;
if (camera_eval) {
const ::Camera *cam = reinterpret_cast<const ::Camera *>(camera_eval->data);
@@ -77,9 +75,8 @@ void Camera::init()
void Camera::sync()
{
const Object *camera_eval = inst_.camera_eval_object;
- CameraData &data = data_.current();
- data.filter_size = inst_.scene->r.gauss;
+ CameraData &data = data_;
if (inst_.drw_view) {
DRW_view_viewmat_get(inst_.drw_view, data.viewmat.ptr(), false);
@@ -127,6 +124,10 @@ void Camera::sync()
data.equirect_scale *= data.uv_scale;
data.equirect_scale_inv = 1.0f / data.equirect_scale;
+#else
+ data.fisheye_fov = data.fisheye_lens = -1.0f;
+ data.equirect_bias = float2(0.0f);
+ data.equirect_scale = float2(0.0f);
#endif
}
else if (inst_.drw_view) {
@@ -137,14 +138,8 @@ void Camera::sync()
data.equirect_scale = float2(0.0f);
}
- data_.current().push_update();
-
- synced_ = true;
-
- /* Detect changes in parameters. */
- if (data_.current() != data_.previous()) {
- // inst_.sampling.reset();
- }
+ data_.initialized = true;
+ data_.push_update();
}
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/eevee_camera.hh b/source/blender/draw/engines/eevee_next/eevee_camera.hh
index dfec738b1f3..8bf64199246 100644
--- a/source/blender/draw/engines/eevee_next/eevee_camera.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_camera.hh
@@ -61,8 +61,7 @@ inline bool operator==(const CameraData &a, const CameraData &b)
return compare_m4m4(a.persmat.ptr(), b.persmat.ptr(), FLT_MIN) && (a.uv_scale == b.uv_scale) &&
(a.uv_bias == b.uv_bias) && (a.equirect_scale == b.equirect_scale) &&
(a.equirect_bias == b.equirect_bias) && (a.fisheye_fov == b.fisheye_fov) &&
- (a.fisheye_lens == b.fisheye_lens) && (a.filter_size == b.filter_size) &&
- (a.type == b.type);
+ (a.fisheye_lens == b.fisheye_lens) && (a.type == b.type);
}
inline bool operator!=(const CameraData &a, const CameraData &b)
@@ -84,9 +83,7 @@ class Camera {
Instance &inst_;
/** Double buffered to detect changes and have history for re-projection. */
- SwapChain<CameraDataBuf, 2> data_;
- /** Detects wrong usage. */
- bool synced_ = false;
+ CameraDataBuf data_;
public:
Camera(Instance &inst) : inst_(inst){};
@@ -100,28 +97,28 @@ class Camera {
**/
const CameraData &data_get() const
{
- BLI_assert(synced_);
- return data_.current();
+ BLI_assert(data_.initialized);
+ return data_;
}
const GPUUniformBuf *ubo_get() const
{
- return data_.current();
+ return data_;
}
bool is_panoramic() const
{
- return eevee::is_panoramic(data_.current().type);
+ return eevee::is_panoramic(data_.type);
}
bool is_orthographic() const
{
- return data_.current().type == CAMERA_ORTHO;
+ return data_.type == CAMERA_ORTHO;
}
const float3 &position() const
{
- return *reinterpret_cast<const float3 *>(data_.current().viewinv[3]);
+ return *reinterpret_cast<const float3 *>(data_.viewinv[3]);
}
const float3 &forward() const
{
- return *reinterpret_cast<const float3 *>(data_.current().viewinv[2]);
+ return *reinterpret_cast<const float3 *>(data_.viewinv[2]);
}
};
diff --git a/source/blender/draw/engines/eevee_next/eevee_defines.hh b/source/blender/draw/engines/eevee_next/eevee_defines.hh
index f75ebd2bd13..1e7979b594e 100644
--- a/source/blender/draw/engines/eevee_next/eevee_defines.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_defines.hh
@@ -43,3 +43,5 @@
/* Minimum visibility size. */
#define LIGHTPROBE_FILTER_VIS_GROUP_SIZE 16
+
+#define FILM_GROUP_SIZE 16
diff --git a/source/blender/draw/engines/eevee_next/eevee_engine.cc b/source/blender/draw/engines/eevee_next/eevee_engine.cc
index be0adfad568..37b4bde324e 100644
--- a/source/blender/draw/engines/eevee_next/eevee_engine.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_engine.cc
@@ -12,6 +12,8 @@
#include "DRW_render.h"
+#include "RE_pipeline.h"
+
#include "eevee_engine.h" /* Own include. */
#include "eevee_instance.hh"
@@ -97,6 +99,8 @@ static void eevee_draw_scene(void *vedata)
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
ved->instance->draw_viewport(dfbl);
STRNCPY(ved->info, ved->instance->info.c_str());
+ /* Reset the view for the engines running afterwards. */
+ DRW_view_set_active(nullptr);
}
static void eevee_cache_init(void *vedata)
@@ -144,7 +148,23 @@ static void eevee_render_to_image(void *UNUSED(vedata),
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
- UNUSED_VARS(engine, layer);
+
+ eevee::Instance *instance = new eevee::Instance();
+
+ Render *render = engine->re;
+ Depsgraph *depsgraph = DRW_context_state_get()->depsgraph;
+ Object *camera_original_ob = RE_GetCamera(engine->re);
+ const char *viewname = RE_GetActiveRenderView(engine->re);
+ int size[2] = {engine->resolution_x, engine->resolution_y};
+
+ rctf view_rect;
+ rcti rect;
+ RE_GetViewPlane(render, &view_rect, &rect);
+
+ instance->init(size, &rect, engine, depsgraph, nullptr, camera_original_ob, layer);
+ instance->render_frame(layer, viewname);
+
+ delete instance;
}
static void eevee_render_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer)
diff --git a/source/blender/draw/engines/eevee_next/eevee_film.cc b/source/blender/draw/engines/eevee_next/eevee_film.cc
new file mode 100644
index 00000000000..49f43265aa8
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/eevee_film.cc
@@ -0,0 +1,637 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 Blender Foundation.
+ */
+
+/** \file
+ * \ingroup eevee
+ *
+ * A film is a fullscreen buffer (usually at output extent)
+ * that accumulates samples from any distorted camera type
+ * using a pixel filter.
+ *
+ * Input needs to be jittered so that the filter converges to the right result.
+ */
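
To make the accumulation concrete, here is a minimal sketch (illustrative names, not the engine API) of the per-pixel math a film performs, assuming blender::float4 from BLI_math_vector_types.hh:

#include "BLI_math_vector_types.hh"

struct FilmPixel {
  blender::float4 accum = blender::float4(0.0f); /* Sum of weighted samples. */
  float weight_total = 0.0f;                     /* Sum of filter weights. */
};

void film_accumulate(FilmPixel &px, const blender::float4 &sample, float weight)
{
  px.accum += sample * weight;
  px.weight_total += weight;
}

blender::float4 film_resolve(const FilmPixel &px)
{
  /* Converges to the filtered image as jittered samples cover the filter support. */
  return (px.weight_total > 0.0f) ? px.accum / px.weight_total : blender::float4(0.0f);
}
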
+
+#include "BLI_hash.h"
+#include "BLI_rect.h"
+
+#include "GPU_framebuffer.h"
+#include "GPU_texture.h"
+
+#include "DRW_render.h"
+#include "RE_pipeline.h"
+
+#include "eevee_film.hh"
+#include "eevee_instance.hh"
+
+namespace blender::eevee {
+
+ENUM_OPERATORS(eViewLayerEEVEEPassType, 1 << EEVEE_RENDER_PASS_MAX_BIT)
+
+/* -------------------------------------------------------------------- */
+/** \name Arbitrary Output Variables
+ * \{ */
+
+void Film::init_aovs()
+{
+ Vector<ViewLayerAOV *> aovs;
+
+ aovs_info.display_id = -1;
+ aovs_info.display_is_value = false;
+ aovs_info.value_len = aovs_info.color_len = 0;
+
+ if (inst_.is_viewport()) {
+ /* Viewport case. */
+ if (inst_.v3d->shading.render_pass == EEVEE_RENDER_PASS_AOV) {
+ /* AOV display, request only a single AOV. */
+ ViewLayerAOV *aov = (ViewLayerAOV *)BLI_findstring(
+ &inst_.view_layer->aovs, inst_.v3d->shading.aov_name, offsetof(ViewLayerAOV, name));
+
+ if (aov == nullptr) {
+ /* AOV not found in view layer. */
+ return;
+ }
+
+ aovs.append(aov);
+ aovs_info.display_id = 0;
+ aovs_info.display_is_value = (aov->type == AOV_TYPE_VALUE);
+ }
+ else {
+ /* TODO(fclem): The realtime compositor could ask for several AOVs. */
+ }
+ }
+ else {
+ /* Render case. */
+ LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
+ aovs.append(aov);
+ }
+ }
+
+ if (aovs.size() > AOV_MAX) {
+ inst_.info = "Error: Too many AOVs";
+ return;
+ }
+
+ for (ViewLayerAOV *aov : aovs) {
+ bool is_value = (aov->type == AOV_TYPE_VALUE);
+ uint &index = is_value ? aovs_info.value_len : aovs_info.color_len;
+ uint &hash = is_value ? aovs_info.hash_value[index] : aovs_info.hash_color[index];
+ hash = BLI_hash_string(aov->name);
+ index++;
+ }
+}
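
For illustration, a hypothetical helper resolving an AOV name back to its layer index, mirroring the hash indirection built above (it assumes the AOVsInfoData fields used in this file; it is not part of the diff):

int aov_layer_from_name(const AOVsInfoData &info, const char *name, bool is_value)
{
  const uint hash = BLI_hash_string(name);
  const uint len = is_value ? info.value_len : info.color_len;
  for (uint i = 0; i < len; i++) {
    if ((is_value ? info.hash_value[i] : info.hash_color[i]) == hash) {
      return int(i);
    }
  }
  return -1; /* AOV not found. */
}
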
+
+float *Film::read_aov(ViewLayerAOV *aov)
+{
+ bool is_value = (aov->type == AOV_TYPE_VALUE);
+ Texture &accum_tx = is_value ? value_accum_tx_ : color_accum_tx_;
+
+ Span<uint> aovs_hash(is_value ? aovs_info.hash_value : aovs_info.hash_color,
+ is_value ? aovs_info.value_len : aovs_info.color_len);
+ /* Find AOV index. */
+ uint hash = BLI_hash_string(aov->name);
+ int aov_index = -1;
+ int i = 0;
+ for (uint candidate_hash : aovs_hash) {
+ if (candidate_hash == hash) {
+ aov_index = i;
+ break;
+ }
+ i++;
+ }
+
+ accum_tx.ensure_layer_views();
+
+ int index = aov_index + (is_value ? data_.aov_value_id : data_.aov_color_id);
+ GPUTexture *pass_tx = accum_tx.layer_view(index);
+
+ return (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Mist Pass
+ * \{ */
+
+void Film::sync_mist()
+{
+ const CameraData &cam = inst_.camera.data_get();
+ const ::World *world = inst_.scene->world;
+ float mist_start = world ? world->miststa : cam.clip_near;
+ float mist_distance = world ? world->mistdist : fabsf(cam.clip_far - cam.clip_near);
+ int mist_type = world ? world->mistype : (int)WO_MIST_LINEAR;
+
+ switch (mist_type) {
+ case WO_MIST_QUADRATIC:
+ data_.mist_exponent = 2.0f;
+ break;
+ case WO_MIST_LINEAR:
+ data_.mist_exponent = 1.0f;
+ break;
+ case WO_MIST_INVERSE_QUADRATIC:
+ data_.mist_exponent = 0.5f;
+ break;
+ }
+
+ data_.mist_scale = 1.0 / mist_distance;
+ data_.mist_bias = -mist_start / mist_distance;
+}
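
A sketch of how a shader can turn these three packed values back into a mist factor, assuming the conventions set above (this is not the actual eevee_film_lib.glsl code):

/* Requires BLI_math_base.h for clamp_f() and <cmath> for powf(). */
float mist_factor(float scene_distance, const FilmData &data)
{
  /* Normalized distance inside [mist_start, mist_start + mist_distance]. */
  float t = clamp_f(scene_distance * data.mist_scale + data.mist_bias, 0.0f, 1.0f);
  /* Exponent 2.0 = quadratic, 1.0 = linear, 0.5 = inverse quadratic. */
  return powf(t, data.mist_exponent);
}
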
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name FilmData
+ * \{ */
+
+inline bool operator==(const FilmData &a, const FilmData &b)
+{
+ return (a.extent == b.extent) && (a.offset == b.offset) &&
+ (a.filter_radius == b.filter_radius) && (a.scaling_factor == b.scaling_factor) &&
+ (a.background_opacity == b.background_opacity);
+}
+
+inline bool operator!=(const FilmData &a, const FilmData &b)
+{
+ return !(a == b);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Film
+ * \{ */
+
+void Film::init(const int2 &extent, const rcti *output_rect)
+{
+ Sampling &sampling = inst_.sampling;
+ Scene &scene = *inst_.scene;
+ SceneEEVEE &scene_eevee = scene.eevee;
+
+ init_aovs();
+
+ {
+ /* Enable passes that need to be rendered. */
+ eViewLayerEEVEEPassType render_passes = eViewLayerEEVEEPassType(0);
+
+ if (inst_.is_viewport()) {
+ /* Viewport Case. */
+ render_passes = eViewLayerEEVEEPassType(inst_.v3d->shading.render_pass);
+
+ if (inst_.overlays_enabled() || inst_.gpencil_engine_enabled) {
+ /* Overlays and Grease Pencil need the depth for correct compositing.
+ * Using the render pass ensures we store the center depth. */
+ render_passes |= EEVEE_RENDER_PASS_Z;
+ }
+ /* TEST */
+ render_passes |= EEVEE_RENDER_PASS_VECTOR;
+ }
+ else {
+ /* Render Case. */
+ render_passes = eViewLayerEEVEEPassType(inst_.view_layer->eevee.render_passes);
+
+ render_passes |= EEVEE_RENDER_PASS_COMBINED;
+
+#define ENABLE_FROM_LEGACY(name_legacy, name_eevee) \
+ SET_FLAG_FROM_TEST(render_passes, \
+ (inst_.view_layer->passflag & SCE_PASS_##name_legacy) != 0, \
+ EEVEE_RENDER_PASS_##name_eevee);
+
+ ENABLE_FROM_LEGACY(Z, Z)
+ ENABLE_FROM_LEGACY(MIST, MIST)
+ ENABLE_FROM_LEGACY(NORMAL, NORMAL)
+ ENABLE_FROM_LEGACY(SHADOW, SHADOW)
+ ENABLE_FROM_LEGACY(AO, AO)
+ ENABLE_FROM_LEGACY(EMIT, EMIT)
+ ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT)
+ ENABLE_FROM_LEGACY(DIFFUSE_COLOR, DIFFUSE_COLOR)
+ ENABLE_FROM_LEGACY(GLOSSY_COLOR, SPECULAR_COLOR)
+ ENABLE_FROM_LEGACY(DIFFUSE_DIRECT, DIFFUSE_LIGHT)
+ ENABLE_FROM_LEGACY(GLOSSY_DIRECT, SPECULAR_LIGHT)
+
+#undef ENABLE_FROM_LEGACY
+ }
+
+ /* Filter obsolete passes. */
+ render_passes &= ~(EEVEE_RENDER_PASS_UNUSED_8 | EEVEE_RENDER_PASS_BLOOM);
+
+ /* TODO(@fclem): Can't we rely on depsgraph update notification? */
+ if (assign_if_different(enabled_passes_, render_passes)) {
+ sampling.reset();
+ }
+ }
+ {
+ rcti fallback_rect;
+ if (BLI_rcti_is_empty(output_rect)) {
+ BLI_rcti_init(&fallback_rect, 0, extent[0], 0, extent[1]);
+ output_rect = &fallback_rect;
+ }
+
+ FilmData data = data_;
+ data.extent = int2(BLI_rcti_size_x(output_rect), BLI_rcti_size_y(output_rect));
+ data.offset = int2(output_rect->xmin, output_rect->ymin);
+ data.extent_inv = 1.0f / float2(data.extent);
+ /* Disable filtering if sample count is 1. */
+ data.filter_radius = (sampling.sample_count() == 1) ? 0.0f :
+ clamp_f(scene.r.gauss, 0.0f, 100.0f);
+ /* TODO(fclem): parameter hidden in experimental.
+ * We need to figure out LOD bias first in order to preserve texture crispiness. */
+ data.scaling_factor = 1;
+
+ data.background_opacity = (scene.r.alphamode == R_ALPHAPREMUL) ? 0.0f : 1.0f;
+ if (inst_.is_viewport() && false /* TODO(fclem): StudioLight */) {
+ data.background_opacity = inst_.v3d->shading.studiolight_background;
+ }
+
+ FilmData &data_prev_ = data_;
+ if (assign_if_different(data_prev_, data)) {
+ sampling.reset();
+ }
+
+ const eViewLayerEEVEEPassType data_passes = EEVEE_RENDER_PASS_Z | EEVEE_RENDER_PASS_NORMAL |
+ EEVEE_RENDER_PASS_VECTOR;
+ const eViewLayerEEVEEPassType color_passes_1 = EEVEE_RENDER_PASS_DIFFUSE_LIGHT |
+ EEVEE_RENDER_PASS_SPECULAR_LIGHT |
+ EEVEE_RENDER_PASS_VOLUME_LIGHT |
+ EEVEE_RENDER_PASS_EMIT;
+ const eViewLayerEEVEEPassType color_passes_2 = EEVEE_RENDER_PASS_DIFFUSE_COLOR |
+ EEVEE_RENDER_PASS_SPECULAR_COLOR |
+ EEVEE_RENDER_PASS_ENVIRONMENT |
+ EEVEE_RENDER_PASS_MIST |
+ EEVEE_RENDER_PASS_SHADOW | EEVEE_RENDER_PASS_AO;
+
+ data_.exposure_scale = pow2f(scene.view_settings.exposure);
+ data_.has_data = (enabled_passes_ & data_passes) != 0;
+ data_.any_render_pass_1 = (enabled_passes_ & color_passes_1) != 0;
+ data_.any_render_pass_2 = (enabled_passes_ & color_passes_2) != 0;
+ }
+ {
+ /* Set pass offsets. */
+
+ data_.display_id = aovs_info.display_id;
+ data_.display_is_value = aovs_info.display_is_value;
+
+ /* Combined is in a separate buffer. */
+ data_.combined_id = (enabled_passes_ & EEVEE_RENDER_PASS_COMBINED) ? 0 : -1;
+ /* Depth is in a separate buffer. */
+ data_.depth_id = (enabled_passes_ & EEVEE_RENDER_PASS_Z) ? 0 : -1;
+
+ data_.color_len = 0;
+ data_.value_len = 0;
+
+ auto pass_index_get = [&](eViewLayerEEVEEPassType pass_type) {
+ bool is_value = pass_is_value(pass_type);
+ int index = (enabled_passes_ & pass_type) ?
+ (is_value ? data_.value_len : data_.color_len)++ :
+ -1;
+ if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) {
+ data_.display_id = index;
+ data_.display_is_value = is_value;
+ }
+ return index;
+ };
+
+ data_.mist_id = pass_index_get(EEVEE_RENDER_PASS_MIST);
+ data_.normal_id = pass_index_get(EEVEE_RENDER_PASS_NORMAL);
+ data_.vector_id = pass_index_get(EEVEE_RENDER_PASS_VECTOR);
+ data_.diffuse_light_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_LIGHT);
+ data_.diffuse_color_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_COLOR);
+ data_.specular_light_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_LIGHT);
+ data_.specular_color_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_COLOR);
+ data_.volume_light_id = pass_index_get(EEVEE_RENDER_PASS_VOLUME_LIGHT);
+ data_.emission_id = pass_index_get(EEVEE_RENDER_PASS_EMIT);
+ data_.environment_id = pass_index_get(EEVEE_RENDER_PASS_ENVIRONMENT);
+ data_.shadow_id = pass_index_get(EEVEE_RENDER_PASS_SHADOW);
+ data_.ambient_occlusion_id = pass_index_get(EEVEE_RENDER_PASS_AO);
+
+ data_.aov_color_id = data_.color_len;
+ data_.aov_value_id = data_.value_len;
+
+ data_.aov_color_len = aovs_info.color_len;
+ data_.aov_value_len = aovs_info.value_len;
+
+ data_.color_len += data_.aov_color_len;
+ data_.value_len += data_.aov_value_len;
+ }
+ {
+ /* TODO(@fclem): Over-scans. */
+
+ data_.render_extent = math::divide_ceil(extent, int2(data_.scaling_factor));
+ int2 weight_extent = inst_.camera.is_panoramic() ? data_.extent : int2(data_.scaling_factor);
+
+ eGPUTextureFormat color_format = GPU_RGBA16F;
+ eGPUTextureFormat float_format = GPU_R16F;
+ eGPUTextureFormat weight_format = GPU_R32F;
+ eGPUTextureFormat depth_format = GPU_R32F;
+
+ int reset = 0;
+ reset += depth_tx_.ensure_2d(depth_format, data_.extent);
+ reset += combined_tx_.current().ensure_2d(color_format, data_.extent);
+ reset += combined_tx_.next().ensure_2d(color_format, data_.extent);
+ /* Two layers, one for nearest sample weight and one for weight accumulation. */
+ reset += weight_tx_.current().ensure_2d_array(weight_format, weight_extent, 2);
+ reset += weight_tx_.next().ensure_2d_array(weight_format, weight_extent, 2);
+ reset += color_accum_tx_.ensure_2d_array(color_format,
+ (data_.color_len > 0) ? data_.extent : int2(1),
+ (data_.color_len > 0) ? data_.color_len : 1);
+ reset += value_accum_tx_.ensure_2d_array(float_format,
+ (data_.value_len > 0) ? data_.extent : int2(1),
+ (data_.value_len > 0) ? data_.value_len : 1);
+
+ if (reset > 0) {
+ sampling.reset();
+ data_.use_history = 0;
+ data_.use_reprojection = 0;
+
+ /* Avoid NaNs in uninitialized texture memory, which would make history blending dangerous. */
+ color_accum_tx_.clear(float4(0.0f));
+ value_accum_tx_.clear(float4(0.0f));
+ combined_tx_.current().clear(float4(0.0f));
+ weight_tx_.current().clear(float4(0.0f));
+ depth_tx_.clear(float4(0.0f));
+ }
+ }
+
+ force_disable_reprojection_ = (scene_eevee.flag & SCE_EEVEE_TAA_REPROJECTION) == 0;
+}
+
+void Film::sync()
+{
+ /* We use a fragment shader for viewport because we need to output the depth. */
+ bool use_compute = (inst_.is_viewport() == false);
+
+ eShaderType shader = use_compute ? FILM_COMP : FILM_FRAG;
+
+ /* TODO(fclem): Shader variation for panoramic & scaled resolution. */
+
+ RenderBuffers &rbuffers = inst_.render_buffers;
+ VelocityModule &velocity = inst_.velocity;
+
+ eGPUSamplerState filter = GPU_SAMPLER_FILTER;
+
+ /* For viewport, only previous motion is supported.
+ * Still bind the previous step in place of the next one to avoid undefined behavior. */
+ eVelocityStep step_next = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;
+
+ DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS;
+ accumulate_ps_ = DRW_pass_create("Film.Accumulate", state);
+ GPUShader *sh = inst_.shaders.static_shader_get(shader);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, accumulate_ps_);
+ DRW_shgroup_uniform_block_ref(grp, "film_buf", &data_);
+ DRW_shgroup_uniform_block_ref(grp, "camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
+ DRW_shgroup_uniform_block_ref(grp, "camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
+ DRW_shgroup_uniform_block_ref(grp, "camera_next", &(*velocity.camera_steps[step_next]));
+ DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &rbuffers.depth_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "combined_tx", &rbuffers.combined_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "normal_tx", &rbuffers.normal_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "vector_tx", &rbuffers.vector_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "diffuse_light_tx", &rbuffers.diffuse_light_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "diffuse_color_tx", &rbuffers.diffuse_color_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "specular_light_tx", &rbuffers.specular_light_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "specular_color_tx", &rbuffers.specular_color_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "volume_light_tx", &rbuffers.volume_light_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "emission_tx", &rbuffers.emission_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "environment_tx", &rbuffers.environment_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "shadow_tx", &rbuffers.shadow_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "ambient_occlusion_tx", &rbuffers.ambient_occlusion_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "aov_color_tx", &rbuffers.aov_color_tx);
+ DRW_shgroup_uniform_texture_ref(grp, "aov_value_tx", &rbuffers.aov_value_tx);
+ /* NOTE(@fclem): 16 is the max number of sampled textures in many implementations.
+ * If we need more, we need to pack more of the similar passes in the same textures as arrays or
+ * use image binding instead. */
+ DRW_shgroup_uniform_image_ref(grp, "in_weight_img", &weight_src_tx_);
+ DRW_shgroup_uniform_image_ref(grp, "out_weight_img", &weight_dst_tx_);
+ DRW_shgroup_uniform_texture_ref_ex(grp, "in_combined_tx", &combined_src_tx_, filter);
+ DRW_shgroup_uniform_image_ref(grp, "out_combined_img", &combined_dst_tx_);
+ DRW_shgroup_uniform_image_ref(grp, "depth_img", &depth_tx_);
+ DRW_shgroup_uniform_image_ref(grp, "color_accum_img", &color_accum_tx_);
+ DRW_shgroup_uniform_image_ref(grp, "value_accum_img", &value_accum_tx_);
+ /* Sync with rendering passes. */
+ DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
+ /* Sync with rendering passes. */
+ DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
+ if (use_compute) {
+ int2 dispatch_size = math::divide_ceil(data_.extent, int2(FILM_GROUP_SIZE));
+ DRW_shgroup_call_compute(grp, UNPACK2(dispatch_size), 1);
+ }
+ else {
+ DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
+ }
+}
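
A worked example of the dispatch-size math, assuming a 1920x1080 film and the FILM_GROUP_SIZE of 16 defined earlier in this commit:

int2 extent(1920, 1080);
int2 dispatch_size = math::divide_ceil(extent, int2(FILM_GROUP_SIZE));
/* dispatch_size == int2(120, 68): 1080 / 16 = 67.5 rounds up, so the last row of
 * work groups extends past the film and the shader must bounds-check against extent. */
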
+
+void Film::end_sync()
+{
+ data_.use_reprojection = inst_.sampling.interactive_mode();
+
+ /* Just bypass the reprojection and reset the accumulation. */
+ if (force_disable_reprojection_ && inst_.sampling.is_reset()) {
+ data_.use_reprojection = false;
+ data_.use_history = false;
+ }
+
+ aovs_info.push_update();
+
+ sync_mist();
+}
+
+float2 Film::pixel_jitter_get() const
+{
+ float2 jitter = inst_.sampling.rng_2d_get(SAMPLING_FILTER_U);
+
+ if (!use_box_filter && data_.filter_radius < M_SQRT1_2 && !inst_.camera.is_panoramic()) {
+ /* For filter size less than a pixel, change sampling strategy and use a uniform disk
+ * distribution covering the filter shape. This avoids putting samples in areas without any
+ * weights. */
+ /* TODO(fclem): Importance sampling could be a better option here. */
+ jitter = Sampling::sample_disk(jitter) * data_.filter_radius;
+ }
+ else {
+ /* Jitter the size of a whole pixel. [-0.5..0.5] */
+ jitter -= 0.5f;
+ }
+ /* TODO(fclem): Mixed-resolution rendering: We need to offset to each of the target pixels
+ * covered by a render pixel, ideally by choosing one randomly using another sampling
+ * dimension, or by repeating the same sample RNG sequence for each pixel offset. */
+ return jitter;
+}
+
+eViewLayerEEVEEPassType Film::enabled_passes_get() const
+{
+ return enabled_passes_;
+}
+
+void Film::update_sample_table()
+{
+ data_.subpixel_offset = pixel_jitter_get();
+
+ int filter_radius_ceil = ceilf(data_.filter_radius);
+ float filter_radius_sqr = square_f(data_.filter_radius);
+
+ data_.samples_len = 0;
+ if (use_box_filter || data_.filter_radius < 0.01f) {
+ /* Disable gather filtering. */
+ data_.samples[0].texel = int2(0, 0);
+ data_.samples[0].weight = 1.0f;
+ data_.samples_weight_total = 1.0f;
+ data_.samples_len = 1;
+ }
+ /* NOTE: Threshold determined by hand so that we do not hit the assert below. */
+ else if (data_.filter_radius < 2.20f) {
+ /* Small filter Size. */
+ int closest_index = 0;
+ float closest_distance = FLT_MAX;
+ data_.samples_weight_total = 0.0f;
+ /* TODO(fclem): For optimization, could try Z-tile ordering. */
+ for (int y = -filter_radius_ceil; y <= filter_radius_ceil; y++) {
+ for (int x = -filter_radius_ceil; x <= filter_radius_ceil; x++) {
+ float2 pixel_offset = float2(x, y) - data_.subpixel_offset;
+ float distance_sqr = math::length_squared(pixel_offset);
+ if (distance_sqr < filter_radius_sqr) {
+ if (data_.samples_len >= FILM_PRECOMP_SAMPLE_MAX) {
+ BLI_assert_msg(0, "Precomputed sample table is too small.");
+ break;
+ }
+ FilmSample &sample = data_.samples[data_.samples_len];
+ sample.texel = int2(x, y);
+ sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
+ data_.samples_weight_total += sample.weight;
+
+ if (distance_sqr < closest_distance) {
+ closest_distance = distance_sqr;
+ closest_index = data_.samples_len;
+ }
+ data_.samples_len++;
+ }
+ }
+ }
+ /* Put the closest one in first position. */
+ if (closest_index != 0) {
+ SWAP(FilmSample, data_.samples[closest_index], data_.samples[0]);
+ }
+ }
+ else {
+ /* Large Filter Size. */
+ MutableSpan<FilmSample> sample_table(data_.samples, FILM_PRECOMP_SAMPLE_MAX);
+ /* To avoid hitting the driver TDR and slowing rendering too much, we use random sampling. */
+ /* TODO(fclem): This case needs more work. We could distribute the samples better to avoid
+ * loading the same pixel twice. */
+ data_.samples_len = sample_table.size();
+ data_.samples_weight_total = 0.0f;
+
+ int i = 0;
+ for (FilmSample &sample : sample_table) {
+ /* TODO(fclem): Own RNG. */
+ float2 random_2d = inst_.sampling.rng_2d_get(SAMPLING_SSS_U);
+ /* This randomization makes sure we converge to the right result, but it also prevents
+ * nearest-neighbor filtering from converging rapidly. */
+ random_2d.x = (random_2d.x + i) / float(FILM_PRECOMP_SAMPLE_MAX);
+
+ float2 pixel_offset = math::floor(Sampling::sample_spiral(random_2d) * data_.filter_radius);
+ sample.texel = int2(pixel_offset);
+
+ float distance_sqr = math::length_squared(pixel_offset - data_.subpixel_offset);
+ sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
+ data_.samples_weight_total += sample.weight;
+ i++;
+ }
+ }
+}
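
film_filter_weight() is not part of this diff (it lives in eevee_film_lib.glsl). A plausible Gaussian form, assuming the support is cut at the filter radius, could look like the following sketch:

#include <cmath>

float film_filter_weight(float filter_radius, float sample_distance_sqr)
{
  /* Assumed: sigma chosen so the weight is near zero at the filter boundary. */
  float sigma = filter_radius / 3.0f;
  return expf(-sample_distance_sqr / (2.0f * sigma * sigma));
}
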
+
+void Film::accumulate(const DRWView *view)
+{
+ if (inst_.is_viewport()) {
+ DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
+ DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
+ GPU_framebuffer_bind(dfbl->default_fb);
+ /* Clear when using render borders. */
+ if (data_.extent != int2(GPU_texture_width(dtxl->color), GPU_texture_height(dtxl->color))) {
+ float4 clear_color = {0.0f, 0.0f, 0.0f, 0.0f};
+ GPU_framebuffer_clear_color(dfbl->default_fb, clear_color);
+ }
+ GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));
+ }
+
+ update_sample_table();
+
+ /* Need to update the static references as they may have changed since the previous swap. */
+ weight_src_tx_ = weight_tx_.current();
+ weight_dst_tx_ = weight_tx_.next();
+ combined_src_tx_ = combined_tx_.current();
+ combined_dst_tx_ = combined_tx_.next();
+
+ data_.display_only = false;
+ data_.push_update();
+
+ DRW_view_set_active(view);
+ DRW_draw_pass(accumulate_ps_);
+
+ combined_tx_.swap();
+ weight_tx_.swap();
+
+ /* Use history after first sample. */
+ if (data_.use_history == 0) {
+ data_.use_history = 1;
+ }
+}
+
+void Film::display()
+{
+ BLI_assert(inst_.is_viewport());
+
+ /* Acquire dummy render buffers for correct binding. They will not be used. */
+ inst_.render_buffers.acquire(int2(1), (void *)this);
+
+ DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
+ GPU_framebuffer_bind(dfbl->default_fb);
+ GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));
+
+ /* Need to update the static references as they may have changed since the previous swap. */
+ weight_src_tx_ = weight_tx_.current();
+ weight_dst_tx_ = weight_tx_.next();
+ combined_src_tx_ = combined_tx_.current();
+ combined_dst_tx_ = combined_tx_.next();
+
+ data_.display_only = true;
+ data_.push_update();
+
+ DRW_view_set_active(nullptr);
+ DRW_draw_pass(accumulate_ps_);
+
+ inst_.render_buffers.release();
+
+ /* IMPORTANT: Do not swap! No accumulation has happened. */
+}
+
+float *Film::read_pass(eViewLayerEEVEEPassType pass_type)
+{
+ bool is_value = pass_is_value(pass_type);
+ Texture &accum_tx = (pass_type == EEVEE_RENDER_PASS_COMBINED) ?
+ combined_tx_.current() :
+ (pass_type == EEVEE_RENDER_PASS_Z) ?
+ depth_tx_ :
+ (is_value ? value_accum_tx_ : color_accum_tx_);
+
+ accum_tx.ensure_layer_views();
+
+ int index = pass_id_get(pass_type);
+ GPUTexture *pass_tx = accum_tx.layer_view(index);
+
+ GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE);
+
+ float *result = (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);
+
+ if (pass_is_float3(pass_type)) {
+ /* Convert result in place as we cannot do this conversion on GPU. */
+ for (auto px : IndexRange(accum_tx.width() * accum_tx.height())) {
+ *(reinterpret_cast<float3 *>(result) + px) = *(reinterpret_cast<float3 *>(result + px * 4));
+ }
+ }
+
+ return result;
+}
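
The in-place float4-to-float3 repack above works because each destination range starts at or before its source. A standalone equivalent, using memmove since the first few pixels have overlapping source and destination ranges:

#include <cstring>

void repack_rgba_to_rgb(float *data, int pixel_len)
{
  for (int px = 0; px < pixel_len; px++) {
    /* Pixel px reads floats [4*px .. 4*px+2] and writes [3*px .. 3*px+2]. */
    std::memmove(data + px * 3, data + px * 4, sizeof(float) * 3);
  }
}
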
+
+/** \} */
+
+} // namespace blender::eevee
diff --git a/source/blender/draw/engines/eevee_next/eevee_film.hh b/source/blender/draw/engines/eevee_next/eevee_film.hh
new file mode 100644
index 00000000000..1165b9a4c12
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/eevee_film.hh
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 Blender Foundation.
+ */
+
+/** \file
+ * \ingroup eevee
+ *
+ * The film class handles accumulation of samples from any distorted camera type
+ * using a pixel filter. Input needs to be jittered so that the filter converges to the right
+ * result.
+ * result.
+ *
+ * In viewport, we switch between 2 accumulation modes depending on the scene state.
+ * - For static scenes, we use a classic weighted accumulation.
+ * - For dynamic scenes (if an update is detected), we use a more temporally stable accumulation
+ * following the Temporal Anti-Aliasing method (a.k.a. Temporal Super-Sampling). This does
+ * history reprojection and rectification to avoid most of the flickering.
+ */
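
A condensed sketch of the two modes, with hypothetical helper names (the real blending happens in the film shaders introduced by this commit):

blender::float4 lerp4(const blender::float4 &a, const blender::float4 &b, float t)
{
  return a * (1.0f - t) + b * t;
}

/* Static scenes: running average over all samples so far. */
blender::float4 accumulate_static(const blender::float4 &history,
                                  const blender::float4 &sample,
                                  int sample_index)
{
  return lerp4(history, sample, 1.0f / float(sample_index + 1));
}

/* Dynamic scenes: TAA-style blend of a reprojected (and rectified) history.
 * blend_factor is usually small (e.g. 0.1) to keep the history stable. */
blender::float4 accumulate_taa(const blender::float4 &reprojected_history,
                               const blender::float4 &sample,
                               float blend_factor)
{
  return lerp4(reprojected_history, sample, blend_factor);
}
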
+
+#pragma once
+
+#include "DRW_render.h"
+
+#include "eevee_shader_shared.hh"
+
+namespace blender::eevee {
+
+class Instance;
+
+/* -------------------------------------------------------------------- */
+/** \name Film
+ * \{ */
+
+class Film {
+ public:
+ /** Stores indirection table of AOVs based on their name hash and their type. */
+ AOVsInfoDataBuf aovs_info;
+ /** For debugging purposes, but could become a user option in the future. */
+ static constexpr bool use_box_filter = false;
+
+ private:
+ Instance &inst_;
+
+ /** Main accumulation textures containing every render-pass except depth and combined. */
+ Texture color_accum_tx_;
+ Texture value_accum_tx_;
+ /** Depth accumulation texture. Separated because using a different format. */
+ Texture depth_tx_;
+ /** Combined "Color" buffer. Double buffered to allow re-projection. */
+ SwapChain<Texture, 2> combined_tx_;
+ /** Static reference as SwapChain does not actually move the objects when swapping. */
+ GPUTexture *combined_src_tx_ = nullptr;
+ GPUTexture *combined_dst_tx_ = nullptr;
+ /** Weight buffers. Double buffered to allow updating it during accumulation. */
+ SwapChain<Texture, 2> weight_tx_;
+ /** Static reference as SwapChain does not actually move the objects when swapping. */
+ GPUTexture *weight_src_tx_ = nullptr;
+ GPUTexture *weight_dst_tx_ = nullptr;
+ /** User setting to disable reprojection. Useful for debugging or to get a more precise render. */
+ bool force_disable_reprojection_ = false;
+
+ DRWPass *accumulate_ps_ = nullptr;
+
+ FilmDataBuf data_;
+
+ eViewLayerEEVEEPassType enabled_passes_ = eViewLayerEEVEEPassType(0);
+
+ public:
+ Film(Instance &inst) : inst_(inst){};
+ ~Film(){};
+
+ void init(const int2 &full_extent, const rcti *output_rect);
+
+ void sync();
+ void end_sync();
+
+ /** Accumulate the newly rendered sample contained in #RenderBuffers and blit to display. */
+ void accumulate(const DRWView *view);
+
+ /** Blit to display. No rendered sample needed. */
+ void display();
+
+ float *read_pass(eViewLayerEEVEEPassType pass_type);
+ float *read_aov(ViewLayerAOV *aov);
+
+ int2 render_extent_get() const
+ {
+ return data_.render_extent;
+ }
+
+ float2 pixel_jitter_get() const;
+
+ float background_opacity_get() const
+ {
+ return data_.background_opacity;
+ }
+
+ eViewLayerEEVEEPassType enabled_passes_get() const;
+
+ static bool pass_is_value(eViewLayerEEVEEPassType pass_type)
+ {
+ switch (pass_type) {
+ case EEVEE_RENDER_PASS_Z:
+ case EEVEE_RENDER_PASS_MIST:
+ case EEVEE_RENDER_PASS_SHADOW:
+ case EEVEE_RENDER_PASS_AO:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool pass_is_float3(eViewLayerEEVEEPassType pass_type)
+ {
+ switch (pass_type) {
+ case EEVEE_RENDER_PASS_NORMAL:
+ case EEVEE_RENDER_PASS_DIFFUSE_LIGHT:
+ case EEVEE_RENDER_PASS_DIFFUSE_COLOR:
+ case EEVEE_RENDER_PASS_SPECULAR_LIGHT:
+ case EEVEE_RENDER_PASS_SPECULAR_COLOR:
+ case EEVEE_RENDER_PASS_VOLUME_LIGHT:
+ case EEVEE_RENDER_PASS_EMIT:
+ case EEVEE_RENDER_PASS_ENVIRONMENT:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /* Returns layer offset in the accumulation texture. -1 if the pass is not enabled. */
+ int pass_id_get(eViewLayerEEVEEPassType pass_type) const
+ {
+ switch (pass_type) {
+ case EEVEE_RENDER_PASS_COMBINED:
+ return data_.combined_id;
+ case EEVEE_RENDER_PASS_Z:
+ return data_.depth_id;
+ case EEVEE_RENDER_PASS_MIST:
+ return data_.mist_id;
+ case EEVEE_RENDER_PASS_NORMAL:
+ return data_.normal_id;
+ case EEVEE_RENDER_PASS_DIFFUSE_LIGHT:
+ return data_.diffuse_light_id;
+ case EEVEE_RENDER_PASS_DIFFUSE_COLOR:
+ return data_.diffuse_color_id;
+ case EEVEE_RENDER_PASS_SPECULAR_LIGHT:
+ return data_.specular_light_id;
+ case EEVEE_RENDER_PASS_SPECULAR_COLOR:
+ return data_.specular_color_id;
+ case EEVEE_RENDER_PASS_VOLUME_LIGHT:
+ return data_.volume_light_id;
+ case EEVEE_RENDER_PASS_EMIT:
+ return data_.emission_id;
+ case EEVEE_RENDER_PASS_ENVIRONMENT:
+ return data_.environment_id;
+ case EEVEE_RENDER_PASS_SHADOW:
+ return data_.shadow_id;
+ case EEVEE_RENDER_PASS_AO:
+ return data_.ambient_occlusion_id;
+ case EEVEE_RENDER_PASS_CRYPTOMATTE:
+ return -1; /* TODO */
+ case EEVEE_RENDER_PASS_VECTOR:
+ return data_.vector_id;
+ default:
+ return -1;
+ }
+ }
+
+ static const char *pass_to_render_pass_name(eViewLayerEEVEEPassType pass_type)
+ {
+ switch (pass_type) {
+ case EEVEE_RENDER_PASS_COMBINED:
+ return RE_PASSNAME_COMBINED;
+ case EEVEE_RENDER_PASS_Z:
+ return RE_PASSNAME_Z;
+ case EEVEE_RENDER_PASS_MIST:
+ return RE_PASSNAME_MIST;
+ case EEVEE_RENDER_PASS_NORMAL:
+ return RE_PASSNAME_NORMAL;
+ case EEVEE_RENDER_PASS_DIFFUSE_LIGHT:
+ return RE_PASSNAME_DIFFUSE_DIRECT;
+ case EEVEE_RENDER_PASS_DIFFUSE_COLOR:
+ return RE_PASSNAME_DIFFUSE_COLOR;
+ case EEVEE_RENDER_PASS_SPECULAR_LIGHT:
+ return RE_PASSNAME_GLOSSY_DIRECT;
+ case EEVEE_RENDER_PASS_SPECULAR_COLOR:
+ return RE_PASSNAME_GLOSSY_COLOR;
+ case EEVEE_RENDER_PASS_VOLUME_LIGHT:
+ return RE_PASSNAME_VOLUME_LIGHT;
+ case EEVEE_RENDER_PASS_EMIT:
+ return RE_PASSNAME_EMIT;
+ case EEVEE_RENDER_PASS_ENVIRONMENT:
+ return RE_PASSNAME_ENVIRONMENT;
+ case EEVEE_RENDER_PASS_SHADOW:
+ return RE_PASSNAME_SHADOW;
+ case EEVEE_RENDER_PASS_AO:
+ return RE_PASSNAME_AO;
+ case EEVEE_RENDER_PASS_CRYPTOMATTE:
+ BLI_assert_msg(0, "Cryptomatte is not implemented yet.");
+ return ""; /* TODO */
+ case EEVEE_RENDER_PASS_VECTOR:
+ return RE_PASSNAME_VECTOR;
+ default:
+ BLI_assert(0);
+ return "";
+ }
+ }
+
+ private:
+ void init_aovs();
+ void sync_mist();
+
+ /**
+ * Precompute sample weights if they are uniform across the whole film extent.
+ */
+ void update_sample_table();
+};
+
+/** \} */
+
+} // namespace blender::eevee
diff --git a/source/blender/draw/engines/eevee_next/eevee_instance.cc b/source/blender/draw/engines/eevee_next/eevee_instance.cc
index 606630bcdef..9f8cf6dc6ba 100644
--- a/source/blender/draw/engines/eevee_next/eevee_instance.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_instance.cc
@@ -17,6 +17,7 @@
#include "DNA_ID.h"
#include "DNA_lightprobe_types.h"
#include "DNA_modifier_types.h"
+#include "RE_pipeline.h"
#include "eevee_instance.hh"
@@ -43,7 +44,7 @@ void Instance::init(const int2 &output_res,
const View3D *v3d_,
const RegionView3D *rv3d_)
{
- UNUSED_VARS(light_probe_, output_rect);
+ UNUSED_VARS(light_probe_);
render = render_;
depsgraph = depsgraph_;
camera_orig_object = camera_object_;
@@ -56,7 +57,10 @@ void Instance::init(const int2 &output_res,
update_eval_members();
- main_view.init(output_res);
+ sampling.init(scene);
+ camera.init();
+ film.init(output_res, output_rect);
+ main_view.init();
}
void Instance::set_time(float time)
@@ -90,9 +94,14 @@ void Instance::begin_sync()
materials.begin_sync();
velocity.begin_sync();
+ gpencil_engine_enabled = false;
+
+ render_buffers.sync();
pipelines.sync();
main_view.sync();
world.sync();
+ camera.sync();
+ film.sync();
}
void Instance::object_sync(Object *ob)
@@ -146,13 +155,36 @@ void Instance::object_sync(Object *ob)
ob_handle.reset_recalc_flag();
}
+/* Wrapper to use with DRW_render_object_iter. */
+void Instance::object_sync_render(void *instance_,
+ Object *ob,
+ RenderEngine *engine,
+ Depsgraph *depsgraph)
+{
+ UNUSED_VARS(engine, depsgraph);
+ Instance &inst = *reinterpret_cast<Instance *>(instance_);
+ inst.object_sync(ob);
+}
+
void Instance::end_sync()
{
velocity.end_sync();
+ sampling.end_sync();
+ film.end_sync();
}
void Instance::render_sync()
{
+ DRW_cache_restart();
+
+ begin_sync();
+ DRW_render_object_iter(this, render, depsgraph, object_sync_render);
+ end_sync();
+
+ DRW_render_instance_buffer_finish();
+ /* We also need to have a correct FBO bound for #DRW_hair_update. */
+ // GPU_framebuffer_bind();
+ // DRW_hair_update();
}
/** \} */
@@ -167,6 +199,18 @@ void Instance::render_sync()
**/
void Instance::render_sample()
{
+ if (sampling.finished_viewport()) {
+ film.display();
+ return;
+ }
+
+ /* Motion blur may need to do a re-sync after a certain number of samples. */
+ if (!is_viewport() && sampling.do_render_sync()) {
+ render_sync();
+ }
+
+ sampling.step();
+
main_view.render();
}
@@ -178,7 +222,36 @@ void Instance::render_sample()
void Instance::render_frame(RenderLayer *render_layer, const char *view_name)
{
- UNUSED_VARS(render_layer, view_name);
+ while (!sampling.finished()) {
+ this->render_sample();
+ /* TODO(fclem) print progression. */
+ }
+
+ /* Read Results. */
+ eViewLayerEEVEEPassType pass_bits = film.enabled_passes_get();
+ for (auto i : IndexRange(EEVEE_RENDER_PASS_MAX_BIT)) {
+ eViewLayerEEVEEPassType pass_type = eViewLayerEEVEEPassType(pass_bits & (1 << i));
+ if (pass_type == 0) {
+ continue;
+ }
+
+ const char *pass_name = Film::pass_to_render_pass_name(pass_type);
+ RenderPass *rp = RE_pass_find_by_name(render_layer, pass_name, view_name);
+ if (rp) {
+ float *result = film.read_pass(pass_type);
+ if (result) {
+ std::cout << "read " << pass_name << std::endl;
+ BLI_mutex_lock(&render->update_render_passes_mutex);
+ /* WORKAROUND: We use texture read to avoid using a framebuffer to get the render result.
+ * However, on some implementations, we need a buffer with a few extra bytes for the read to
+ * happen correctly (see GLTexture::read()). So we need a custom memory allocation. */
+ /* Avoid memcpy(), replace the pointer directly. */
+ MEM_SAFE_FREE(rp->rect);
+ rp->rect = result;
+ BLI_mutex_unlock(&render->update_render_passes_mutex);
+ }
+ }
+ }
}
void Instance::draw_viewport(DefaultFramebufferList *dfbl)
@@ -187,6 +260,10 @@ void Instance::draw_viewport(DefaultFramebufferList *dfbl)
render_sample();
velocity.step_swap();
+ if (!sampling.finished_viewport()) {
+ DRW_viewport_request_redraw();
+ }
+
if (materials.queued_shaders_count > 0) {
std::stringstream ss;
ss << "Compiling Shaders " << materials.queued_shaders_count;
diff --git a/source/blender/draw/engines/eevee_next/eevee_instance.hh b/source/blender/draw/engines/eevee_next/eevee_instance.hh
index 84be59fc5f0..1efda769648 100644
--- a/source/blender/draw/engines/eevee_next/eevee_instance.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_instance.hh
@@ -16,8 +16,11 @@
#include "DRW_render.h"
#include "eevee_camera.hh"
+#include "eevee_film.hh"
#include "eevee_material.hh"
#include "eevee_pipeline.hh"
+#include "eevee_renderbuffers.hh"
+#include "eevee_sampling.hh"
#include "eevee_shader.hh"
#include "eevee_sync.hh"
#include "eevee_view.hh"
@@ -38,7 +41,10 @@ class Instance {
MaterialModule materials;
PipelineModule pipelines;
VelocityModule velocity;
+ Sampling sampling;
Camera camera;
+ Film film;
+ RenderBuffers render_buffers;
MainView main_view;
World world;
@@ -57,6 +63,9 @@ class Instance {
const View3D *v3d;
const RegionView3D *rv3d;
+ /** True if the grease pencil engine might be running. */
+ bool gpencil_engine_enabled;
+
/* Info string displayed at the top of the render / viewport. */
std::string info = "";
@@ -67,7 +76,10 @@ class Instance {
materials(*this),
pipelines(*this),
velocity(*this),
+ sampling(*this),
camera(*this),
+ film(*this),
+ render_buffers(*this),
main_view(*this),
world(*this){};
~Instance(){};
@@ -92,12 +104,17 @@ class Instance {
void draw_viewport(DefaultFramebufferList *dfbl);
- bool is_viewport(void)
+ bool is_viewport() const
+ {
+ return render == nullptr;
+ }
+
+ bool overlays_enabled() const
{
- return !DRW_state_is_scene_render();
+ return v3d && ((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0);
}
- bool use_scene_lights(void) const
+ bool use_scene_lights() const
{
return (!v3d) ||
((v3d->shading.type == OB_MATERIAL) &&
@@ -107,7 +124,7 @@ class Instance {
}
/* Light the scene using the selected HDRI in the viewport shading pop-over. */
- bool use_studio_light(void) const
+ bool use_studio_light() const
{
return (v3d) && (((v3d->shading.type == OB_MATERIAL) &&
((v3d->shading.flag & V3D_SHADING_SCENE_WORLD) == 0)) ||
@@ -116,6 +133,10 @@ class Instance {
}
private:
+ static void object_sync_render(void *instance_,
+ Object *ob,
+ RenderEngine *engine,
+ Depsgraph *depsgraph);
void render_sample();
void mesh_sync(Object *ob, ObjectHandle &ob_handle);
diff --git a/source/blender/draw/engines/eevee_next/eevee_material.cc b/source/blender/draw/engines/eevee_next/eevee_material.cc
index 1676c89d679..b3161a67092 100644
--- a/source/blender/draw/engines/eevee_next/eevee_material.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_material.cc
@@ -195,7 +195,7 @@ MaterialPass MaterialModule::material_pass_get(::Material *blender_mat,
BLI_assert(GPU_material_status(matpass.gpumat) == GPU_MAT_SUCCESS);
if (GPU_material_recalc_flag_get(matpass.gpumat)) {
- // inst_.sampling.reset();
+ inst_.sampling.reset();
}
if ((pipeline_type == MAT_PIPE_DEFERRED) &&
diff --git a/source/blender/draw/engines/eevee_next/eevee_pipeline.cc b/source/blender/draw/engines/eevee_next/eevee_pipeline.cc
index 33853eba06c..214fe9c7153 100644
--- a/source/blender/draw/engines/eevee_next/eevee_pipeline.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_pipeline.cc
@@ -24,6 +24,8 @@ namespace blender::eevee {
void WorldPipeline::sync(GPUMaterial *gpumat)
{
+ RenderBuffers &rbufs = inst_.render_buffers;
+
DRWState state = DRW_STATE_WRITE_COLOR;
world_ps_ = DRW_pass_create("World", state);
@@ -34,6 +36,20 @@ void WorldPipeline::sync(GPUMaterial *gpumat)
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, world_ps_);
DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
DRW_shgroup_call_obmat(grp, DRW_cache_fullscreen_quad_get(), camera_mat.ptr());
+ DRW_shgroup_uniform_float_copy(grp, "world_opacity_fade", inst_.film.background_opacity_get());
+ /* AOVs. */
+ DRW_shgroup_uniform_image_ref(grp, "aov_color_img", &rbufs.aov_color_tx);
+ DRW_shgroup_uniform_image_ref(grp, "aov_value_img", &rbufs.aov_value_tx);
+ DRW_shgroup_storage_block_ref(grp, "aov_buf", &inst_.film.aovs_info);
+ /* RenderPasses. Cleared by background (even if bad practice). */
+ DRW_shgroup_uniform_image_ref(grp, "rp_normal_img", &rbufs.normal_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_diffuse_light_img", &rbufs.diffuse_light_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_diffuse_color_img", &rbufs.diffuse_color_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_specular_light_img", &rbufs.specular_light_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_specular_color_img", &rbufs.specular_color_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_emission_img", &rbufs.emission_tx);
+ /* To allow opaque pass rendering over it. */
+ DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
}
void WorldPipeline::render()
@@ -83,6 +99,7 @@ void ForwardPipeline::sync()
DRWShadingGroup *ForwardPipeline::material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat)
{
+ RenderBuffers &rbufs = inst_.render_buffers;
DRWPass *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ? opaque_culled_ps_ : opaque_ps_;
// LightModule &lights = inst_.lights;
// LightProbeModule &lightprobes = inst_.lightprobes;
@@ -97,6 +114,18 @@ DRWShadingGroup *ForwardPipeline::material_opaque_add(::Material *blender_mat, G
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_grid_tx", lightprobes.grid_tx_ref_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_cube_tx", lightprobes.cube_tx_ref_get());
DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
+ /* AOVs. */
+ DRW_shgroup_uniform_image_ref(grp, "aov_color_img", &rbufs.aov_color_tx);
+ DRW_shgroup_uniform_image_ref(grp, "aov_value_img", &rbufs.aov_value_tx);
+ DRW_shgroup_storage_block_ref(grp, "aov_buf", &inst_.film.aovs_info);
+ /* RenderPasses. */
+ DRW_shgroup_uniform_image_ref(grp, "rp_normal_img", &rbufs.normal_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_diffuse_light_img", &rbufs.diffuse_light_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_diffuse_color_img", &rbufs.diffuse_color_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_specular_light_img", &rbufs.specular_light_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_specular_color_img", &rbufs.specular_color_tx);
+ DRW_shgroup_uniform_image_ref(grp, "rp_emission_img", &rbufs.emission_tx);
+
/* TODO(fclem): Make this only needed if material uses it ... somehow. */
// if (true) {
// DRW_shgroup_uniform_texture_ref(
diff --git a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc
new file mode 100644
index 00000000000..c60054496c1
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.cc
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 Blender Foundation.
+ */
+
+/** \file
+ * \ingroup eevee
+ *
+ * Render buffers are textures that are filled during a view rendering.
+ * Their content is then added to the accumulation buffers of the film class.
+ * They are short lived and can be reused when doing multi-view rendering.
+ */
+
+#include "BLI_rect.h"
+
+#include "GPU_framebuffer.h"
+#include "GPU_texture.h"
+
+#include "DRW_render.h"
+
+#include "eevee_film.hh"
+#include "eevee_instance.hh"
+
+namespace blender::eevee {
+
+void RenderBuffers::sync()
+{
+ depth_tx.sync();
+ combined_tx.sync();
+
+ normal_tx.sync();
+ vector_tx.sync();
+ diffuse_light_tx.sync();
+ diffuse_color_tx.sync();
+ specular_light_tx.sync();
+ specular_color_tx.sync();
+ volume_light_tx.sync();
+ emission_tx.sync();
+ environment_tx.sync();
+ shadow_tx.sync();
+ ambient_occlusion_tx.sync();
+}
+
+void RenderBuffers::acquire(int2 extent, void *owner)
+{
+ auto pass_extent = [&](eViewLayerEEVEEPassType pass_bit) -> int2 {
+ /* Use dummy texture for disabled passes. Allows correct bindings. */
+ return (inst_.film.enabled_passes_get() & pass_bit) ? extent : int2(1);
+ };
+
+ eGPUTextureFormat color_format = GPU_RGBA16F;
+ eGPUTextureFormat float_format = GPU_R16F;
+
+ /* Depth and combined are always needed. */
+ depth_tx.acquire(extent, GPU_DEPTH24_STENCIL8, owner);
+ combined_tx.acquire(extent, color_format, owner);
+
+ bool do_vector_render_pass = inst_.film.enabled_passes_get() & EEVEE_RENDER_PASS_VECTOR;
+ /* Only RG16F when doing only reprojection or motion blur. */
+ eGPUTextureFormat vector_format = do_vector_render_pass ? GPU_RGBA16F : GPU_RG16F;
+ /* TODO(fclem): Make vector pass allocation optional if no TAA or motion blur is needed. */
+ vector_tx.acquire(extent, vector_format, owner);
+
+ normal_tx.acquire(pass_extent(EEVEE_RENDER_PASS_NORMAL), color_format, owner);
+ diffuse_light_tx.acquire(pass_extent(EEVEE_RENDER_PASS_DIFFUSE_LIGHT), color_format, owner);
+ diffuse_color_tx.acquire(pass_extent(EEVEE_RENDER_PASS_DIFFUSE_COLOR), color_format, owner);
+ specular_light_tx.acquire(pass_extent(EEVEE_RENDER_PASS_SPECULAR_LIGHT), color_format, owner);
+ specular_color_tx.acquire(pass_extent(EEVEE_RENDER_PASS_SPECULAR_COLOR), color_format, owner);
+ volume_light_tx.acquire(pass_extent(EEVEE_RENDER_PASS_VOLUME_LIGHT), color_format, owner);
+ emission_tx.acquire(pass_extent(EEVEE_RENDER_PASS_EMIT), color_format, owner);
+ environment_tx.acquire(pass_extent(EEVEE_RENDER_PASS_ENVIRONMENT), color_format, owner);
+ shadow_tx.acquire(pass_extent(EEVEE_RENDER_PASS_SHADOW), float_format, owner);
+ ambient_occlusion_tx.acquire(pass_extent(EEVEE_RENDER_PASS_AO), float_format, owner);
+
+ const AOVsInfoData &aovs = inst_.film.aovs_info;
+ aov_color_tx.ensure_2d_array(
+ color_format, (aovs.color_len > 0) ? extent : int2(1), max_ii(1, aovs.color_len));
+ aov_value_tx.ensure_2d_array(
+ float_format, (aovs.value_len > 0) ? extent : int2(1), max_ii(1, aovs.value_len));
+}
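
A usage sketch of the acquire/release lifetime, assuming the public members declared in this commit (owner is any stable pointer identifying the user of the pooled textures):

void render_one_view(blender::eevee::Instance &inst, const DRWView *view, void *owner)
{
  /* Disabled passes get 1x1 dummy textures, so shader bindings stay valid. */
  inst.render_buffers.acquire(inst.film.render_extent_get(), owner);
  /* ... draw the view; render passes write into the acquired buffers ... */
  inst.film.accumulate(view); /* Adds the new sample to the film buffers. */
  inst.render_buffers.release();
}
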
+
+void RenderBuffers::release()
+{
+ depth_tx.release();
+ combined_tx.release();
+
+ normal_tx.release();
+ vector_tx.release();
+ diffuse_light_tx.release();
+ diffuse_color_tx.release();
+ specular_light_tx.release();
+ specular_color_tx.release();
+ volume_light_tx.release();
+ emission_tx.release();
+ environment_tx.release();
+ shadow_tx.release();
+ ambient_occlusion_tx.release();
+}
+
+} // namespace blender::eevee
diff --git a/source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh
new file mode 100644
index 00000000000..8c91fed2f0f
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/eevee_renderbuffers.hh
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2022 Blender Foundation.
+ */
+
+/** \file
+ * \ingroup eevee
+ *
+ * Render buffers are textures that are filled during a view rendering.
+ * Their content is then added to the accumulation buffers of the film class.
+ * They are short lived and can be reused when doing multi view rendering.
+ */
+
+#pragma once
+
+#include "DRW_render.h"
+
+#include "eevee_shader_shared.hh"
+
+namespace blender::eevee {
+
+class Instance;
+
+class RenderBuffers {
+ public:
+ TextureFromPool depth_tx;
+ TextureFromPool combined_tx;
+
+ // TextureFromPool mist_tx; /* Derived from depth_tx during accumulation. */
+ TextureFromPool normal_tx;
+ TextureFromPool vector_tx;
+ TextureFromPool diffuse_light_tx;
+ TextureFromPool diffuse_color_tx;
+ TextureFromPool specular_light_tx;
+ TextureFromPool specular_color_tx;
+ TextureFromPool volume_light_tx;
+ TextureFromPool emission_tx;
+ TextureFromPool environment_tx;
+ TextureFromPool shadow_tx;
+ TextureFromPool ambient_occlusion_tx;
+ // TextureFromPool cryptomatte_tx; /* TODO */
+ /* TODO(fclem): Use textures from the pool once it supports texture arrays. */
+ Texture aov_color_tx;
+ Texture aov_value_tx;
+
+ private:
+ Instance &inst_;
+
+ public:
+ RenderBuffers(Instance &inst) : inst_(inst){};
+
+ void sync();
+ /* Acquires (and ensures) the render buffers before rendering to them. */
+ void acquire(int2 extent, void *owner);
+ void release();
+};
+
+} // namespace blender::eevee
diff --git a/source/blender/draw/engines/eevee_next/eevee_sampling.cc b/source/blender/draw/engines/eevee_next/eevee_sampling.cc
new file mode 100644
index 00000000000..1d320c75f16
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/eevee_sampling.cc
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 Blender Foundation.
+ */
+
+/** \file
+ * \ingroup eevee
+ *
+ * Random number generator, contains persistent state and sample count logic.
+ */
+
+#include "BLI_rand.h"
+
+#include "eevee_instance.hh"
+#include "eevee_sampling.hh"
+
+namespace blender::eevee {
+
+/* -------------------------------------------------------------------- */
+/** \name Sampling
+ * \{ */
+
+void Sampling::init(const Scene *scene)
+{
+ sample_count_ = inst_.is_viewport() ? scene->eevee.taa_samples : scene->eevee.taa_render_samples;
+
+ if (sample_count_ == 0) {
+ BLI_assert(inst_.is_viewport());
+ sample_count_ = infinite_sample_count_;
+ }
+
+ motion_blur_steps_ = !inst_.is_viewport() ? scene->eevee.motion_blur_steps : 1;
+ sample_count_ = divide_ceil_u(sample_count_, motion_blur_steps_);
+
+ if (scene->eevee.flag & SCE_EEVEE_DOF_JITTER) {
+ if (sample_count_ == infinite_sample_count_) {
+ /* Special case for viewport continuous rendering. We clamp to a max sample
+ * count so that the jittered DoF eventually converges. */
+ dof_ring_count_ = 6;
+ }
+ else {
+ dof_ring_count_ = sampling_web_ring_count_get(dof_web_density_, sample_count_);
+ }
+ dof_sample_count_ = sampling_web_sample_count_get(dof_web_density_, dof_ring_count_);
+ /* Change total sample count to fill the web pattern entirely. */
+ sample_count_ = divide_ceil_u(sample_count_, dof_sample_count_) * dof_sample_count_;
+ }
+ else {
+ dof_ring_count_ = 0;
+ dof_sample_count_ = 1;
+ }
+
+ /* Only multiply afterwards to have the full DoF web pattern for each time step. */
+ sample_count_ *= motion_blur_steps_;
+}
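
A worked example of this rounding, under assumed settings (taa_render_samples = 40, motion_blur_steps = 4, dof_sample_count_ = 16):

uint sample_count = divide_ceil_u(40, 4);            /* 10 samples per motion step. */
sample_count = divide_ceil_u(sample_count, 16) * 16; /* Rounded up to a full DoF web: 16. */
sample_count *= 4;                                   /* 64 samples in total. */
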
+
+void Sampling::end_sync()
+{
+ if (reset_) {
+ viewport_sample_ = 0;
+ }
+
+ if (inst_.is_viewport()) {
+
+ interactive_mode_ = viewport_sample_ < interactive_mode_threshold;
+
+ bool interactive_mode_disabled = (inst_.scene->eevee.flag & SCE_EEVEE_TAA_REPROJECTION) == 0;
+ if (interactive_mode_disabled) {
+ interactive_mode_ = false;
+ sample_ = viewport_sample_;
+ }
+ else if (interactive_mode_) {
+ int interactive_sample_count = min_ii(interactive_sample_max_, sample_count_);
+
+ if (viewport_sample_ < interactive_sample_count) {
+ /* Loop over the same starting samples. */
+ sample_ = sample_ % interactive_sample_count;
+ }
+ else {
+ /* Break out of the loop and resume normal pattern. */
+ sample_ = interactive_sample_count;
+ }
+ }
+ }
+}
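
The viewport branch condenses to the following selection logic (illustrative helper, not in the diff):

int viewport_sample_select(int sample, int viewport_sample, bool interactive, int loop_len)
{
  if (!interactive) {
    /* TAA reprojection disabled: plain ascending sample index. */
    return viewport_sample;
  }
  /* Loop over the first samples while interacting, then resume the normal pattern. */
  return (viewport_sample < loop_len) ? sample % loop_len : loop_len;
}
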
+
+void Sampling::step()
+{
+ {
+ /* TODO(fclem): we could use some persistent state to speed up the computation. */
+ double2 r, offset = {0, 0};
+ /* Using 2,3 primes as per UE4 Temporal AA presentation.
+ * http://advances.realtimerendering.com/s2014/epic/TemporalAA.pptx (slide 14) */
+ uint2 primes = {2, 3};
+ BLI_halton_2d(primes, offset, sample_ + 1, r);
+ /* WORKAROUND: We offset the distribution to make the first sample (0,0). This way, we are
+ * assured that at least one of the samples inside the TAA rotation will match the one from the
+ * draw manager. This makes sure overlays are correctly composited in static scenes. */
+ data_.dimensions[SAMPLING_FILTER_U] = fractf(r[0] + (1.0 / 2.0));
+ data_.dimensions[SAMPLING_FILTER_V] = fractf(r[1] + (2.0 / 3.0));
+ /* TODO de-correlate. */
+ data_.dimensions[SAMPLING_TIME] = r[0];
+ data_.dimensions[SAMPLING_CLOSURE] = r[1];
+ data_.dimensions[SAMPLING_RAYTRACE_X] = r[0];
+ }
+ {
+ double2 r, offset = {0, 0};
+ uint2 primes = {5, 7};
+ BLI_halton_2d(primes, offset, sample_ + 1, r);
+ data_.dimensions[SAMPLING_LENS_U] = r[0];
+ data_.dimensions[SAMPLING_LENS_V] = r[1];
+ /* TODO de-correlate. */
+ data_.dimensions[SAMPLING_LIGHTPROBE] = r[0];
+ data_.dimensions[SAMPLING_TRANSPARENCY] = r[1];
+ }
+ {
+ /* Using a leaped Halton sequence so we can reuse the same primes as the lens. */
+ double3 r, offset = {0, 0, 0};
+ uint64_t leap = 11;
+ uint3 primes = {5, 4, 7};
+ BLI_halton_3d(primes, offset, sample_ * leap, r);
+ data_.dimensions[SAMPLING_SHADOW_U] = r[0];
+ data_.dimensions[SAMPLING_SHADOW_V] = r[1];
+ data_.dimensions[SAMPLING_SHADOW_W] = r[2];
+ /* TODO de-correlate. */
+ data_.dimensions[SAMPLING_RAYTRACE_U] = r[0];
+ data_.dimensions[SAMPLING_RAYTRACE_V] = r[1];
+ data_.dimensions[SAMPLING_RAYTRACE_W] = r[2];
+ }
+ {
+ /* Using a leaped Halton sequence so we can reuse the same primes. */
+ double2 r, offset = {0, 0};
+ uint64_t leap = 5;
+ uint2 primes = {2, 3};
+ BLI_halton_2d(primes, offset, sample_ * leap, r);
+ data_.dimensions[SAMPLING_SHADOW_X] = r[0];
+ data_.dimensions[SAMPLING_SHADOW_Y] = r[1];
+ /* TODO de-correlate. */
+ data_.dimensions[SAMPLING_SSS_U] = r[0];
+ data_.dimensions[SAMPLING_SSS_V] = r[1];
+ }
+
+ data_.push_update();
+
+ viewport_sample_++;
+ sample_++;
+
+ reset_ = false;
+}
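+
+/* Sanity sketch (illustrative, not part of the change): for sample_ == 0, BLI_halton_2d with
+ * primes {2, 3} yields r = (1/2, 1/3). The offsets above then give
+ * fractf(0.5 + 0.5) == 0.0 and fractf(1.0 / 3.0 + 2.0 / 3.0) == 0.0,
+ * i.e. the first filter sample is exactly (0, 0), matching the draw manager's un-jittered
+ * sample as the WORKAROUND comment requires. */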
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Sampling patterns
+ * \{ */
+
+float3 Sampling::sample_ball(const float3 &rand)
+{
+ float3 sample;
+ sample.z = rand.x * 2.0f - 1.0f; /* cos theta */
+
+ float r = sqrtf(fmaxf(0.0f, 1.0f - square_f(sample.z))); /* sin theta */
+
+ float omega = rand.y * 2.0f * M_PI;
+ sample.x = r * cosf(omega);
+ sample.y = r * sinf(omega);
+
+ sample *= sqrtf(sqrtf(rand.z));
+ return sample;
+}
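+
+/* Note on the radius shaping above (illustrative, not part of the change):
+ * sqrtf(sqrtf(rand.z)) is rand.z^(1/4). A volumetrically uniform ball would use rand.z^(1/3);
+ * the smaller exponent biases points outward so that the orthogonal projection onto any plane
+ * stays (close to) a uniform disc, as the header doc-string states. */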
+
+float2 Sampling::sample_disk(const float2 &rand)
+{
+ float omega = rand.y * 2.0f * M_PI;
+ return sqrtf(rand.x) * float2(cosf(omega), sinf(omega));
+}
+
+float2 Sampling::sample_spiral(const float2 &rand)
+{
+ /* Fibonacci spiral. */
+ float omega = 4.0f * M_PI * (1.0f + sqrtf(5.0f)) * rand.x;
+ float r = sqrtf(rand.x);
+ /* Random rotation. */
+ omega += rand.y * 2.0f * M_PI;
+ return r * float2(cosf(omega), sinf(omega));
+}
+
+void Sampling::dof_disk_sample_get(float *r_radius, float *r_theta) const
+{
+ if (dof_ring_count_ == 0) {
+ *r_radius = *r_theta = 0.0f;
+ return;
+ }
+
+ int s = sample_ - 1;
+ int ring = 0;
+ int ring_sample_count = 1;
+ int ring_sample = 1;
+
+ s = s * (dof_web_density_ - 1);
+ s = s % dof_sample_count_;
+
+ /* Choosing samples so we get faster convergence.
+ * The issue here is that we cannot map a low discrepancy sequence to this sampling pattern
+ * because the same sample could be chosen twice in relatively short intervals. */
+ /* For now just use an ascending sequence with an offset. This gives us relatively quick
+ * initial coverage and relatively high distance between samples. */
+ /* TODO(@fclem) We can try to order samples based on a LDS into a table to avoid duplicates.
+ * The drawback would be some memory consumption and initialization time. */
+ int samples_passed = 1;
+ while (s >= samples_passed) {
+ ring++;
+ ring_sample_count = ring * dof_web_density_;
+ ring_sample = s - samples_passed;
+ ring_sample = (ring_sample + 1) % ring_sample_count;
+ samples_passed += ring_sample_count;
+ }
+
+ *r_radius = ring / (float)dof_ring_count_;
+ *r_theta = 2.0f * M_PI * ring_sample / (float)ring_sample_count;
+}
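+
+/* Worked example (illustrative numbers, not part of the change): with dof_web_density_ = 6 and
+ * dof_ring_count_ = 2, dof_sample_count_ = ((2 * 2 + 2) / 2) * 6 + 1 = 19. A wrapped index
+ * s = 7 walks past ring 1 (6 samples, samples_passed 1 -> 7) and lands on ring 2 with
+ * ring_sample = ((7 - 7) + 1) % 12 = 1, so *r_radius = 2 / 2.0f = 1.0f and
+ * *r_theta = 2 * pi * 1 / 12. */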
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Cumulative Distribution Function (CDF)
+ * \{ */
+
+/* Creates a discrete cumulative distribution function table from a given curvemapping.
+ * Output cdf vector is expected to already be sized according to the wanted resolution. */
+void Sampling::cdf_from_curvemapping(const CurveMapping &curve, Vector<float> &cdf)
+{
+ BLI_assert(cdf.size() > 1);
+ cdf[0] = 0.0f;
+ /* Actual CDF evaluation. */
+ /* NOTE: stop one short of the end since the loop writes to `cdf[u + 1]`. */
+ for (const int u : IndexRange(cdf.size() - 1)) {
+ float x = (float)(u + 1) / (float)(cdf.size() - 1);
+ cdf[u + 1] = cdf[u] + BKE_curvemapping_evaluateF(&curve, 0, x);
+ }
+ /* Normalize the CDF. */
+ for (int u : cdf.index_range()) {
+ cdf[u] /= cdf.last();
+ }
+ /* Just to make sure. */
+ cdf.last() = 1.0f;
+}
+
+/* Inverts a cumulative distribution function.
+ * Output vector is expected to already be sized according to the wanted resolution. */
+void Sampling::cdf_invert(Vector<float> &cdf, Vector<float> &inverted_cdf)
+{
+ for (int u : inverted_cdf.index_range()) {
+ float x = (float)u / (float)(inverted_cdf.size() - 1);
+ for (int i : cdf.index_range()) {
+ if (i == cdf.size() - 1) {
+ inverted_cdf[u] = 1.0f;
+ }
+ else if (cdf[i] >= x) {
+ float t = (x - cdf[i]) / (cdf[i + 1] - cdf[i]);
+ inverted_cdf[u] = ((float)i + t) / (float)(cdf.size() - 1);
+ break;
+ }
+ }
+ }
+}
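+
+/* Usage sketch (hypothetical sizes, not part of the change):
+ * Vector<float> cdf(64 + 1);
+ * Vector<float> inverted_cdf(64);
+ * Sampling::cdf_from_curvemapping(curve, cdf);
+ * Sampling::cdf_invert(cdf, inverted_cdf);
+ * Feeding a uniform random value through `inverted_cdf` then importance-samples the curve. */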
+
+/** \} */
+
+} // namespace blender::eevee
diff --git a/source/blender/draw/engines/eevee_next/eevee_sampling.hh b/source/blender/draw/engines/eevee_next/eevee_sampling.hh
new file mode 100644
index 00000000000..c604ecef40b
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/eevee_sampling.hh
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 Blender Foundation.
+ */
+
+/** \file
+ * \ingroup eevee
+ *
+ * Random number generator, contains persistent state and sample count logic.
+ */
+
+#pragma once
+
+#include "BKE_colortools.h"
+#include "BLI_system.h"
+#include "BLI_vector.hh"
+#include "DNA_scene_types.h"
+#include "DRW_render.h"
+
+#include "eevee_shader_shared.hh"
+
+namespace blender::eevee {
+
+class Instance;
+
+class Sampling {
+ private:
+ Instance &inst_;
+
+ /* Number of samples in the first ring of jittered depth of field. */
+ constexpr static uint64_t dof_web_density_ = 6;
+ /* High number of samples for viewport infinite rendering. */
+ constexpr static uint64_t infinite_sample_count_ = 0xFFFFFFu;
+ /* During interactive rendering, loop over the first few samples. */
+ constexpr static uint64_t interactive_sample_max_ = 8;
+
+ /** 0 based current sample. Might not increase sequentially in viewport. */
+ uint64_t sample_ = 0;
+ /** Target sample count. */
+ uint64_t sample_count_ = 64;
+ /** Number of rings in the web pattern of the jittered Depth of Field. */
+ uint64_t dof_ring_count_ = 0;
+ /** Number of samples in the web pattern of the jittered Depth of Field. */
+ uint64_t dof_sample_count_ = 1;
+ /** Motion blur steps. */
+ uint64_t motion_blur_steps_ = 1;
+ /** Increases sequentially as long as the view and the scene are static. */
+ int64_t viewport_sample_ = 0;
+ /** Tag to reset sampling for the next sample. */
+ bool reset_ = false;
+ /**
+ * Switch between interactive and static accumulation.
+ * In interactive mode, image stability is prioritized over quality.
+ */
+ bool interactive_mode_ = false;
+ /**
+ * Sample count after which we use the static accumulation.
+ * Interactive sampling from sample 0 to (interactive_mode_threshold - 1).
+ * Accumulation sampling from sample interactive_mode_threshold to sample_count_.
+ */
+ static constexpr int interactive_mode_threshold = 3;
+
+ SamplingDataBuf data_;
+
+ public:
+ Sampling(Instance &inst) : inst_(inst){};
+ ~Sampling(){};
+
+ void init(const Scene *scene);
+ void end_sync();
+ void step();
+
+ /* Viewport only: call this to notify that something in the scene changed.
+ * This will reset accumulation. Do not call after end_sync() or during sample rendering. */
+ void reset()
+ {
+ reset_ = true;
+ }
+
+ /* Viewport Only: true if an update happened in the scene and accumulation needs reset. */
+ bool is_reset() const
+ {
+ return reset_;
+ }
+
+ void bind_resources(DRWShadingGroup *grp)
+ {
+ DRW_shgroup_storage_block_ref(grp, "sampling_buf", &data_);
+ }
+
+ /* Returns a pseudo random number in [0..1] range. Each dimension is de-correlated. */
+ float rng_get(eSamplingDimension dimension) const
+ {
+ return data_.dimensions[dimension];
+ }
+
+ /* Returns a pseudo random number in [0..1] range. Each dimension is de-correlated. */
+ float2 rng_2d_get(eSamplingDimension starting_dimension) const
+ {
+ return *reinterpret_cast<const float2 *>(&data_.dimensions[starting_dimension]);
+ }
+
+ /* Returns a pseudo random number in [0..1] range. Each dimension is de-correlated. */
+ float3 rng_3d_get(eSamplingDimension starting_dimension) const
+ {
+ return *reinterpret_cast<const float3 *>(&data_.dimensions[starting_dimension]);
+ }
+
+ /* Returns true if rendering has finished. */
+ bool finished() const
+ {
+ return (sample_ >= sample_count_);
+ }
+
+ /* Returns true if viewport smoothing and sampling has finished. */
+ bool finished_viewport() const
+ {
+ return (viewport_sample_ >= sample_count_) && !interactive_mode_;
+ }
+
+ /* Returns true if viewport renderer is in interactive mode and should use TAA. */
+ bool interactive_mode() const
+ {
+ return interactive_mode_;
+ }
+
+ uint64_t sample_count() const
+ {
+ return sample_count_;
+ }
+
+ /* Return true if we are starting a new motion blur step. We need to run sync again since
+ * depsgraph was updated by MotionBlur::step(). */
+ bool do_render_sync() const
+ {
+ return ((sample_ % (sample_count_ / motion_blur_steps_)) == 0);
+ }
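+
+ /* e.g. (illustrative numbers): with sample_count_ = 64 and motion_blur_steps_ = 2, this
+ * returns true at samples 0 and 32, once at the start of each motion blur step. */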
+
+ /**
+ * Special ball distribution:
+ * Points are distributed in a way that when they are orthogonally
+ * projected into any plane, the resulting distribution is (close to)
+ * a uniform disc distribution.
+ * \a rand is 3 random floats in the [0..1] range.
+ * Returns a point in a ball of radius 1 centered on the origin.
+ */
+ static float3 sample_ball(const float3 &rand);
+
+ /**
+ * Uniform disc distribution.
+ * \a rand is 2 random floats in the [0..1] range.
+ * Returns a point in a disk of radius 1 centered on the origin.
+ */
+ static float2 sample_disk(const float2 &rand);
+
+ /**
+ * Uniform disc distribution using Fibonacci spiral sampling.
+ * \a rand is 2 random floats in the [0..1] range.
+ * Returns a point in a disk of radius 1 centered on the origin.
+ */
+ static float2 sample_spiral(const float2 &rand);
+
+ /**
+ * Special RNG for depth of field.
+ * Returns \a radius and \a theta angle offset to apply to the web sampling pattern.
+ */
+ void dof_disk_sample_get(float *r_radius, float *r_theta) const;
+
+ /**
+ * Returns the ring count of the jittered depth of field web pattern.
+ */
+ uint64_t dof_ring_count_get() const
+ {
+ return dof_ring_count_;
+ }
+
+ /**
+ * Returns sample count inside the jittered depth of field web pattern.
+ */
+ uint64_t dof_sample_count_get() const
+ {
+ return dof_sample_count_;
+ }
+
+ /* Cumulative Distribution Function Utils. */
+ static void cdf_from_curvemapping(const CurveMapping &curve, Vector<float> &cdf);
+ static void cdf_invert(Vector<float> &cdf, Vector<float> &inverted_cdf);
+};
+
+} // namespace blender::eevee
diff --git a/source/blender/draw/engines/eevee_next/eevee_shader.cc b/source/blender/draw/engines/eevee_next/eevee_shader.cc
index 09aa97e49e9..7db9692783a 100644
--- a/source/blender/draw/engines/eevee_next/eevee_shader.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_shader.cc
@@ -78,8 +78,10 @@ ShaderModule::~ShaderModule()
const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_type)
{
switch (shader_type) {
- case VELOCITY_RESOLVE:
- return "eevee_velocity_resolve";
+ case FILM_FRAG:
+ return "eevee_film_frag";
+ case FILM_COMP:
+ return "eevee_film_comp";
/* To avoid compiler warning about missing case. */
case MAX_SHADER_TYPE:
return "";
@@ -161,7 +163,6 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
}
}
info.vertex_inputs_.clear();
- info.additional_info("draw_curves_infos");
break;
case MAT_GEOM_WORLD:
/**
diff --git a/source/blender/draw/engines/eevee_next/eevee_shader.hh b/source/blender/draw/engines/eevee_next/eevee_shader.hh
index 0f42e880a10..280aaab4e1c 100644
--- a/source/blender/draw/engines/eevee_next/eevee_shader.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_shader.hh
@@ -26,7 +26,8 @@ namespace blender::eevee {
/* Keep alphabetical order and clean prefix. */
enum eShaderType {
- VELOCITY_RESOLVE = 0,
+ FILM_FRAG = 0,
+ FILM_COMP,
MAX_SHADER_TYPE,
};
diff --git a/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh b/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh
index eb409f076f3..3c10f633740 100644
--- a/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_shader_shared.hh
@@ -12,7 +12,7 @@
# include "BLI_memory_utils.hh"
# include "DRW_gpu_wrapper.hh"
-// # include "eevee_defines.hh"
+# include "eevee_defines.hh"
# include "GPU_shader_shared.h"
@@ -28,6 +28,63 @@ using draw::TextureFromPool;
#define UBO_MIN_MAX_SUPPORTED_SIZE 1 << 14
/* -------------------------------------------------------------------- */
+/** \name Sampling
+ * \{ */
+
+enum eSamplingDimension : uint32_t {
+ SAMPLING_FILTER_U = 0u,
+ SAMPLING_FILTER_V = 1u,
+ SAMPLING_LENS_U = 2u,
+ SAMPLING_LENS_V = 3u,
+ SAMPLING_TIME = 4u,
+ SAMPLING_SHADOW_U = 5u,
+ SAMPLING_SHADOW_V = 6u,
+ SAMPLING_SHADOW_W = 7u,
+ SAMPLING_SHADOW_X = 8u,
+ SAMPLING_SHADOW_Y = 9u,
+ SAMPLING_CLOSURE = 10u,
+ SAMPLING_LIGHTPROBE = 11u,
+ SAMPLING_TRANSPARENCY = 12u,
+ SAMPLING_SSS_U = 13u,
+ SAMPLING_SSS_V = 14u,
+ SAMPLING_RAYTRACE_U = 15u,
+ SAMPLING_RAYTRACE_V = 16u,
+ SAMPLING_RAYTRACE_W = 17u,
+ SAMPLING_RAYTRACE_X = 18u
+};
+
+/**
+ * IMPORTANT: Make sure the array can contain all sampling dimensions.
+ * Also note that it needs to be a multiple of 4.
+ */
+#define SAMPLING_DIMENSION_COUNT 20
+
+/* NOTE(@fclem): Needs to be used in #StorageBuffer because of arrays of scalars. */
+struct SamplingData {
+ /** Array containing random values from Low Discrepancy Sequence in [0..1) range. */
+ float dimensions[SAMPLING_DIMENSION_COUNT];
+};
+BLI_STATIC_ASSERT_ALIGN(SamplingData, 16)
+
+/* Returns total sample count in a web pattern of the given size. */
+static inline int sampling_web_sample_count_get(int web_density, int ring_count)
+{
+ return ((ring_count * ring_count + ring_count) / 2) * web_density + 1;
+}
+
+/* Returns lowest possible ring count that contains at least sample_count samples. */
+static inline int sampling_web_ring_count_get(int web_density, int sample_count)
+{
+ /* Inversion of sampling_web_sample_count_get(). */
+ float x = 2.0f * (float(sample_count) - 1.0f) / float(web_density);
+ /* Solving the polynomial. We only keep the positive solution. */
+ float discriminant = 1.0f + 4.0f * x;
+ return int(ceilf(0.5f * (sqrtf(discriminant) - 1.0f)));
+}
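+
+/* Worked example (illustrative numbers): web_density = 6 and sample_count = 64 give
+ * x = 2 * 63 / 6 = 21 and discriminant = 85, so ring count = ceil(0.5 * (sqrt(85) - 1)) = 5.
+ * Cross-check with sampling_web_sample_count_get(): 4 rings hold ((16 + 4) / 2) * 6 + 1 = 61
+ * samples (too few), while 5 rings hold ((25 + 5) / 2) * 6 + 1 = 91 >= 64. */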
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
/** \name Camera
* \{ */
@@ -65,15 +122,148 @@ struct CameraData {
/** Clipping distances. */
float clip_near;
float clip_far;
- /** Film pixel filter radius. */
- float filter_size;
eCameraType type;
+
+ bool initialized;
+
+#ifdef __cplusplus
+ /* Small constructor to allow detecting new buffers. */
+ CameraData() : initialized(false){};
+#endif
};
BLI_STATIC_ASSERT_ALIGN(CameraData, 16)
/** \} */
/* -------------------------------------------------------------------- */
+/** \name Film
+ * \{ */
+
+#define FILM_PRECOMP_SAMPLE_MAX 16
+
+struct FilmSample {
+ int2 texel;
+ float weight;
+ /** Used for accumulation. */
+ float weight_sum_inv;
+};
+BLI_STATIC_ASSERT_ALIGN(FilmSample, 16)
+
+struct FilmData {
+ /** Size of the film in pixels. */
+ int2 extent;
+ /** Offset of the film in the full-res frame, in pixels. */
+ int2 offset;
+ /** Extent used by the render buffers when rendering the main views. */
+ int2 render_extent;
+ /** Sub-pixel offset applied to the window matrix.
+ * NOTE: In final film pixel unit.
+ * NOTE: Positive values make the view translate in the negative axes direction.
+ * NOTE: The origin is the center of the lower left film pixel of the area covered by a render
+ * pixel if using scaled resolution rendering.
+ */
+ float2 subpixel_offset;
+ /** Scaling factor to convert texels to UVs. */
+ float2 extent_inv;
+ /** Is true if history is valid and can be sampled. Bypassing history resets accumulation. */
+ bool1 use_history;
+ /** Is true if combined buffer is valid and can be re-projected to reduce variance. */
+ bool1 use_reprojection;
+ /** Is true if accumulation of non-filtered passes is needed. */
+ bool1 has_data;
+ /** Is true if accumulation of filtered passes is needed. */
+ bool1 any_render_pass_1;
+ bool1 any_render_pass_2;
+ /** Controlled by user in lookdev mode or by render settings. */
+ float background_opacity;
+ float _pad0;
+ /** Output counts per type. */
+ int color_len, value_len;
+ /** Index in color_accum_img or value_accum_img of each pass. -1 if pass is not enabled. */
+ int mist_id;
+ int normal_id;
+ int vector_id;
+ int diffuse_light_id;
+ int diffuse_color_id;
+ int specular_light_id;
+ int specular_color_id;
+ int volume_light_id;
+ int emission_id;
+ int environment_id;
+ int shadow_id;
+ int ambient_occlusion_id;
+ /** Not indexed but still not -1 if enabled. */
+ int depth_id;
+ int combined_id;
+ /** Id of the render-pass to be displayed. -1 for combined. */
+ int display_id;
+ /** True if the render-pass to be displayed is from the value accum buffer. */
+ bool1 display_is_value;
+ /** True if we bypass the accumulation and directly output the accumulation buffer. */
+ bool1 display_only;
+ /** Start index and count of AOVs. */
+ int aov_color_id, aov_color_len;
+ int aov_value_id, aov_value_len;
+ /** Settings to render the mist pass. */
+ float mist_scale, mist_bias, mist_exponent;
+ /** Scene exposure used for better noise reduction. */
+ float exposure_scale;
+ /** Scaling factor for scaled resolution rendering. */
+ int scaling_factor;
+ /** Film pixel filter radius. */
+ float filter_radius;
+ /** Precomputed samples. First in the table is the closest one. The rest is unordered. */
+ int samples_len;
+ /** Sum of the weights of all samples in the sample table. */
+ float samples_weight_total;
+ FilmSample samples[FILM_PRECOMP_SAMPLE_MAX];
+};
+BLI_STATIC_ASSERT_ALIGN(FilmData, 16)
+
+static inline float film_filter_weight(float filter_radius, float sample_distance_sqr)
+{
+#if 1 /* Faster */
+ /* Gaussian fitted to Blackman-Harris. */
+ float r = sample_distance_sqr / (filter_radius * filter_radius);
+ const float sigma = 0.284;
+ const float fac = -0.5 / (sigma * sigma);
+ float weight = expf(fac * r);
+#else
+ /* Blackman-Harris filter. */
+ float r = M_2PI * saturate(0.5 + sqrtf(sample_distance_sqr) / (2.0 * filter_radius));
+ float weight = 0.35875 - 0.48829 * cosf(r) + 0.14128 * cosf(2.0 * r) - 0.01168 * cosf(3.0 * r);
+#endif
+ return weight;
+}
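+
+/* Scale sketch (illustrative values): the Gaussian branch returns 1.0 at the pixel center
+ * (r = 0) and expf(-0.5 / (0.284 * 0.284)) ~= 2e-3 one filter_radius away (r = 1), so
+ * samples beyond the filter radius contribute almost nothing to the film pixel. */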
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Arbitrary Output Variables
+ * \{ */
+
+/* Theoretical max is 128 because of texture array limits and VRAM usage.
+ * However, the output_aov() function performs a linear search inside all the hashes.
+ * If we find a way to avoid this we could bump this number up. */
+#define AOV_MAX 16
+
+/* NOTE(@fclem): Needs to be used in #StorageBuffer because of arrays of scalars. */
+struct AOVsInfoData {
+ uint hash_value[AOV_MAX];
+ uint hash_color[AOV_MAX];
+ /* Length of used data. */
+ uint color_len;
+ uint value_len;
+ /** Id of the AOV to be displayed (from the start of the AOV array). -1 for combined. */
+ int display_id;
+ /** True if the AOV to be displayed is from the value accum buffer. */
+ bool1 display_is_value;
+};
+BLI_STATIC_ASSERT_ALIGN(AOVsInfoData, 16)
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
/** \name VelocityModule
* \{ */
@@ -178,10 +368,13 @@ float4 utility_tx_sample(sampler2DArray util_tx, float2 uv, float layer)
#ifdef __cplusplus
+using AOVsInfoDataBuf = draw::StorageBuffer<AOVsInfoData>;
using CameraDataBuf = draw::UniformBuffer<CameraData>;
+using FilmDataBuf = draw::UniformBuffer<FilmData>;
+using SamplingDataBuf = draw::StorageBuffer<SamplingData>;
+using VelocityGeometryBuf = draw::StorageArrayBuffer<float4, 16, true>;
using VelocityIndexBuf = draw::StorageArrayBuffer<VelocityIndex, 16>;
using VelocityObjectBuf = draw::StorageArrayBuffer<float4x4, 16>;
-using VelocityGeometryBuf = draw::StorageArrayBuffer<float4, 16, true>;
} // namespace blender::eevee
#endif
diff --git a/source/blender/draw/engines/eevee_next/eevee_sync.cc b/source/blender/draw/engines/eevee_next/eevee_sync.cc
index 42af251d770..e2d4b0ac1c2 100644
--- a/source/blender/draw/engines/eevee_next/eevee_sync.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_sync.cc
@@ -47,7 +47,7 @@ ObjectHandle &SyncModule::sync_object(Object *ob)
const int recalc_flags = ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_SHADING |
ID_RECALC_GEOMETRY;
if ((eevee_dd.recalc & recalc_flags) != 0) {
- // inst_.sampling.reset();
+ inst_.sampling.reset();
UNUSED_VARS(inst_);
}
@@ -63,7 +63,7 @@ WorldHandle &SyncModule::sync_world(::World *world)
const int recalc_flags = ID_RECALC_ALL;
if ((eevee_dd.recalc & recalc_flags) != 0) {
- // inst_.sampling.reset();
+ inst_.sampling.reset();
}
return eevee_dd;
}
@@ -253,7 +253,10 @@ static void gpencil_stroke_sync(bGPDlayer *UNUSED(gpl),
void SyncModule::sync_gpencil(Object *ob, ObjectHandle &ob_handle)
{
/* TODO(fclem): Waiting for a user option to use the render engine instead of gpencil engine. */
- return;
+ if (true) {
+ inst_.gpencil_engine_enabled = true;
+ return;
+ }
gpIterData iter(inst_, ob, ob_handle);
@@ -280,7 +283,12 @@ static void shgroup_curves_call(MaterialPass &matpass,
if (matpass.shgrp == nullptr) {
return;
}
- DRW_shgroup_hair_create_sub(ob, part_sys, modifier_data, matpass.shgrp, matpass.gpumat);
+ if (part_sys != nullptr) {
+ DRW_shgroup_hair_create_sub(ob, part_sys, modifier_data, matpass.shgrp, matpass.gpumat);
+ }
+ else {
+ DRW_shgroup_curves_create_sub(ob, matpass.shgrp, matpass.gpumat);
+ }
}
void SyncModule::sync_curves(Object *ob, ObjectHandle &ob_handle, ModifierData *modifier_data)
diff --git a/source/blender/draw/engines/eevee_next/eevee_velocity.cc b/source/blender/draw/engines/eevee_next/eevee_velocity.cc
index ceae9df44d0..048daf1b2db 100644
--- a/source/blender/draw/engines/eevee_next/eevee_velocity.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_velocity.cc
@@ -9,10 +9,6 @@
* temporal re-projection or motion blur.
*
* It is the module that tracks the objects between frames updates.
- *
- * #VelocityModule contains all motion steps data and logic.
- * #VelocityPass contains the resolve pass for static geometry.
- * #VelocityView is a per view instance that contain the velocity buffer.
*/
#include "BKE_duplilist.h"
@@ -36,8 +32,7 @@ namespace blender::eevee {
void VelocityModule::init()
{
-#if 0 /* TODO renderpasses */
- if (inst_.render && (inst_.render_passes.vector != nullptr)) {
+ if (inst_.render && (inst_.film.enabled_passes_get() & EEVEE_RENDER_PASS_VECTOR)) {
/* No motion blur and the vector pass was requested. Do the step sync here. */
const Scene *scene = inst_.scene;
float initial_time = scene->r.cfra + scene->r.subframe;
@@ -45,7 +40,6 @@ void VelocityModule::init()
step_sync(STEP_NEXT, initial_time + 1.0f);
inst_.set_time(initial_time);
}
-#endif
}
static void step_object_sync_render(void *velocity,
@@ -70,6 +64,11 @@ void VelocityModule::step_camera_sync()
{
inst_.camera.sync();
*camera_steps[step_] = inst_.camera.data_get();
+ /* Fix undefined camera steps when rendering is starting. */
+ if ((step_ == STEP_CURRENT) && (camera_steps[STEP_PREVIOUS]->initialized == false)) {
+ *camera_steps[STEP_PREVIOUS] = *static_cast<CameraData *>(camera_steps[step_]);
+ camera_steps[STEP_PREVIOUS]->initialized = true;
+ }
}
bool VelocityModule::step_object_sync(Object *ob,
@@ -162,7 +161,7 @@ bool VelocityModule::step_object_sync(Object *ob,
}
/* TODO(@fclem): Reset sampling here? Should ultimately be covered by depsgraph update tags. */
- // inst_.sampling.reset();
+ inst_.sampling.reset();
return true;
}
@@ -264,7 +263,11 @@ void VelocityModule::end_sync()
}
if (deleted_obj.size() > 0) {
- // inst_.sampling.reset();
+ inst_.sampling.reset();
+ }
+
+ if (inst_.is_viewport() && camera_has_motion()) {
+ inst_.sampling.reset();
}
for (auto key : deleted_obj) {
@@ -300,19 +303,6 @@ void VelocityModule::end_sync()
camera_steps[STEP_CURRENT]->push_update();
camera_steps[STEP_NEXT]->push_update();
indirection_buf.push_update();
-
- {
- resolve_ps_ = DRW_pass_create("Velocity.Resolve", (DRWState)0);
- GPUShader *sh = inst_.shaders.static_shader_get(VELOCITY_RESOLVE);
- DRWShadingGroup *grp = DRW_shgroup_create(sh, resolve_ps_);
- DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &input_depth_tx_);
- DRW_shgroup_uniform_image_ref(grp, "velocity_view_img", &velocity_view_tx_);
- DRW_shgroup_uniform_image_ref(grp, "velocity_camera_img", &velocity_camera_tx_);
- DRW_shgroup_uniform_block(grp, "camera_prev", *camera_steps[STEP_PREVIOUS]);
- DRW_shgroup_uniform_block(grp, "camera_curr", *camera_steps[STEP_CURRENT]);
- DRW_shgroup_uniform_block(grp, "camera_next", *camera_steps[STEP_NEXT]);
- DRW_shgroup_call_compute_ref(grp, resolve_dispatch_size_);
- }
}
bool VelocityModule::object_has_velocity(const Object *ob)
@@ -359,60 +349,15 @@ void VelocityModule::bind_resources(DRWShadingGroup *grp)
DRW_shgroup_storage_block_ref(grp, "velocity_indirection_buf", &indirection_buf);
}
-/* Resolve pass for static geometry and to camera space projection. */
-void VelocityModule::resolve_camera_motion(GPUTexture *depth_tx,
- GPUTexture *velocity_view_tx,
- GPUTexture *velocity_camera_tx)
-{
- input_depth_tx_ = depth_tx;
- velocity_view_tx_ = velocity_view_tx;
- velocity_camera_tx_ = velocity_camera_tx;
-
- resolve_dispatch_size_.x = divide_ceil_u(GPU_texture_width(depth_tx), 8);
- resolve_dispatch_size_.y = divide_ceil_u(GPU_texture_height(depth_tx), 8);
-
- DRW_draw_pass(resolve_ps_);
-}
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Velocity View
- * \{ */
-
-void VelocityView::sync()
-{
- /* TODO: Remove. */
- velocity_view_tx_.sync();
- velocity_camera_tx_.sync();
-}
-
-void VelocityView::acquire(int2 extent)
+bool VelocityModule::camera_has_motion() const
{
- /* WORKAROUND: View name should be unique and static.
- * With this, we can reuse the same texture across views. */
- DrawEngineType *owner = (DrawEngineType *)view_name_.c_str();
-
- /* Only RG16F when only doing only reprojection or motion blur. */
- eGPUTextureFormat format = inst_.is_viewport() ? GPU_RG16F : GPU_RGBA16F;
- velocity_view_tx_.acquire(extent, format, owner);
- if (false /* TODO(fclem): Panoramic camera. */) {
- velocity_camera_tx_.acquire(extent, format, owner);
- }
- else {
- velocity_camera_tx_.acquire(int2(1), format, owner);
+ /* Only valid after sync. */
+ if (inst_.is_viewport()) {
+ /* Viewport has no next step. */
+ return *camera_steps[STEP_PREVIOUS] != *camera_steps[STEP_CURRENT];
}
-}
-
-void VelocityView::resolve(GPUTexture *depth_tx)
-{
- inst_.velocity.resolve_camera_motion(depth_tx, velocity_view_tx_, velocity_camera_tx_);
-}
-
-void VelocityView::release()
-{
- velocity_view_tx_.release();
- velocity_camera_tx_.release();
+ return *camera_steps[STEP_PREVIOUS] != *camera_steps[STEP_CURRENT] &&
+ *camera_steps[STEP_NEXT] != *camera_steps[STEP_CURRENT];
}
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/eevee_velocity.hh b/source/blender/draw/engines/eevee_next/eevee_velocity.hh
index e2606c061e1..826cd631a96 100644
--- a/source/blender/draw/engines/eevee_next/eevee_velocity.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_velocity.hh
@@ -27,8 +27,6 @@ namespace blender::eevee {
/** Container for scene velocity data. */
class VelocityModule {
- friend class VelocityView;
-
public:
struct VelocityObjectData : public VelocityIndex {
/** ID to retrieve the corresponding #VelocityGeometryData after copy. */
@@ -69,15 +67,6 @@ class VelocityModule {
eVelocityStep step_ = STEP_CURRENT;
- DRWPass *resolve_ps_ = nullptr;
-
- /** Reference only. Not owned. */
- GPUTexture *input_depth_tx_;
- GPUTexture *velocity_view_tx_;
- GPUTexture *velocity_camera_tx_;
-
- int3 resolve_dispatch_size_ = int3(1, 1, 1);
-
public:
VelocityModule(Instance &inst) : inst_(inst)
{
@@ -89,6 +78,7 @@ class VelocityModule {
}
for (CameraDataBuf *&step_buf : camera_steps) {
step_buf = new CameraDataBuf();
}
};
@@ -121,56 +111,11 @@ class VelocityModule {
void bind_resources(DRWShadingGroup *grp);
+ bool camera_has_motion() const;
+
private:
bool object_has_velocity(const Object *ob);
bool object_is_deform(const Object *ob);
-
- void resolve_camera_motion(GPUTexture *depth_tx,
- GPUTexture *velocity_view_tx,
- GPUTexture *velocity_camera_tx);
-};
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Velocity
- *
- * \{ */
-
-/**
- * Per view module.
- */
-class VelocityView {
- private:
- Instance &inst_;
-
- StringRefNull view_name_;
-
- TextureFromPool velocity_camera_tx_ = {"velocity_camera_tx_"};
- TextureFromPool velocity_view_tx_ = {"velocity_view_tx_"};
-
- public:
- VelocityView(Instance &inst, const char *name) : inst_(inst), view_name_(name){};
- ~VelocityView(){};
-
- void sync();
-
- void acquire(int2 extent);
- void release();
-
- void resolve(GPUTexture *depth_tx);
-
- /**
- * Getters
- **/
- GPUTexture *view_vectors_get() const
- {
- return velocity_view_tx_;
- }
- GPUTexture *camera_vectors_get() const
- {
- return (velocity_camera_tx_.is_valid()) ? velocity_camera_tx_ : velocity_view_tx_;
- }
};
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/eevee_view.cc b/source/blender/draw/engines/eevee_next/eevee_view.cc
index e21342c5ef6..8052ea76def 100644
--- a/source/blender/draw/engines/eevee_next/eevee_view.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_view.cc
@@ -34,17 +34,19 @@ void ShadingView::init()
// mb_.init();
}
-void ShadingView::sync(int2 render_extent_)
+void ShadingView::sync()
{
+ int2 render_extent = inst_.film.render_extent_get();
+
if (false /* inst_.camera.is_panoramic() */) {
- int64_t render_pixel_count = render_extent_.x * (int64_t)render_extent_.y;
+ int64_t render_pixel_count = render_extent.x * (int64_t)render_extent.y;
/* Divide pixel count between the 6 views. Rendering to a square target. */
extent_[0] = extent_[1] = ceilf(sqrtf(1 + (render_pixel_count / 6)));
/* TODO(@fclem): Clip unused views here. */
is_enabled_ = true;
}
else {
- extent_ = render_extent_;
+ extent_ = render_extent;
/* Only enable -Z view. */
is_enabled_ = (StringRefNull(name_) == "negZ_view");
}
@@ -54,31 +56,23 @@ void ShadingView::sync(int2 render_extent_)
}
/* Create views. */
- // const CameraData &data = inst_.camera.data_get();
+ const CameraData &cam = inst_.camera.data_get();
float4x4 viewmat, winmat;
const float(*viewmat_p)[4] = viewmat.ptr(), (*winmat_p)[4] = winmat.ptr();
-#if 0
if (false /* inst_.camera.is_panoramic() */) {
/* TODO(@fclem) Over-scans. */
/* For now a mandatory 5% over-scan for DoF. */
- float side = data.clip_near * 1.05f;
- float near = data.clip_near;
- float far = data.clip_far;
+ float side = cam.clip_near * 1.05f;
+ float near = cam.clip_near;
+ float far = cam.clip_far;
perspective_m4(winmat.ptr(), -side, side, -side, side, near, far);
- viewmat = face_matrix_ * data.viewmat;
+ viewmat = face_matrix_ * cam.viewmat;
}
else {
- viewmat_p = data.viewmat.ptr();
- winmat_p = data.winmat.ptr();
+ viewmat_p = cam.viewmat.ptr();
+ winmat_p = cam.winmat.ptr();
}
-#else
- /* TEMP */
- UNUSED_VARS(face_matrix_);
- const DRWView *default_view = DRW_view_default_get();
- DRW_view_winmat_get(default_view, winmat.ptr(), false);
- DRW_view_viewmat_get(default_view, viewmat.ptr(), false);
-#endif
main_view_ = DRW_view_create(viewmat_p, winmat_p, nullptr, nullptr, nullptr);
sub_view_ = DRW_view_create_sub(main_view_, viewmat_p, winmat_p);
@@ -86,14 +80,12 @@ void ShadingView::sync(int2 render_extent_)
// dof_.sync(winmat_p, extent_);
// mb_.sync(extent_);
- velocity_.sync();
// rt_buffer_opaque_.sync(extent_);
// rt_buffer_refract_.sync(extent_);
// inst_.hiz_back.view_sync(extent_);
// inst_.hiz_front.view_sync(extent_);
// inst_.gbuffer.view_sync(extent_);
- combined_tx_.sync();
postfx_tx_.sync();
}
@@ -108,24 +100,22 @@ void ShadingView::render()
* With this, we can reuse the same texture across views. */
DrawEngineType *owner = (DrawEngineType *)name_;
- DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
-
- depth_tx_.ensure_2d(GPU_DEPTH24_STENCIL8, extent_);
- combined_tx_.acquire(extent_, GPU_RGBA16F, owner);
- velocity_.acquire(extent_);
- // combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(depth_tx_), GPU_ATTACHMENT_TEXTURE(combined_tx_));
- // prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(depth_tx_),
- // GPU_ATTACHMENT_TEXTURE(velocity_.view_vectors_get()));
- combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(dtxl->depth), GPU_ATTACHMENT_TEXTURE(dtxl->color));
- prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(dtxl->depth),
- GPU_ATTACHMENT_TEXTURE(velocity_.view_vectors_get()));
+ RenderBuffers &rbufs = inst_.render_buffers;
+ rbufs.acquire(extent_, owner);
+ combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
+ GPU_ATTACHMENT_TEXTURE(rbufs.combined_tx));
+ prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
+ GPU_ATTACHMENT_TEXTURE(rbufs.vector_tx));
update_view();
DRW_stats_group_start(name_);
- // DRW_view_set_active(render_view_);
+ DRW_view_set_active(render_view_);
+
+ /* If camera has any motion, compute motion vector in the film pass. Otherwise, we avoid float
+ * precision issues by setting the motion of all static geometry to 0. */
+ float4 clear_velocity = float4(inst_.velocity.camera_has_motion() ? VELOCITY_INVALID : 0.0f);
- float4 clear_velocity(VELOCITY_INVALID);
GPU_framebuffer_bind(prepass_fb_);
GPU_framebuffer_clear_color(prepass_fb_, clear_velocity);
/* Alpha stores transmittance. So start at 1. */
@@ -142,33 +132,20 @@ void ShadingView::render()
// inst_.lookdev.render_overlay(view_fb_);
- inst_.pipelines.forward.render(render_view_, prepass_fb_, combined_fb_, depth_tx_, combined_tx_);
+ inst_.pipelines.forward.render(
+ render_view_, prepass_fb_, combined_fb_, rbufs.depth_tx, rbufs.combined_tx);
// inst_.lights.debug_draw(view_fb_);
// inst_.shadows.debug_draw(view_fb_);
- // velocity_.resolve(depth_tx_);
- velocity_.resolve(dtxl->depth);
-
- // if (inst_.render_passes.vector) {
- // inst_.render_passes.vector->accumulate(velocity_.camera_vectors_get(), sub_view_);
- // }
-
// GPUTexture *final_radiance_tx = render_post(combined_tx_);
- // if (inst_.render_passes.combined) {
- // inst_.render_passes.combined->accumulate(final_radiance_tx, sub_view_);
- // }
+ inst_.film.accumulate(sub_view_);
- // if (inst_.render_passes.depth) {
- // inst_.render_passes.depth->accumulate(depth_tx_, sub_view_);
- // }
+ rbufs.release();
+ postfx_tx_.release();
DRW_stats_group_end();
-
- combined_tx_.release();
- postfx_tx_.release();
- velocity_.release();
}
GPUTexture *ShadingView::render_post(GPUTexture *input_tx)
@@ -197,11 +174,17 @@ void ShadingView::update_view()
DRW_view_viewmat_get(main_view_, viewmat.ptr(), false);
DRW_view_winmat_get(main_view_, winmat.ptr(), false);
+ /* TODO(fclem): Mixed-resolution rendering: We need to make sure we render with exactly the same
+ * distances between pixels to line up render samples and target pixels.
+ * So if the target resolution is not a multiple of the resolution divisor, we need to make the
+ * projection window bigger in the +X and +Y directions. */
+
/* Anti-Aliasing / Super-Sampling jitter. */
- // float jitter_u = 2.0f * (inst_.sampling.rng_get(SAMPLING_FILTER_U) - 0.5f) / extent_[0];
- // float jitter_v = 2.0f * (inst_.sampling.rng_get(SAMPLING_FILTER_V) - 0.5f) / extent_[1];
+ float2 jitter = inst_.film.pixel_jitter_get() / float2(extent_);
+ /* Transform to NDC space. */
+ jitter *= 2.0f;
- // window_translate_m4(winmat.ptr(), winmat.ptr(), jitter_u, jitter_v);
+ window_translate_m4(winmat.ptr(), winmat.ptr(), UNPACK2(jitter));
DRW_view_update_sub(sub_view_, viewmat.ptr(), winmat.ptr());
/* FIXME(fclem): The offset may be noticeably large and the culling might make objects pop
diff --git a/source/blender/draw/engines/eevee_next/eevee_view.hh b/source/blender/draw/engines/eevee_next/eevee_view.hh
index fb74412f557..c6faebdd0e5 100644
--- a/source/blender/draw/engines/eevee_next/eevee_view.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_view.hh
@@ -44,7 +44,6 @@ class ShadingView {
/** Post-FX modules. */
// DepthOfField dof_;
// MotionBlur mb_;
- VelocityView velocity_;
/** Raytracing persistent buffers. Only opaque and refraction can have surface tracing. */
// RaytraceBuffer rt_buffer_opaque_;
@@ -52,8 +51,6 @@ class ShadingView {
Framebuffer prepass_fb_;
Framebuffer combined_fb_;
- Texture depth_tx_;
- TextureFromPool combined_tx_;
TextureFromPool postfx_tx_;
/** Main view is created from the camera (or from the viewport). It is not jittered. */
@@ -71,13 +68,13 @@ class ShadingView {
public:
ShadingView(Instance &inst, const char *name, const float (*face_matrix)[4])
- : inst_(inst), name_(name), face_matrix_(face_matrix), velocity_(inst, name){};
+ : inst_(inst), name_(name), face_matrix_(face_matrix){};
~ShadingView(){};
void init();
- void sync(int2 render_extent_);
+ void sync();
void render();
@@ -94,7 +91,7 @@ class ShadingView {
*
* Container for all views needed to render the final image.
* We might need up to 6 views for panoramic cameras.
- * All views are always available but only enabled for if need.
+ * All views are always available but only enabled if needed.
* \{ */
class MainView {
@@ -109,8 +106,6 @@ class MainView {
ShadingView shading_views_4;
ShadingView shading_views_5;
#define shading_views_ (&shading_views_0)
- /** Internal render size. */
- int render_extent_[2];
public:
MainView(Instance &inst)
@@ -123,15 +118,8 @@ class MainView {
{
}
- void init(const int2 full_extent_)
+ void init()
{
- /* TODO(fclem) parameter hidden in experimental. We need to figure out mipmap bias to preserve
- * texture crispiness. */
- float resolution_scale = 1.0f;
- for (int i = 0; i < 2; i++) {
- render_extent_[i] = max_ii(1, roundf(full_extent_[i] * resolution_scale));
- }
-
for (auto i : IndexRange(6)) {
shading_views_[i].init();
}
@@ -140,7 +128,7 @@ class MainView {
void sync()
{
for (auto i : IndexRange(6)) {
- shading_views_[i].sync(render_extent_);
+ shading_views_[i].sync();
}
}
diff --git a/source/blender/draw/engines/eevee_next/eevee_world.cc b/source/blender/draw/engines/eevee_next/eevee_world.cc
index b9cb24fe30a..56cb0f127db 100644
--- a/source/blender/draw/engines/eevee_next/eevee_world.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_world.cc
@@ -79,7 +79,7 @@ void World::sync()
/* TODO(fclem) This should be detected to scene level. */
::World *orig_world = (::World *)DEG_get_original_id(&bl_world->id);
if (assign_if_different(prev_original_world, orig_world)) {
- // inst_.sampling.reset();
+ inst_.sampling.reset();
}
bNodeTree *ntree = (bl_world->nodetree && bl_world->use_nodes) ?
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl
index 326481a1db6..974581e674e 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl
@@ -3,6 +3,8 @@
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_codegen_lib.glsl)
+#define EEVEE_ATTRIBUTE_LIB
+
#if defined(MAT_GEOM_MESH)
/* -------------------------------------------------------------------- */
@@ -282,43 +284,3 @@ vec3 attr_load_uv(vec3 attr)
/** \} */
#endif
-
-/* -------------------------------------------------------------------- */
-/** \name Volume Attribute post
- *
- * TODO(@fclem): These implementation details should concern the DRWManager and not be a fix on
- * the engine side. But as of now, the engines are responsible for loading the attributes.
- *
- * \{ */
-
-#if defined(MAT_GEOM_VOLUME)
-
-float attr_load_temperature_post(float attr)
-{
- /* Bring the into standard range without having to modify the grid values */
- attr = (attr > 0.01) ? (attr * drw_volume.temperature_mul + drw_volume.temperature_bias) : 0.0;
- return attr;
-}
-vec4 attr_load_color_post(vec4 attr)
-{
- /* Density is premultiplied for interpolation, divide it out here. */
- attr.rgb *= safe_rcp(attr.a);
- attr.rgb *= drw_volume.color_mul.rgb;
- attr.a = 1.0;
- return attr;
-}
-
-#else /* Noop for any other surface. */
-
-float attr_load_temperature_post(float attr)
-{
- return attr;
-}
-vec4 attr_load_color_post(vec4 attr)
-{
- return attr;
-}
-
-#endif
-
-/** \} */
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
index f79e9102d76..2611f714b59 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
@@ -143,24 +143,10 @@ vec2 camera_uv_from_view(CameraData cam, vec3 vV)
}
}
-vec2 camera_uv_from_world(CameraData cam, vec3 V)
+vec2 camera_uv_from_world(CameraData cam, vec3 P)
{
- vec3 vV = transform_point(cam.viewmat, V);
- switch (cam.type) {
- default:
- case CAMERA_ORTHO:
- return camera_uv_from_view(cam.persmat, false, V);
- case CAMERA_PERSP:
- return camera_uv_from_view(cam.persmat, true, V);
- case CAMERA_PANO_EQUIRECT:
- return camera_equirectangular_from_direction(cam, vV);
- case CAMERA_PANO_EQUISOLID:
- /* ATTR_FALLTHROUGH; */
- case CAMERA_PANO_EQUIDISTANT:
- return camera_fisheye_from_direction(cam, vV);
- case CAMERA_PANO_MIRROR:
- return camera_mirror_ball_from_direction(cam, vV);
- }
+ vec3 vV = transform_direction(cam.viewmat, normalize(P));
+ return camera_uv_from_view(cam, vV);
}
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl
new file mode 100644
index 00000000000..ce1f19edf53
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl
@@ -0,0 +1,13 @@
+
+#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_film_lib.glsl)
+
+void main()
+{
+ ivec2 texel_film = ivec2(gl_GlobalInvocationID.xy);
+ /* Not used. */
+ vec4 out_color;
+ float out_depth;
+
+ film_process_data(texel_film, out_color, out_depth);
+}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl
new file mode 100644
index 00000000000..26040234fd0
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl
@@ -0,0 +1,31 @@
+
+#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_film_lib.glsl)
+
+void main()
+{
+ ivec2 texel_film = ivec2(gl_FragCoord.xy) - film_buf.offset;
+ float out_depth;
+
+ if (film_buf.display_only) {
+ out_depth = imageLoad(depth_img, texel_film).r;
+
+ if (film_buf.display_id == -1) {
+ out_color = texelFetch(in_combined_tx, texel_film, 0);
+ }
+ else if (film_buf.display_is_value) {
+ out_color.rgb = imageLoad(value_accum_img, ivec3(texel_film, film_buf.display_id)).rrr;
+ out_color.a = 1.0;
+ }
+ else {
+ out_color = imageLoad(color_accum_img, ivec3(texel_film, film_buf.display_id));
+ }
+ }
+ else {
+ film_process_data(texel_film, out_color, out_depth);
+ }
+
+ gl_FragDepth = get_depth_from_view_z(-out_depth);
+
+ gl_FragDepth = film_display_depth_ammend(texel_film, gl_FragDepth);
+}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl
new file mode 100644
index 00000000000..b286836e8df
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl
@@ -0,0 +1,713 @@
+
+/**
+ * Film accumulation utility functions.
+ **/
+
+#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+#pragma BLENDER_REQUIRE(common_math_geom_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_camera_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
+
+/* Return scene linear Z depth from the camera or radial depth for panoramic cameras. */
+float film_depth_convert_to_scene(float depth)
+{
+ if (false /* Panoramic */) {
+ /* TODO */
+ return 1.0;
+ }
+ return abs(get_view_z_from_depth(depth));
+}
+
+vec3 film_YCoCg_from_scene_linear(vec3 rgb_color)
+{
+ const mat3 colorspace_tx = transpose(mat3(vec3(1, 2, 1), /* Y */
+ vec3(2, 0, -2), /* Co */
+ vec3(-1, 2, -1))); /* Cg */
+ return colorspace_tx * rgb_color;
+}
+
+vec4 film_YCoCg_from_scene_linear(vec4 rgba_color)
+{
+ return vec4(film_YCoCg_from_scene_linear(rgba_color.rgb), rgba_color.a);
+}
+
+vec3 film_scene_linear_from_YCoCg(vec3 ycocg_color)
+{
+ float Y = ycocg_color.x;
+ float Co = ycocg_color.y;
+ float Cg = ycocg_color.z;
+
+ vec3 rgb_color;
+ rgb_color.r = Y + Co - Cg;
+ rgb_color.g = Y + Cg;
+ rgb_color.b = Y - Co - Cg;
+ return rgb_color * 0.25;
+}
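+
+/* Round-trip check (not part of the change): with Y = r + 2g + b, Co = 2r - 2b and
+ * Cg = -r + 2g - b, the sums above give (Y + Co - Cg, Y + Cg, Y - Co - Cg) == (4r, 4g, 4b),
+ * hence the final 0.25 factor makes the YCoCg conversion pair exactly invertible. */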
+
+/* Load a texture sample in a specific format. Combined pass needs to use this. */
+vec4 film_texelfetch_as_YCoCg_opacity(sampler2D tx, ivec2 texel)
+{
+ vec4 color = texelFetch(tx, texel, 0);
+ /* Convert transmittance to opacity. */
+ color.a = saturate(1.0 - color.a);
+ /* Transform to YCoCg for accumulation. */
+ color.rgb = film_YCoCg_from_scene_linear(color.rgb);
+ return color;
+}
+
+/* Returns a weight based on Luma to reduce the flickering introduced by high energy pixels. */
+float film_luma_weight(float luma)
+{
+ /* Slide 20 of "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014. */
+ /* To preserve more details in dark areas, we use a bigger bias. */
+ return 1.0 / (4.0 + luma * film_buf.exposure_scale);
+}
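+
+/* e.g. (illustrative values): with exposure_scale = 1.0, a dark pixel (luma 0) weighs 0.25
+ * while a firefly (luma 100) weighs ~0.0096, so a single hot sample cannot dominate the
+ * accumulation. */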
+
+/* -------------------------------------------------------------------- */
+/** \name Filter
+ * \{ */
+
+FilmSample film_sample_get(int sample_n, ivec2 texel_film)
+{
+#ifdef PANORAMIC
+ /* TODO(fclem): Panoramic projection will be more complex. The samples will have to be retrieved
+ * at runtime, maybe by scanning a whole region. Offset and weight will have to be computed by
+ * reprojecting the incoming pixel data into film pixel space. */
+#else
+
+# ifdef SCALED_RENDERING
+ texel_film /= film_buf.scaling_factor;
+# endif
+
+ FilmSample film_sample = film_buf.samples[sample_n];
+ film_sample.texel += texel_film + film_buf.offset;
+ /* Use extend on borders. */
+ film_sample.texel = clamp(film_sample.texel, ivec2(0, 0), film_buf.render_extent - 1);
+
+ /* TODO(fclem): Panoramic projection will need to compute the sample weight in the shader
+ * instead of precomputing it on CPU. */
+# ifdef SCALED_RENDERING
+ /* We need to compute the real distance and weight since a sample
+ * can be used by many final pixels. */
+ vec2 offset = film_buf.subpixel_offset - vec2(texel_film % film_buf.scaling_factor);
+ film_sample.weight = film_filter_weight(film_buf.filter_radius, len_squared(offset));
+# endif
+
+#endif /* PANORAMIC */
+
+ /* Always return a weight above 0 to avoid blind spots between samples. */
+ film_sample.weight = max(film_sample.weight, 1e-6);
+
+ return film_sample;
+}
+
+/* Returns the combined weights of all samples affecting this film pixel. */
+float film_weight_accumulation(ivec2 texel_film)
+{
+#if 0 /* TODO(fclem): Reference implementation, also needed for panoramic cameras. */
+ float weight = 0.0;
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ weight += film_sample_get(i, texel_film).weight;
+ }
+ return weight;
+#endif
+ return film_buf.samples_weight_total;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2D tex, inout vec4 accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, samp.texel, 0) * samp.weight;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2D tex, inout float accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, samp.texel, 0).x * samp.weight;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2DArray tex, inout vec4 accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, ivec3(samp.texel, pass_id), 0) * samp.weight;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2DArray tex, inout float accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, ivec3(samp.texel, pass_id), 0).x * samp.weight;
+}
+
+void film_sample_accum_mist(FilmSample samp, inout float accum)
+{
+ if (film_buf.mist_id == -1) {
+ return;
+ }
+ float depth = texelFetch(depth_tx, samp.texel, 0).x;
+ vec2 uv = (vec2(samp.texel) + 0.5) / textureSize(depth_tx, 0).xy;
+ vec3 vP = get_view_space_from_depth(uv, depth);
+ bool is_persp = ProjectionMatrix[3][3] == 0.0;
+ float mist = (is_persp) ? length(vP) : abs(vP.z);
+ /* Remap to 0..1 range. */
+ mist = saturate(mist * film_buf.mist_scale + film_buf.mist_bias);
+ /* Falloff. */
+ mist = pow(mist, film_buf.mist_exponent);
+ accum += mist * samp.weight;
+}
+
+void film_sample_accum_combined(FilmSample samp, inout vec4 accum, inout float weight_accum)
+{
+ if (film_buf.combined_id == -1) {
+ return;
+ }
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, samp.texel);
+
+ /* Weight by luma to remove fireflies. */
+ float weight = film_luma_weight(color.x) * samp.weight;
+
+ accum += color * weight;
+ weight_accum += weight;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Load/Store Data
+ * \{ */
+
+#define WEIGHT_lAYER_ACCUMULATION 0
+#define WEIGHT_lAYER_DISTANCE 1
+
+/* Returns the distance used to store nearest interpolation data. */
+float film_distance_load(ivec2 texel)
+{
+ /* Repeat texture coordinates as the weight can be optimized to a small portion of the film. */
+ texel = texel % imageSize(in_weight_img).xy;
+
+ if (!film_buf.use_history || film_buf.use_reprojection) {
+ return 1.0e16;
+ }
+ return imageLoad(in_weight_img, ivec3(texel, WEIGHT_lAYER_DISTANCE)).x;
+}
+
+float film_weight_load(ivec2 texel)
+{
+ /* Repeat texture coordinates as the weight can be optimized to a small portion of the film. */
+ texel = texel % imageSize(in_weight_img).xy;
+
+ if (!film_buf.use_history || film_buf.use_reprojection) {
+ return 0.0;
+ }
+ return imageLoad(in_weight_img, ivec3(texel, WEIGHT_lAYER_ACCUMULATION)).x;
+}
+
+/* Returns motion in pixel space to retrieve the pixel history. */
+vec2 film_pixel_history_motion_vector(ivec2 texel_sample)
+{
+ /**
+ * Dilate velocity by using the nearest pixel in a cross pattern.
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014 (Slide 27)
+ */
+ const ivec2 corners[4] = ivec2[4](ivec2(-2, -2), ivec2(2, -2), ivec2(-2, 2), ivec2(2, 2));
+ float min_depth = texelFetch(depth_tx, texel_sample, 0).x;
+ ivec2 nearest_texel = texel_sample;
+ for (int i = 0; i < 4; i++) {
+ ivec2 texel = clamp(texel_sample + corners[i], ivec2(0), textureSize(depth_tx, 0).xy - 1);
+ float depth = texelFetch(depth_tx, texel, 0).x;
+ if (min_depth > depth) {
+ min_depth = depth;
+ nearest_texel = texel;
+ }
+ }
+
+ vec4 vector = velocity_resolve(vector_tx, nearest_texel, min_depth);
+
+ /* Transform to pixel space. */
+ vector.xy *= vec2(film_buf.extent);
+
+ return vector.xy;
+}
+
+/* \a t is inter-pixel position. 0 means perfectly on a pixel center.
+ * Returns weights in both dimensions.
+ * Multiply each dimension's weights to get the final pixel weights. */
+void film_get_catmull_rom_weights(vec2 t, out vec2 weights[4])
+{
+ vec2 t2 = t * t;
+ vec2 t3 = t2 * t;
+ float fc = 0.5; /* Catmull-Rom. */
+
+ vec2 fct = t * fc;
+ vec2 fct2 = t2 * fc;
+ vec2 fct3 = t3 * fc;
+ weights[0] = (fct2 * 2.0 - fct3) - fct;
+ weights[1] = (t3 * 2.0 - fct3) + (-t2 * 3.0 + fct2) + 1.0;
+ weights[2] = (-t3 * 2.0 + fct3) + (t2 * 3.0 - (2.0 * fct2)) + fct;
+ weights[3] = fct3 - fct2;
+}
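+
+/* Sanity check (not part of the change): at t = 0 the weights are (0, 1, 0, 0) and at t = 1
+ * they are (0, 0, 1, 0), so the filter passes exactly through the two center taps, and each
+ * dimension's four weights always sum to 1. */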
+
+/* Load color using a special filter to avoid losing detail.
+ * \a texel is sample position with subpixel accuracy. */
+vec4 film_sample_catmull_rom(sampler2D color_tx, vec2 input_texel)
+{
+ vec2 center_texel;
+ vec2 inter_texel = modf(input_texel, center_texel);
+ vec2 weights[4];
+ film_get_catmull_rom_weights(inter_texel, weights);
+
+#if 0 /* Reference. 16 Taps. */
+ vec4 color = vec4(0.0);
+ for (int y = 0; y < 4; y++) {
+ for (int x = 0; x < 4; x++) {
+ ivec2 texel = ivec2(center_texel) + ivec2(x, y) - 1;
+ texel = clamp(texel, ivec2(0), textureSize(color_tx, 0).xy - 1);
+ color += texelFetch(color_tx, texel, 0) * weights[x].x * weights[y].y;
+ }
+ }
+ return color;
+
+#elif 1 /* Optimized version. 5 Bilinear Taps. */
+ /**
+ * Use optimized version by leveraging bilinear filtering from hardware sampler and by removing
+ * corner taps.
+ * From "Filmic SMAA" by Jorge Jimenez at Siggraph 2016
+ * http://advances.realtimerendering.com/s2016/Filmic%20SMAA%20v7.pptx
+ */
+ center_texel += 0.5;
+
+ /* Slide 92. */
+ vec2 weight_12 = weights[1] + weights[2];
+ vec2 uv_12 = (center_texel + weights[2] / weight_12) * film_buf.extent_inv;
+ vec2 uv_0 = (center_texel - 1.0) * film_buf.extent_inv;
+ vec2 uv_3 = (center_texel + 2.0) * film_buf.extent_inv;
+
+ vec4 color;
+ vec4 weight_cross = weight_12.xyyx * vec4(weights[0].yx, weights[3].xy);
+ float weight_center = weight_12.x * weight_12.y;
+
+ color = textureLod(color_tx, uv_12, 0.0) * weight_center;
+ color += textureLod(color_tx, vec2(uv_12.x, uv_0.y), 0.0) * weight_cross.x;
+ color += textureLod(color_tx, vec2(uv_0.x, uv_12.y), 0.0) * weight_cross.y;
+ color += textureLod(color_tx, vec2(uv_3.x, uv_12.y), 0.0) * weight_cross.z;
+ color += textureLod(color_tx, vec2(uv_12.x, uv_3.y), 0.0) * weight_cross.w;
+ /* Re-normalize for the removed corners. */
+ return color / (weight_center + sum(weight_cross));
+
+#else /* Nearest interpolation for debugging. 1 Tap. */
+ ivec2 texel = ivec2(center_texel) + ivec2(greaterThan(inter_texel, vec2(0.5)));
+ texel = clamp(texel, ivec2(0), textureSize(color_tx, 0).xy - 1);
+ return texelFetch(color_tx, texel, 0);
+#endif
+}
+
+/* Return history clipping bounding box in YCoCg color space. */
+void film_combined_neighbor_boundbox(ivec2 texel, out vec4 min_c, out vec4 max_c)
+{
+ /* Plus (+) shape offsets. */
+ const ivec2 plus_offsets[5] = ivec2[5](ivec2(0, 0), /* Center */
+ ivec2(-1, 0),
+ ivec2(0, -1),
+ ivec2(1, 0),
+ ivec2(0, 1));
+#if 0
+ /**
+ * Compute Variance of neighborhood as described in:
+ * "An Excursion in Temporal Supersampling" by Marco Salvi at GDC 2016.
+ * and:
+ * "A Survey of Temporal Antialiasing Techniques" by Yang et al.
+ */
+
+ /* First 2 moments. */
+ vec4 mu1 = vec4(0), mu2 = vec4(0);
+ for (int i = 0; i < 5; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + plus_offsets[i]);
+ mu1 += color;
+ mu2 += sqr(color);
+ }
+ mu1 *= (1.0 / 5.0);
+ mu2 *= (1.0 / 5.0);
+
+ /* Extent scaling. Range [0.75..1.25].
+ * Balance between more flickering (0.75) or more ghosting (1.25). */
+ const float gamma = 1.25;
+ /* Standard deviation. */
+ vec4 sigma = sqrt(abs(mu2 - sqr(mu1)));
+ /* eq. 6 in "A Survey of Temporal Antialiasing Techniques". */
+ min_c = mu1 - gamma * sigma;
+ max_c = mu1 + gamma * sigma;
+#else
+ /**
+ * Simple bounding box calculation in YCoCg as described in:
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014
+ */
+ min_c = vec4(1e16);
+ max_c = vec4(-1e16);
+ for (int i = 0; i < 5; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + plus_offsets[i]);
+ min_c = min(min_c, color);
+ max_c = max(max_c, color);
+ }
+ /* (Slide 32) Simple clamp to min/max of 8 neighbors results in 3x3 box artifacts.
+ * Round bbox shape by averaging 2 different min/max from 2 different neighborhood. */
+ vec4 min_c_3x3 = min_c;
+ vec4 max_c_3x3 = max_c;
+ const ivec2 corners[4] = ivec2[4](ivec2(-1, -1), ivec2(1, -1), ivec2(-1, 1), ivec2(1, 1));
+ for (int i = 0; i < 4; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + corners[i]);
+ min_c_3x3 = min(min_c_3x3, color);
+ max_c_3x3 = max(max_c_3x3, color);
+ }
+ min_c = (min_c + min_c_3x3) * 0.5;
+ max_c = (max_c + max_c_3x3) * 0.5;
+#endif
+}
+
+/* 1D equivalent of line_aabb_clipping_dist(). */
+float film_aabb_clipping_dist_alpha(float origin, float direction, float aabb_min, float aabb_max)
+{
+ if (abs(direction) < 1e-5) {
+ return 0.0;
+ }
+ float nearest_plane = (direction > 0.0) ? aabb_min : aabb_max;
+ return (nearest_plane - origin) / direction;
+}
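+
+/* Illustration only, not part of this patch: the 3D line_aabb_clipping_dist() referenced
+ * above lives in the shared math library; a minimal sketch mirroring the 1D logic per axis
+ * could look like this. */
+#if 0
+float line_aabb_clipping_dist(vec3 origin, vec3 direction, vec3 aabb_min, vec3 aabb_max)
+{
+  /* Nearest plane per axis, as in the 1D variant. */
+  vec3 nearest_plane = mix(aabb_max, aabb_min, step(vec3(0.0), direction));
+  vec3 t = (nearest_plane - origin) / direction;
+  /* Ignore axes with a near-zero direction (matches the 1D early-out). */
+  t = mix(t, vec3(0.0), lessThan(abs(direction), vec3(1e-5)));
+  /* The entry distance into the box is the largest per-axis distance. */
+  return max(t.x, max(t.y, t.z));
+}
+#endif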
+
+/* Modulate the history color to avoid ghosting artifacts. */
+vec4 film_amend_combined_history(
+ vec4 min_color, vec4 max_color, vec4 color_history, vec4 src_color, ivec2 src_texel)
+{
+ /* Clip instead of clamping to avoid color accumulating in the AABB corners. */
+ vec4 clip_dir = src_color - color_history;
+
+ float t = line_aabb_clipping_dist(color_history.rgb, clip_dir.rgb, min_color.rgb, max_color.rgb);
+ color_history.rgb += clip_dir.rgb * saturate(t);
+
+  /* Clip alpha on its own to avoid interference with the other channels. */
+ float t_a = film_aabb_clipping_dist_alpha(color_history.a, clip_dir.a, min_color.a, max_color.a);
+ color_history.a += clip_dir.a * saturate(t_a);
+
+ return color_history;
+}
+
+float film_history_blend_factor(float velocity,
+ vec2 texel,
+ float luma_min,
+ float luma_max,
+ float luma_incoming,
+ float luma_history)
+{
+ /* 5% of incoming color by default. */
+ float blend = 0.05;
+  /* Blend less history if the pixel has substantial velocity. */
+ blend = mix(blend, 0.20, saturate(velocity * 0.02));
+ /**
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014 (Slide 43)
+   * Bias towards history if the incoming pixel is near clamping. Reduces flicker.
+ */
+ float distance_to_luma_clip = min_v2(vec2(luma_history - luma_min, luma_max - luma_history));
+  /* Divide by the bbox size to get a factor. The factor of 2 compensates for the min() above. */
+  distance_to_luma_clip *= 2.0 * safe_rcp(luma_max - luma_min);
+  /* Linearly blend when the history gets below 25% of the bbox size. */
+ blend *= saturate(distance_to_luma_clip * 4.0 + 0.1);
+ /* Discard out of view history. */
+  if (any(lessThan(texel, vec2(0))) || any(greaterThanEqual(texel, vec2(film_buf.extent)))) {
+ blend = 1.0;
+ }
+ /* Discard history if invalid. */
+ if (film_buf.use_history == false) {
+ blend = 1.0;
+ }
+ return blend;
+}
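+
+/* Worked example, illustration only: a static pixel whose history luma sits at the center of
+ * the bbox has velocity = 0 and distance_to_luma_clip = 1, so
+ * blend = 0.05 * saturate(4.0 + 0.1) = 0.05 (5% incoming color). A fast pixel
+ * (velocity >= 50) with history right at the luma clamp gets
+ * blend = 0.20 * saturate(0.0 + 0.1) = 0.02, biasing towards history to reduce flicker. */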
+
+/* Resolve and store the final combined color. Also forwards it to the display output. */
+void film_store_combined(
+ FilmSample dst, ivec2 src_texel, vec4 color, float color_weight, inout vec4 display)
+{
+ if (film_buf.combined_id == -1) {
+ return;
+ }
+
+ vec4 color_src, color_dst;
+ float weight_src, weight_dst;
+
+  /* Undo the weighting to get the final spatially-filtered color. */
+ color_src = color / color_weight;
+
+ if (film_buf.use_reprojection) {
+ /* Interactive accumulation. Do reprojection and Temporal Anti-Aliasing. */
+
+ /* Reproject by finding where this pixel was in the previous frame. */
+ vec2 motion = film_pixel_history_motion_vector(src_texel);
+ vec2 history_texel = vec2(dst.texel) + motion;
+
+ float velocity = length(motion);
+
+    /* Load the weight if it is not uniform across the whole buffer (i.e. upsampling, panoramic). */
+ // dst.weight = film_weight_load(texel_combined);
+
+ color_dst = film_sample_catmull_rom(in_combined_tx, history_texel);
+ color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb);
+
+    /* Get the local color bounding box of the source neighborhood. */
+ vec4 min_color, max_color;
+ film_combined_neighbor_boundbox(src_texel, min_color, max_color);
+
+ float blend = film_history_blend_factor(
+ velocity, history_texel, min_color.x, max_color.x, color_src.x, color_dst.x);
+
+ color_dst = film_amend_combined_history(min_color, max_color, color_dst, color_src, src_texel);
+
+ /* Luma weighted blend to avoid flickering. */
+ weight_dst = film_luma_weight(color_dst.x) * (1.0 - blend);
+ weight_src = film_luma_weight(color_src.x) * (blend);
+ }
+ else {
+ /* Everything is static. Use render accumulation. */
+ color_dst = texelFetch(in_combined_tx, dst.texel, 0);
+ color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb);
+
+ /* Luma weighted blend to avoid flickering. */
+ weight_dst = film_luma_weight(color_dst.x) * dst.weight;
+ weight_src = color_weight;
+ }
+ /* Weighted blend. */
+ color = color_dst * weight_dst + color_src * weight_src;
+ color /= weight_src + weight_dst;
+
+ color.rgb = film_scene_linear_from_YCoCg(color.rgb);
+
+ /* Fix alpha not accumulating to 1 because of float imprecision. */
+ if (color.a > 0.995) {
+ color.a = 1.0;
+ }
+
+ /* Filter NaNs. */
+ if (any(isnan(color))) {
+ color = vec4(0.0, 0.0, 0.0, 1.0);
+ }
+
+ if (film_buf.display_id == -1) {
+ display = color;
+ }
+ imageStore(out_combined_img, dst.texel, color);
+}
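+
+/* Illustration only, not part of this patch: film_luma_weight() is defined earlier in this
+ * file; a common choice for such a weight (assumed here) is Karis's inverse-luminance
+ * weighting, which keeps HDR fireflies from dominating the average. */
+#if 0
+float film_luma_weight(float luma)
+{
+  /* Inverse luminance weight: very bright samples contribute less. */
+  return 1.0 / (4.0 + luma);
+}
+#endif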
+
+void film_store_color(FilmSample dst, int pass_id, vec4 color, inout vec4 display)
+{
+ if (pass_id == -1) {
+ return;
+ }
+
+ vec4 data_film = imageLoad(color_accum_img, ivec3(dst.texel, pass_id));
+
+ color = (data_film * dst.weight + color) * dst.weight_sum_inv;
+
+ /* Filter NaNs. */
+ if (any(isnan(color))) {
+ color = vec4(0.0, 0.0, 0.0, 1.0);
+ }
+
+ if (film_buf.display_id == pass_id) {
+ display = color;
+ }
+ imageStore(color_accum_img, ivec3(dst.texel, pass_id), color);
+}
+
+void film_store_value(FilmSample dst, int pass_id, float value, inout vec4 display)
+{
+ if (pass_id == -1) {
+ return;
+ }
+
+ float data_film = imageLoad(value_accum_img, ivec3(dst.texel, pass_id)).x;
+
+ value = (data_film * dst.weight + value) * dst.weight_sum_inv;
+
+ /* Filter NaNs. */
+ if (isnan(value)) {
+ value = 0.0;
+ }
+
+ if (film_buf.display_id == pass_id) {
+ display = vec4(value, value, value, 1.0);
+ }
+ imageStore(value_accum_img, ivec3(dst.texel, pass_id), vec4(value));
+}
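+
+/* Worked example, illustration only: with a unit filter weight per sample, after 3
+ * accumulated samples film_weight = 3. When a 4th sample arrives with weight_accum = 1,
+ * weight_sum_inv = 1/4 and the stored value becomes (mean3 * 3 + sample4) / 4, i.e. the
+ * running mean of all 4 samples. */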
+
+/* Nearest sample variant. Always stores the data. */
+void film_store_data(ivec2 texel_film, int pass_id, vec4 data_sample, inout vec4 display)
+{
+ if (pass_id == -1) {
+ return;
+ }
+
+ if (film_buf.display_id == pass_id) {
+ display = data_sample;
+ }
+ imageStore(color_accum_img, ivec3(texel_film, pass_id), data_sample);
+}
+
+void film_store_depth(ivec2 texel_film, float value, out float out_depth)
+{
+ if (film_buf.depth_id == -1) {
+ return;
+ }
+
+ out_depth = film_depth_convert_to_scene(value);
+
+ imageStore(depth_img, texel_film, vec4(out_depth));
+}
+
+void film_store_distance(ivec2 texel, float value)
+{
+ imageStore(out_weight_img, ivec3(texel, WEIGHT_lAYER_DISTANCE), vec4(value));
+}
+
+void film_store_weight(ivec2 texel, float value)
+{
+ imageStore(out_weight_img, ivec3(texel, WEIGHT_lAYER_ACCUMULATION), vec4(value));
+}
+
+float film_display_depth_ammend(ivec2 texel, float depth)
+{
+  /* This effectively offsets the depth of the whole 2x2 region to the lowest value of the region
+   * twice: once for the X and once for the Y direction. */
+  /* TODO(fclem): This could be improved as it gives flickering results at depth discontinuities.
+   * But this is the quickest stable result I could come up with for now. */
+#ifdef GPU_FRAGMENT_SHADER
+ depth += fwidth(depth);
+#endif
+  /* Small offset to avoid the lessEqual depth test failing because of precision loss in all
+   * the conversions. */
+ depth += 2.4e-7 * 4.0;
+ return saturate(depth);
+}
+
+/** \} */
+
+/** NOTE: out_depth is scene linear depth from the camera origin. */
+void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth)
+{
+ out_color = vec4(0.0);
+ out_depth = 0.0;
+
+ float weight_accum = film_weight_accumulation(texel_film);
+ float film_weight = film_weight_load(texel_film);
+ float weight_sum = film_weight + weight_accum;
+ film_store_weight(texel_film, weight_sum);
+
+ FilmSample dst;
+ dst.texel = texel_film;
+ dst.weight = film_weight;
+ dst.weight_sum_inv = 1.0 / weight_sum;
+
+  /* NOTE: We split the accumulations into separate loops to avoid using too many registers and
+   * to maximize occupancy. */
+
+ if (film_buf.combined_id != -1) {
+ /* NOTE: Do weight accumulation again since we use custom weights. */
+ float weight_accum = 0.0;
+ vec4 combined_accum = vec4(0.0);
+
+ FilmSample src;
+ for (int i = film_buf.samples_len - 1; i >= 0; i--) {
+ src = film_sample_get(i, texel_film);
+ film_sample_accum_combined(src, combined_accum, weight_accum);
+ }
+    /* NOTE: src.texel is the center texel in the incoming data buffer. */
+ film_store_combined(dst, src.texel, combined_accum, weight_accum, out_color);
+ }
+
+ if (film_buf.has_data) {
+ float film_distance = film_distance_load(texel_film);
+
+ /* Get sample closest to target texel. It is always sample 0. */
+ FilmSample film_sample = film_sample_get(0, texel_film);
+
+ if (film_buf.use_reprojection || film_sample.weight < film_distance) {
+ vec4 normal = texelFetch(normal_tx, film_sample.texel, 0);
+ float depth = texelFetch(depth_tx, film_sample.texel, 0).x;
+ vec4 vector = velocity_resolve(vector_tx, film_sample.texel, depth);
+
+ film_store_depth(texel_film, depth, out_depth);
+ film_store_data(texel_film, film_buf.normal_id, normal, out_color);
+ film_store_data(texel_film, film_buf.vector_id, vector, out_color);
+ film_store_distance(texel_film, film_sample.weight);
+ }
+ else {
+ out_depth = imageLoad(depth_img, texel_film).r;
+ }
+ }
+
+ if (film_buf.any_render_pass_1) {
+ vec4 diffuse_light_accum = vec4(0.0);
+ vec4 specular_light_accum = vec4(0.0);
+ vec4 volume_light_accum = vec4(0.0);
+ vec4 emission_accum = vec4(0.0);
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, film_buf.diffuse_light_id, diffuse_light_tx, diffuse_light_accum);
+ film_sample_accum(src, film_buf.specular_light_id, specular_light_tx, specular_light_accum);
+ film_sample_accum(src, film_buf.volume_light_id, volume_light_tx, volume_light_accum);
+ film_sample_accum(src, film_buf.emission_id, emission_tx, emission_accum);
+ }
+ film_store_color(dst, film_buf.diffuse_light_id, diffuse_light_accum, out_color);
+ film_store_color(dst, film_buf.specular_light_id, specular_light_accum, out_color);
+ film_store_color(dst, film_buf.volume_light_id, volume_light_accum, out_color);
+ film_store_color(dst, film_buf.emission_id, emission_accum, out_color);
+ }
+
+ if (film_buf.any_render_pass_2) {
+ vec4 diffuse_color_accum = vec4(0.0);
+ vec4 specular_color_accum = vec4(0.0);
+ vec4 environment_accum = vec4(0.0);
+ float mist_accum = 0.0;
+ float shadow_accum = 0.0;
+ float ao_accum = 0.0;
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, film_buf.diffuse_color_id, diffuse_color_tx, diffuse_color_accum);
+ film_sample_accum(src, film_buf.specular_color_id, specular_color_tx, specular_color_accum);
+ film_sample_accum(src, film_buf.environment_id, environment_tx, environment_accum);
+ film_sample_accum(src, film_buf.shadow_id, shadow_tx, shadow_accum);
+ film_sample_accum(src, film_buf.ambient_occlusion_id, ambient_occlusion_tx, ao_accum);
+ film_sample_accum_mist(src, mist_accum);
+ }
+ film_store_color(dst, film_buf.diffuse_color_id, diffuse_color_accum, out_color);
+ film_store_color(dst, film_buf.specular_color_id, specular_color_accum, out_color);
+ film_store_color(dst, film_buf.environment_id, environment_accum, out_color);
+ film_store_value(dst, film_buf.shadow_id, shadow_accum, out_color);
+ film_store_value(dst, film_buf.ambient_occlusion_id, ao_accum, out_color);
+ film_store_value(dst, film_buf.mist_id, mist_accum, out_color);
+ }
+
+ for (int aov = 0; aov < film_buf.aov_color_len; aov++) {
+ vec4 aov_accum = vec4(0.0);
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, aov, aov_color_tx, aov_accum);
+ }
+ film_store_color(dst, film_buf.aov_color_id + aov, aov_accum, out_color);
+ }
+
+ for (int aov = 0; aov < film_buf.aov_value_len; aov++) {
+ float aov_accum = 0.0;
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, aov, aov_value_tx, aov_accum);
+ }
+ film_store_value(dst, film_buf.aov_value_id + aov, aov_accum, out_color);
+ }
+}
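+
+/* Illustration only, not part of this patch: a minimal sketch of how a caller such as
+ * eevee_film_comp.glsl (part of this changeset, shown elsewhere) might drive
+ * film_process_data(). */
+#if 0
+void main()
+{
+  ivec2 texel_film = ivec2(gl_GlobalInvocationID.xy);
+  /* Skip threads outside of the film extent. */
+  if (any(greaterThanEqual(vec2(texel_film), vec2(film_buf.extent)))) {
+    return;
+  }
+  vec4 out_color;
+  float out_depth;
+  film_process_data(texel_film, out_color, out_depth);
+}
+#endif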
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl
index 0ccf06a9e14..c488216eeac 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl
@@ -245,6 +245,20 @@ float F_eta(float a, float b)
}
void output_aov(vec4 color, float value, uint hash)
{
+#if defined(MAT_AOV_SUPPORT) && defined(GPU_FRAGMENT_SHADER)
+ for (int i = 0; i < AOV_MAX && i < aov_buf.color_len; i++) {
+ if (aov_buf.hash_color[i] == hash) {
+ imageStore(aov_color_img, ivec3(gl_FragCoord.xy, i), color);
+ return;
+ }
+ }
+ for (int i = 0; i < AOV_MAX && i < aov_buf.value_len; i++) {
+ if (aov_buf.hash_value[i] == hash) {
+ imageStore(aov_value_img, ivec3(gl_FragCoord.xy, i), vec4(value));
+ return;
+ }
+ }
+#endif
}
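+
+/* Illustration only, not part of this patch: generated node-tree code is expected to call
+ * output_aov() with a per-AOV hash precomputed on the host from the AOV name (the exact
+ * hashing scheme lives on the CPU side). A hypothetical call site: */
+#if 0
+void nodetree_aov_example()
+{
+  /* `0xC0FFEEu` stands in for the host-side hash of the AOV name. */
+  output_aov(vec4(1.0, 0.5, 0.0, 1.0), 0.0, 0xC0FFEEu);
+}
+#endif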
#ifdef EEVEE_MATERIAL_STUBS
@@ -255,6 +269,10 @@ void output_aov(vec4 color, float value, uint hash)
# define nodetree_thickness() 0.1
#endif
+#ifdef GPU_VERTEX_SHADER
+# define closure_to_rgba(a) vec4(0.0)
+#endif
+
/* -------------------------------------------------------------------- */
/** \name Fragment Displacement
*
@@ -359,3 +377,43 @@ vec3 coordinate_incoming(vec3 P)
}
/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Volume Attribute post
+ *
+ * TODO(@fclem): These implementation details should be handled by the DRWManager and not be
+ * fixed up on the engine side. But as of now, the engines are responsible for loading the
+ * attributes.
+ *
+ * \{ */
+
+#if defined(MAT_GEOM_VOLUME)
+
+float attr_load_temperature_post(float attr)
+{
+  /* Bring the temperature into a standard range without having to modify the grid values. */
+ attr = (attr > 0.01) ? (attr * drw_volume.temperature_mul + drw_volume.temperature_bias) : 0.0;
+ return attr;
+}
+vec4 attr_load_color_post(vec4 attr)
+{
+ /* Density is premultiplied for interpolation, divide it out here. */
+ attr.rgb *= safe_rcp(attr.a);
+ attr.rgb *= drw_volume.color_mul.rgb;
+ attr.a = 1.0;
+ return attr;
+}
+
+#else /* Noop for any other surface. */
+
+float attr_load_temperature_post(float attr)
+{
+ return attr;
+}
+vec4 attr_load_color_post(vec4 attr)
+{
+ return attr;
+}
+
+#endif
+
+/** \} */
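+
+/* Worked example, illustration only: for a premultiplied color grid texel
+ * (0.2, 0.1, 0.0, 0.5), attr_load_color_post() divides out the density 0.5, giving
+ * (0.4, 0.2, 0.0), applies drw_volume.color_mul, and sets alpha to 1.0. */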
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
index 7ddf941df7c..34ea288852a 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
@@ -10,6 +10,18 @@
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
+vec4 closure_to_rgba(Closure cl)
+{
+ vec4 out_color;
+ out_color.rgb = g_emission;
+ out_color.a = saturate(1.0 - avg(g_transmittance));
+
+ /* Reset for the next closure tree. */
+ closure_weights_reset();
+
+ return out_color;
+}
+
/* From the paper "Hashed Alpha Testing" by Chris Wyman and Morgan McGuire. */
float hash(vec2 a)
{
@@ -72,14 +84,7 @@ void main()
#endif
#ifdef MAT_VELOCITY
- vec4 out_velocity_camera; /* TODO(fclem): Panoramic cameras. */
- velocity_camera(interp.P + motion.prev,
- interp.P,
- interp.P - motion.next,
- out_velocity_camera,
- out_velocity_view);
-
- /* For testing in viewport. */
- out_velocity_view.zw = vec2(0.0);
+ out_velocity = velocity_surface(interp.P + motion.prev, interp.P, interp.P - motion.next);
+ out_velocity = velocity_pack(out_velocity);
#endif
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
index 143e88dbe68..48ced4e5374 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
@@ -53,21 +53,45 @@ void main()
g_holdout = saturate(g_holdout);
+ vec3 diffuse_light = vec3(saturate(g_diffuse_data.N.z * 0.5 + 0.5));
+ vec3 reflection_light = vec3(spec_light(g_reflection_data));
+ vec3 refraction_light = vec3(saturate(g_refraction_data.N.z * 0.5 + 0.5));
+
+ g_diffuse_data.color *= g_diffuse_data.weight;
+ g_reflection_data.color *= g_reflection_data.weight;
+ g_refraction_data.color *= g_refraction_data.weight;
+ diffuse_light *= step(1e-5, g_diffuse_data.weight);
+ reflection_light *= step(1e-5, g_reflection_data.weight);
+ refraction_light *= step(1e-5, g_refraction_data.weight);
+
out_radiance.rgb = g_emission;
- out_radiance.rgb += g_diffuse_data.color * g_diffuse_data.weight *
- saturate(g_diffuse_data.N.z * 0.5 + 0.5);
- out_radiance.rgb += g_reflection_data.color * g_reflection_data.weight *
- spec_light(g_reflection_data);
- out_radiance.rgb += g_refraction_data.color * g_refraction_data.weight *
- saturate(g_refraction_data.N.z * 0.5 + 0.5);
+ out_radiance.rgb += g_diffuse_data.color * diffuse_light;
+ out_radiance.rgb += g_reflection_data.color * reflection_light;
+ out_radiance.rgb += g_refraction_data.color * refraction_light;
out_radiance.a = 0.0;
+ vec3 specular_light = reflection_light + refraction_light;
+ vec3 specular_color = g_reflection_data.color + g_refraction_data.color;
+
+  /* TODO(fclem): This feels way too complex for what it is. */
+ bool has_any_bsdf_weight = g_diffuse_data.weight != 0.0 || g_reflection_data.weight != 0.0 ||
+ g_refraction_data.weight != 0.0;
+ vec3 out_normal = has_any_bsdf_weight ? vec3(0.0) : g_data.N;
+ out_normal += g_diffuse_data.N * g_diffuse_data.weight;
+ out_normal += g_reflection_data.N * g_reflection_data.weight;
+ out_normal += g_refraction_data.N * g_refraction_data.weight;
+ out_normal = safe_normalize(out_normal);
+
+ ivec2 out_texel = ivec2(gl_FragCoord.xy);
+ imageStore(rp_normal_img, out_texel, vec4(out_normal, 1.0));
+ imageStore(rp_diffuse_light_img, out_texel, vec4(diffuse_light, 1.0));
+ imageStore(rp_diffuse_color_img, out_texel, vec4(g_diffuse_data.color, 1.0));
+ imageStore(rp_specular_light_img, out_texel, vec4(specular_light, 1.0));
+ imageStore(rp_specular_color_img, out_texel, vec4(specular_color, 1.0));
+ imageStore(rp_emission_img, out_texel, vec4(g_emission, 1.0));
+
out_radiance.rgb *= 1.0 - g_holdout;
out_transmittance.rgb = g_transmittance;
out_transmittance.a = saturate(avg(g_transmittance));
-
- /* Test */
- out_transmittance.a = 1.0 - out_transmittance.a;
- out_radiance.a = 1.0 - out_radiance.a;
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl
index ac657afc922..ed75282a550 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl
@@ -24,6 +24,17 @@ void main()
g_holdout = saturate(g_holdout);
+ ivec2 out_texel = ivec2(gl_FragCoord.xy);
+ imageStore(rp_normal_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_diffuse_light_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_diffuse_color_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_specular_light_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_specular_color_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_emission_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+
out_background.rgb = safe_color(g_emission) * (1.0 - g_holdout);
out_background.a = saturate(avg(g_transmittance)) * g_holdout;
+
+ /* World opacity. */
+ out_background = mix(vec4(0.0, 0.0, 0.0, 1.0), out_background, world_opacity_fade);
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
index 435ae6658c9..c21456b7a5c 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
@@ -4,21 +4,49 @@
#ifdef VELOCITY_CAMERA
+vec4 velocity_pack(vec4 data)
+{
+ return data * 0.01;
+}
+
+vec4 velocity_unpack(vec4 data)
+{
+ return data * 100.0;
+}
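+
+/* Illustration only: packing scales UV-space motion by 0.01, presumably so that large
+ * motions still fit a low-precision velocity target; unpacking is the exact inverse, so
+ * velocity_unpack(velocity_pack(v)) == v up to storage precision. */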
+
/**
* Given a triple of position, compute the previous and next motion vectors.
- * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy)
+ * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy).
*/
-vec4 velocity_view(vec3 P_prev, vec3 P, vec3 P_next)
+vec4 velocity_surface(vec3 P_prv, vec3 P, vec3 P_nxt)
{
- vec2 prev_uv, curr_uv, next_uv;
+ /* NOTE: We don't use the drw_view.persmat to avoid adding the TAA jitter to the velocity. */
+ vec2 prev_uv = project_point(camera_prev.persmat, P_prv).xy;
+ vec2 curr_uv = project_point(camera_curr.persmat, P).xy;
+ vec2 next_uv = project_point(camera_next.persmat, P_nxt).xy;
+
+ vec4 motion = vec4(prev_uv - curr_uv, curr_uv - next_uv);
+ /* Convert NDC velocity to UV velocity */
+ motion *= 0.5;
+
+ return motion;
+}
- prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
- curr_uv = transform_point(ViewProjectionMatrix, P).xy;
- next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
+/**
+ * Given a view space view vector \a vV, compute the previous and next motion vectors for
+ * background pixels.
+ * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy).
+ */
+vec4 velocity_background(vec3 vV)
+{
+  /* Only transform the direction to avoid losing precision. */
+ vec3 V = transform_direction(camera_curr.viewinv, vV);
+  /* NOTE: We don't use the drw_view.persmat to avoid adding the TAA jitter to the velocity. */
+  vec2 prev_uv = project_point(camera_prev.persmat, V).xy;
+  vec2 curr_uv = project_point(camera_curr.persmat, V).xy;
+  vec2 next_uv = project_point(camera_next.persmat, V).xy;
- vec4 motion;
- motion.xy = prev_uv - curr_uv;
- motion.zw = curr_uv - next_uv;
+ vec4 motion = vec4(prev_uv - curr_uv, curr_uv - next_uv);
/* Convert NDC velocity to UV velocity */
motion *= 0.5;
@@ -26,37 +54,28 @@ vec4 velocity_view(vec3 P_prev, vec3 P, vec3 P_next)
}
/**
- * Given a triple of position, compute the previous and next motion vectors.
- * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy)
- * \a velocity_camera is the motion in film UV space after camera projection.
- * \a velocity_view is the motion in ShadingView UV space. It is different
- * from velocity_camera for multi-view rendering.
+ * Load the stored velocity and resolve it, as some pixels might still lack valid motion
+ * data (skipped during the prepass for performance reasons).
*/
-void velocity_camera(vec3 P_prev, vec3 P, vec3 P_next, out vec4 vel_camera, out vec4 vel_view)
+vec4 velocity_resolve(sampler2D vector_tx, ivec2 texel, float depth)
{
- vec2 prev_uv, curr_uv, next_uv;
- prev_uv = camera_uv_from_world(camera_prev, P_prev);
- curr_uv = camera_uv_from_world(camera_curr, P);
- next_uv = camera_uv_from_world(camera_next, P_next);
-
- vel_camera.xy = prev_uv - curr_uv;
- vel_camera.zw = curr_uv - next_uv;
-
- if (is_panoramic(camera_curr.type)) {
- /* This path is only used if using using panoramic projections. Since the views always have
- * the same 45° aperture angle, we can safely reuse the projection matrix. */
- prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
- curr_uv = transform_point(ViewProjectionMatrix, P).xy;
- next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
-
- vel_view.xy = prev_uv - curr_uv;
- vel_view.zw = curr_uv - next_uv;
- /* Convert NDC velocity to UV velocity */
- vel_view *= 0.5;
- }
- else {
- vel_view = vel_camera;
+ vec2 uv = (vec2(texel) + 0.5) / vec2(textureSize(vector_tx, 0).xy);
+ vec4 vector = texelFetch(vector_tx, texel, 0);
+
+ if (vector.x == VELOCITY_INVALID) {
+ bool is_background = (depth == 1.0);
+ if (is_background) {
+ /* NOTE: Use viewCameraVec to avoid imprecision if camera is far from origin. */
+ vec3 vV = viewCameraVec(get_view_space_from_depth(uv, 1.0));
+ return velocity_background(vV);
+ }
+ else {
+ /* Static geometry. No translation in world space. */
+ vec3 P = get_world_space_from_depth(uv, depth);
+ return velocity_surface(P, P, P);
+ }
}
+ return velocity_unpack(vector);
}
#endif
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
deleted file mode 100644
index b68b2eaf117..00000000000
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
+++ /dev/null
@@ -1,58 +0,0 @@
-
-/**
- * Fullscreen pass that compute motion vector for static geometry.
- * Animated geometry has already written correct motion vectors.
- */
-
-#pragma BLENDER_REQUIRE(common_view_lib.glsl)
-#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
-
-#define is_valid_output(img_) (imageSize(img_).x > 1)
-
-void main()
-{
- ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
- vec4 motion = imageLoad(velocity_view_img, texel);
-
- bool pixel_has_valid_motion = (motion.x != VELOCITY_INVALID);
- float depth = texelFetch(depth_tx, texel, 0).r;
- bool is_background = (depth == 1.0f);
-
- vec2 uv = vec2(texel) * drw_view.viewport_size_inverse;
- vec3 P_next, P_prev, P_curr;
-
- if (pixel_has_valid_motion) {
- /* Animated geometry. View motion already computed during prepass. Convert only to camera. */
- // P_prev = get_world_space_from_depth(uv + motion.xy, 0.5);
- // P_curr = get_world_space_from_depth(uv, 0.5);
- // P_next = get_world_space_from_depth(uv + motion.zw, 0.5);
- return;
- }
- else if (is_background) {
- /* NOTE: Use viewCameraVec to avoid imprecision if camera is far from origin. */
- vec3 vV = viewCameraVec(get_view_space_from_depth(uv, 1.0));
- vec3 V = transform_direction(ViewMatrixInverse, vV);
- /* Background has no motion under camera translation. Translate view vector with the camera. */
- /* WATCH(fclem): Might create precision issues. */
- P_next = camera_next.viewinv[3].xyz + V;
- P_curr = camera_curr.viewinv[3].xyz + V;
- P_prev = camera_prev.viewinv[3].xyz + V;
- }
- else {
- /* Static geometry. No translation in world space. */
- P_curr = get_world_space_from_depth(uv, depth);
- P_prev = P_curr;
- P_next = P_curr;
- }
-
- vec4 vel_camera, vel_view;
- velocity_camera(P_prev, P_curr, P_next, vel_camera, vel_view);
-
- if (in_texture_range(texel, depth_tx)) {
- imageStore(velocity_view_img, texel, vel_view);
-
- if (is_valid_output(velocity_camera_img)) {
- imageStore(velocity_camera_img, texel, vel_camera);
- }
- }
-}
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh
new file mode 100644
index 00000000000..a5baaca51f9
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "eevee_defines.hh"
+#include "gpu_shader_create_info.hh"
+
+GPU_SHADER_CREATE_INFO(eevee_film)
+ .uniform_buf(4, "FilmData", "film_buf")
+ .sampler(0, ImageType::DEPTH_2D, "depth_tx")
+ .sampler(1, ImageType::FLOAT_2D, "combined_tx")
+ .sampler(2, ImageType::FLOAT_2D, "normal_tx")
+ .sampler(3, ImageType::FLOAT_2D, "vector_tx")
+ .sampler(4, ImageType::FLOAT_2D, "diffuse_light_tx")
+ .sampler(5, ImageType::FLOAT_2D, "diffuse_color_tx")
+ .sampler(6, ImageType::FLOAT_2D, "specular_light_tx")
+ .sampler(7, ImageType::FLOAT_2D, "specular_color_tx")
+ .sampler(8, ImageType::FLOAT_2D, "volume_light_tx")
+ .sampler(9, ImageType::FLOAT_2D, "emission_tx")
+ .sampler(10, ImageType::FLOAT_2D, "environment_tx")
+ .sampler(11, ImageType::FLOAT_2D, "shadow_tx")
+ .sampler(12, ImageType::FLOAT_2D, "ambient_occlusion_tx")
+ .sampler(13, ImageType::FLOAT_2D_ARRAY, "aov_color_tx")
+ .sampler(14, ImageType::FLOAT_2D_ARRAY, "aov_value_tx")
+    /* Color history for TAA needs to be a sampler to leverage bilinear sampling. */
+ .sampler(15, ImageType::FLOAT_2D, "in_combined_tx")
+ // .sampler(15, ImageType::FLOAT_2D, "cryptomatte_tx") /* TODO */
+ .image(0, GPU_R32F, Qualifier::READ, ImageType::FLOAT_2D_ARRAY, "in_weight_img")
+ .image(1, GPU_R32F, Qualifier::WRITE, ImageType::FLOAT_2D_ARRAY, "out_weight_img")
+    /* Color history for TAA needs to be a sampler to leverage bilinear sampling. */
+ //.image(2, GPU_RGBA16F, Qualifier::READ, ImageType::FLOAT_2D, "in_combined_img")
+ .image(3, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_combined_img")
+ .image(4, GPU_R32F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "depth_img")
+ .image(5, GPU_RGBA16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "color_accum_img")
+ .image(6, GPU_R16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "value_accum_img")
+ .additional_info("eevee_shared")
+ .additional_info("eevee_velocity_camera")
+ .additional_info("draw_view");
+
+GPU_SHADER_CREATE_INFO(eevee_film_frag)
+ .do_static_compilation(true)
+ .fragment_out(0, Type::VEC4, "out_color")
+ .fragment_source("eevee_film_frag.glsl")
+ .additional_info("draw_fullscreen", "eevee_film");
+
+GPU_SHADER_CREATE_INFO(eevee_film_comp)
+ .do_static_compilation(true)
+ .local_group_size(FILM_GROUP_SIZE, FILM_GROUP_SIZE)
+ .compute_source("eevee_film_comp.glsl")
+ .additional_info("eevee_film");
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
index a944bea402e..2368061402c 100644
--- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
@@ -70,6 +70,14 @@ GPU_SHADER_INTERFACE_INFO(eevee_surf_iface, "interp")
#define image_out(slot, qualifier, format, name) \
image(slot, format, qualifier, ImageType::FLOAT_2D, name, Frequency::PASS)
+#define image_array_out(slot, qualifier, format, name) \
+ image(slot, format, qualifier, ImageType::FLOAT_2D_ARRAY, name, Frequency::PASS)
+
+GPU_SHADER_CREATE_INFO(eevee_aov_out)
+ .define("MAT_AOV_SUPPORT")
+ .image_array_out(6, Qualifier::WRITE, GPU_RGBA16F, "aov_color_img")
+ .image_array_out(7, Qualifier::WRITE, GPU_R16F, "aov_value_img")
+ .storage_buf(7, Qualifier::READ, "AOVsInfoData", "aov_buf");
GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
.vertex_out(eevee_surf_iface)
@@ -85,31 +93,38 @@ GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
// .image_out(3, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_reflection_color")
// .image_out(4, Qualifier::WRITE, GPU_RGBA16F, "gbuff_reflection_normal")
// .image_out(5, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_emission")
- /* Renderpasses. */
+ /* Render-passes. */
// .image_out(6, Qualifier::READ_WRITE, GPU_RGBA16F, "rpass_volume_light")
/* TODO: AOVs maybe? */
.fragment_source("eevee_surf_deferred_frag.glsl")
- // .additional_info("eevee_sampling_data", "eevee_utility_texture")
+ // .additional_info("eevee_aov_out", "eevee_sampling_data", "eevee_utility_texture")
;
-#undef image_out
-
GPU_SHADER_CREATE_INFO(eevee_surf_forward)
.auto_resource_location(true)
.vertex_out(eevee_surf_iface)
+    /* Early fragment test is needed to support render passes for forward surfaces. */
+ /* NOTE: This removes the possibility of using gl_FragDepth. */
+ .early_fragment_test(true)
.fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1)
.fragment_source("eevee_surf_forward_frag.glsl")
- // .additional_info("eevee_sampling_data",
- // "eevee_lightprobe_data",
- /* Optionally added depending on the material. */
- // "eevee_raytrace_data",
- // "eevee_transmittance_data",
- // "eevee_utility_texture",
- // "eevee_light_data",
- // "eevee_shadow_data"
- // )
- ;
+ .image_out(0, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
+ .image_out(1, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_light_img")
+ .image_out(2, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
+ .image_out(3, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_light_img")
+ .image_out(4, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
+ .image_out(5, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img")
+ .additional_info("eevee_aov_out"
+ // "eevee_sampling_data",
+ // "eevee_lightprobe_data",
+ /* Optionally added depending on the material. */
+ // "eevee_raytrace_data",
+ // "eevee_transmittance_data",
+ // "eevee_utility_texture",
+ // "eevee_light_data",
+ // "eevee_shadow_data"
+ );
GPU_SHADER_CREATE_INFO(eevee_surf_depth)
.vertex_out(eevee_surf_iface)
@@ -119,10 +134,21 @@ GPU_SHADER_CREATE_INFO(eevee_surf_depth)
GPU_SHADER_CREATE_INFO(eevee_surf_world)
.vertex_out(eevee_surf_iface)
+ .image_out(0, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
+ .image_out(1, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_light_img")
+ .image_out(2, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
+ .image_out(3, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_light_img")
+ .image_out(4, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
+ .image_out(5, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img")
+ .push_constant(Type::FLOAT, "world_opacity_fade")
.fragment_out(0, Type::VEC4, "out_background")
.fragment_source("eevee_surf_world_frag.glsl")
- // .additional_info("eevee_utility_texture")
- ;
+ .additional_info("eevee_aov_out"
+ //"eevee_utility_texture"
+ );
+
+#undef image_out
+#undef image_array_out
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
index a5f16363466..6e8e8fb020a 100644
--- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
@@ -30,26 +31,7 @@ GPU_SHADER_CREATE_INFO(eevee_velocity_geom)
.storage_buf(
7, Qualifier::READ, "VelocityIndex", "velocity_indirection_buf[]", Frequency::PASS)
.vertex_out(eevee_velocity_surface_iface)
- .fragment_out(0, Type::VEC4, "out_velocity_view")
+ .fragment_out(0, Type::VEC4, "out_velocity")
.additional_info("eevee_velocity_camera");
/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Velocity Resolve
- *
- * Computes velocity for static objects.
- * Also converts motion to camera space (as opposed to view space) if needed.
- * \{ */
-
-GPU_SHADER_CREATE_INFO(eevee_velocity_resolve)
- .do_static_compilation(true)
- .local_group_size(8, 8)
- .sampler(0, ImageType::DEPTH_2D, "depth_tx")
- .image(0, GPU_RG16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "velocity_view_img")
- .image(1, GPU_RG16F, Qualifier::WRITE, ImageType::FLOAT_2D, "velocity_camera_img")
- .additional_info("eevee_shared")
- .compute_source("eevee_velocity_resolve_comp.glsl")
- .additional_info("draw_view", "eevee_velocity_camera");
-
-/** \} */