Diffstat (limited to 'source/blender/draw/engines/eevee_next/shaders')
 source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl        |  42
 source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl            |  20
 source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl             |  13
 source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl             |  31
 source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl              | 713
 source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl          |  58
 source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl       |  23
 source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl     |  44
 source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl       |  11
 source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl          |  93
 source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl |  58
 source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh         |  48
 source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh     |  58
 source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh     |  22
 14 files changed, 1027 insertions(+), 207 deletions(-)
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl
index 326481a1db6..974581e674e 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_attributes_lib.glsl
@@ -3,6 +3,8 @@
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_codegen_lib.glsl)
+#define EEVEE_ATTRIBUTE_LIB
+
#if defined(MAT_GEOM_MESH)
/* -------------------------------------------------------------------- */
@@ -282,43 +284,3 @@ vec3 attr_load_uv(vec3 attr)
/** \} */
#endif
-
-/* -------------------------------------------------------------------- */
-/** \name Volume Attribute post
- *
- * TODO(@fclem): These implementation details should concern the DRWManager and not be a fix on
- * the engine side. But as of now, the engines are responsible for loading the attributes.
- *
- * \{ */
-
-#if defined(MAT_GEOM_VOLUME)
-
-float attr_load_temperature_post(float attr)
-{
- /* Bring the into standard range without having to modify the grid values */
- attr = (attr > 0.01) ? (attr * drw_volume.temperature_mul + drw_volume.temperature_bias) : 0.0;
- return attr;
-}
-vec4 attr_load_color_post(vec4 attr)
-{
- /* Density is premultiplied for interpolation, divide it out here. */
- attr.rgb *= safe_rcp(attr.a);
- attr.rgb *= drw_volume.color_mul.rgb;
- attr.a = 1.0;
- return attr;
-}
-
-#else /* Noop for any other surface. */
-
-float attr_load_temperature_post(float attr)
-{
- return attr;
-}
-vec4 attr_load_color_post(vec4 attr)
-{
- return attr;
-}
-
-#endif
-
-/** \} */
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
index f79e9102d76..2611f714b59 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_camera_lib.glsl
@@ -143,24 +143,10 @@ vec2 camera_uv_from_view(CameraData cam, vec3 vV)
}
}
-vec2 camera_uv_from_world(CameraData cam, vec3 V)
+vec2 camera_uv_from_world(CameraData cam, vec3 P)
{
- vec3 vV = transform_point(cam.viewmat, V);
- switch (cam.type) {
- default:
- case CAMERA_ORTHO:
- return camera_uv_from_view(cam.persmat, false, V);
- case CAMERA_PERSP:
- return camera_uv_from_view(cam.persmat, true, V);
- case CAMERA_PANO_EQUIRECT:
- return camera_equirectangular_from_direction(cam, vV);
- case CAMERA_PANO_EQUISOLID:
- /* ATTR_FALLTHROUGH; */
- case CAMERA_PANO_EQUIDISTANT:
- return camera_fisheye_from_direction(cam, vV);
- case CAMERA_PANO_MIRROR:
- return camera_mirror_ball_from_direction(cam, vV);
- }
+ vec3 vV = transform_direction(cam.viewmat, normalize(P));
+ return camera_uv_from_view(cam, vV);
}
/** \} */
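
A note on the simplification above: camera_uv_from_world() no longer duplicates the per-projection switch. It normalizes the world-space position into a direction, rotates it into view space, and defers the projection-specific mapping to camera_uv_from_view(). A minimal sketch of the assumed semantics of the helper it relies on (this body is an assumption for illustration, not part of the commit):

/* Assumed: apply only the rotation part of the matrix, without translation. */
vec3 transform_direction(mat4 m, vec3 d)
{
  return mat3(m) * d;
}

Because only a direction is transformed, the result depends on the view orientation alone, which is what the panoramic branches of camera_uv_from_view() expect from their view vector.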
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl
new file mode 100644
index 00000000000..ce1f19edf53
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_comp.glsl
@@ -0,0 +1,13 @@
+
+#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_film_lib.glsl)
+
+void main()
+{
+ ivec2 texel_film = ivec2(gl_GlobalInvocationID.xy);
+ /* Not used. */
+ vec4 out_color;
+ float out_depth;
+
+ film_process_data(texel_film, out_color, out_depth);
+}
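
The compute variant is a thin wrapper: it maps the invocation ID straight to a film texel and funnels everything through film_process_data(), which the fragment variant in the next file shares. When a host rounds the dispatch size up to a multiple of FILM_GROUP_SIZE, edge invocations can land outside the film; whether this dispatch needs a guard depends on host-side code outside this diff. The sketch below only illustrates that common pattern, reusing film_buf.extent from the film library (the guard itself is an assumption, not a fix):

void main()
{
  ivec2 texel_film = ivec2(gl_GlobalInvocationID.xy);
  /* Assumed guard: skip invocations past the film extent when the
   * dispatch group count is rounded up to the group size. */
  if (any(greaterThanEqual(texel_film, film_buf.extent))) {
    return;
  }
  vec4 out_color;
  float out_depth;
  film_process_data(texel_film, out_color, out_depth);
}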
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl
new file mode 100644
index 00000000000..26040234fd0
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_frag.glsl
@@ -0,0 +1,31 @@
+
+#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_film_lib.glsl)
+
+void main()
+{
+ ivec2 texel_film = ivec2(gl_FragCoord.xy) - film_buf.offset;
+ float out_depth;
+
+ if (film_buf.display_only) {
+ out_depth = imageLoad(depth_img, texel_film).r;
+
+ if (film_buf.display_id == -1) {
+ out_color = texelFetch(in_combined_tx, texel_film, 0);
+ }
+ else if (film_buf.display_is_value) {
+ out_color.rgb = imageLoad(value_accum_img, ivec3(texel_film, film_buf.display_id)).rrr;
+ out_color.a = 1.0;
+ }
+ else {
+ out_color = imageLoad(color_accum_img, ivec3(texel_film, film_buf.display_id));
+ }
+ }
+ else {
+ film_process_data(texel_film, out_color, out_depth);
+ }
+
+ gl_FragDepth = get_depth_from_view_z(-out_depth);
+
+ gl_FragDepth = film_display_depth_amend(texel_film, gl_FragDepth);
+}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl
new file mode 100644
index 00000000000..b286836e8df
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_film_lib.glsl
@@ -0,0 +1,713 @@
+
+/**
+ * Film accumulation utility functions.
+ **/
+
+#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+#pragma BLENDER_REQUIRE(common_math_geom_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_camera_lib.glsl)
+#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
+
+/* Return scene linear Z depth from the camera or radial depth for panoramic cameras. */
+float film_depth_convert_to_scene(float depth)
+{
+ if (false /* Panoramic */) {
+ /* TODO */
+ return 1.0;
+ }
+ return abs(get_view_z_from_depth(depth));
+}
+
+vec3 film_YCoCg_from_scene_linear(vec3 rgb_color)
+{
+ const mat3 colorspace_tx = transpose(mat3(vec3(1, 2, 1), /* Y */
+ vec3(2, 0, -2), /* Co */
+ vec3(-1, 2, -1))); /* Cg */
+ return colorspace_tx * rgb_color;
+}
+
+vec4 film_YCoCg_from_scene_linear(vec4 rgba_color)
+{
+ return vec4(film_YCoCg_from_scene_linear(rgba_color.rgb), rgba_color.a);
+}
+
+vec3 film_scene_linear_from_YCoCg(vec3 ycocg_color)
+{
+ float Y = ycocg_color.x;
+ float Co = ycocg_color.y;
+ float Cg = ycocg_color.z;
+
+ vec3 rgb_color;
+ rgb_color.r = Y + Co - Cg;
+ rgb_color.g = Y + Cg;
+ rgb_color.b = Y - Co - Cg;
+ return rgb_color * 0.25;
+}
+
+/* Load a texture sample in a specific format. Combined pass needs to use this. */
+vec4 film_texelfetch_as_YCoCg_opacity(sampler2D tx, ivec2 texel)
+{
+ vec4 color = texelFetch(tx, texel, 0);
+ /* Convert transmittance to opacity. */
+ color.a = saturate(1.0 - color.a);
+ /* Transform to YCoCg for accumulation. */
+ color.rgb = film_YCoCg_from_scene_linear(color.rgb);
+ return color;
+}
+
+/* Returns a weight based on Luma to reduce the flickering introduced by high energy pixels. */
+float film_luma_weight(float luma)
+{
+ /* Slide 20 of "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014. */
+ /* To preserve more details in dark areas, we use a bigger bias. */
+ return 1.0 / (4.0 + luma * film_buf.exposure_scale);
+}
+
+/* -------------------------------------------------------------------- */
+/** \name Filter
+ * \{ */
+
+FilmSample film_sample_get(int sample_n, ivec2 texel_film)
+{
+#ifdef PANORAMIC
+ /* TODO(fclem): Panoramic projection will be more complex. The samples will have to be retrieved
+ * at runtime, maybe by scanning a whole region. Offset and weight will have to be computed by
+ * reprojecting the incoming pixel data into film pixel space. */
+#else
+
+# ifdef SCALED_RENDERING
+ texel_film /= film_buf.scaling_factor;
+# endif
+
+ FilmSample film_sample = film_buf.samples[sample_n];
+ film_sample.texel += texel_film + film_buf.offset;
+ /* Use extend on borders. */
+ film_sample.texel = clamp(film_sample.texel, ivec2(0, 0), film_buf.render_extent - 1);
+
+ /* TODO(fclem): Panoramic projection will need to compute the sample weight in the shader
+ * instead of precomputing it on CPU. */
+# ifdef SCALED_RENDERING
+ /* We need to compute the real distance and weight since a sample
+ * can be used by many final pixels. */
+ vec2 offset = film_buf.subpixel_offset - vec2(texel_film % film_buf.scaling_factor);
+ film_sample.weight = film_filter_weight(film_buf.filter_size, len_squared(offset));
+# endif
+
+#endif /* PANORAMIC */
+
+ /* Always return a weight above 0 to avoid blind spots between samples. */
+ film_sample.weight = max(film_sample.weight, 1e-6);
+
+ return film_sample;
+}
+
+/* Returns the combined weights of all samples affecting this film pixel. */
+float film_weight_accumulation(ivec2 texel_film)
+{
+#if 0 /* TODO(fclem): Reference implementation, also needed for panoramic cameras. */
+ float weight = 0.0;
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ weight += film_sample_get(i, texel_film).weight;
+ }
+ return weight;
+#endif
+ return film_buf.samples_weight_total;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2D tex, inout vec4 accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, samp.texel, 0) * samp.weight;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2D tex, inout float accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, samp.texel, 0).x * samp.weight;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2DArray tex, inout vec4 accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, ivec3(samp.texel, pass_id), 0) * samp.weight;
+}
+
+void film_sample_accum(FilmSample samp, int pass_id, sampler2DArray tex, inout float accum)
+{
+ if (pass_id == -1) {
+ return;
+ }
+ accum += texelFetch(tex, ivec3(samp.texel, pass_id), 0).x * samp.weight;
+}
+
+void film_sample_accum_mist(FilmSample samp, inout float accum)
+{
+ if (film_buf.mist_id == -1) {
+ return;
+ }
+ float depth = texelFetch(depth_tx, samp.texel, 0).x;
+ vec2 uv = (vec2(samp.texel) + 0.5) / textureSize(depth_tx, 0).xy;
+ vec3 vP = get_view_space_from_depth(uv, depth);
+ bool is_persp = ProjectionMatrix[3][3] == 0.0;
+ float mist = (is_persp) ? length(vP) : abs(vP.z);
+ /* Remap to 0..1 range. */
+ mist = saturate(mist * film_buf.mist_scale + film_buf.mist_bias);
+ /* Falloff. */
+ mist = pow(mist, film_buf.mist_exponent);
+ accum += mist * samp.weight;
+}
+
+void film_sample_accum_combined(FilmSample samp, inout vec4 accum, inout float weight_accum)
+{
+ if (film_buf.combined_id == -1) {
+ return;
+ }
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, samp.texel);
+
+ /* Weight by luma to remove fireflies. */
+ float weight = film_luma_weight(color.x) * samp.weight;
+
+ accum += color * weight;
+ weight_accum += weight;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Load/Store Data
+ * \{ */
+
+#define WEIGHT_LAYER_ACCUMULATION 0
+#define WEIGHT_LAYER_DISTANCE 1
+
+/* Returns the distance used to store nearest interpolation data. */
+float film_distance_load(ivec2 texel)
+{
+ /* Repeat texture coordinates, as the weight buffer may only cover a small portion of the film. */
+ texel = texel % imageSize(in_weight_img).xy;
+
+ if (!film_buf.use_history || film_buf.use_reprojection) {
+ return 1.0e16;
+ }
+ return imageLoad(in_weight_img, ivec3(texel, WEIGHT_LAYER_DISTANCE)).x;
+}
+
+float film_weight_load(ivec2 texel)
+{
+ /* Repeat texture coordinates, as the weight buffer may only cover a small portion of the film. */
+ texel = texel % imageSize(in_weight_img).xy;
+
+ if (!film_buf.use_history || film_buf.use_reprojection) {
+ return 0.0;
+ }
+ return imageLoad(in_weight_img, ivec3(texel, WEIGHT_LAYER_ACCUMULATION)).x;
+}
+
+/* Returns motion in pixel space to retrieve the pixel history. */
+vec2 film_pixel_history_motion_vector(ivec2 texel_sample)
+{
+ /**
+ * Dilate velocity by using the nearest pixel in a cross pattern.
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014 (Slide 27)
+ */
+ const ivec2 corners[4] = ivec2[4](ivec2(-2, -2), ivec2(2, -2), ivec2(-2, 2), ivec2(2, 2));
+ float min_depth = texelFetch(depth_tx, texel_sample, 0).x;
+ ivec2 nearest_texel = texel_sample;
+ for (int i = 0; i < 4; i++) {
+ ivec2 texel = clamp(texel_sample + corners[i], ivec2(0), textureSize(depth_tx, 0).xy);
+ float depth = texelFetch(depth_tx, texel, 0).x;
+ if (min_depth > depth) {
+ min_depth = depth;
+ nearest_texel = texel;
+ }
+ }
+
+ vec4 vector = velocity_resolve(vector_tx, nearest_texel, min_depth);
+
+ /* Transform to pixel space. */
+ vector.xy *= vec2(film_buf.extent);
+
+ return vector.xy;
+}
+
+/* \a t is inter-pixel position. 0 means perfectly on a pixel center.
+ * Returns weights in both dimensions.
+ * Multiply the per-dimension weights to get the final pixel weights. */
+void film_get_catmull_rom_weights(vec2 t, out vec2 weights[4])
+{
+ vec2 t2 = t * t;
+ vec2 t3 = t2 * t;
+ float fc = 0.5; /* Catmull-Rom. */
+
+ vec2 fct = t * fc;
+ vec2 fct2 = t2 * fc;
+ vec2 fct3 = t3 * fc;
+ weights[0] = (fct2 * 2.0 - fct3) - fct;
+ weights[1] = (t3 * 2.0 - fct3) + (-t2 * 3.0 + fct2) + 1.0;
+ weights[2] = (-t3 * 2.0 + fct3) + (t2 * 3.0 - (2.0 * fct2)) + fct;
+ weights[3] = fct3 - fct2;
+}
+
+/* Load color using a special filter to avoid losing detail.
+ * \a input_texel is the sample position with subpixel accuracy. */
+vec4 film_sample_catmull_rom(sampler2D color_tx, vec2 input_texel)
+{
+ vec2 center_texel;
+ vec2 inter_texel = modf(input_texel, center_texel);
+ vec2 weights[4];
+ film_get_catmull_rom_weights(inter_texel, weights);
+
+#if 0 /* Reference. 16 Taps. */
+ vec4 color = vec4(0.0);
+ for (int y = 0; y < 4; y++) {
+ for (int x = 0; x < 4; x++) {
+ ivec2 texel = ivec2(center_texel) + ivec2(x, y) - 1;
+ texel = clamp(texel, ivec2(0), textureSize(color_tx, 0).xy - 1);
+ color += texelFetch(color_tx, texel, 0) * weights[x].x * weights[y].y;
+ }
+ }
+ return color;
+
+#elif 1 /* Optimized version. 5 Bilinear Taps. */
+ /**
+ * Use optimized version by leveraging bilinear filtering from hardware sampler and by removing
+ * corner taps.
+ * From "Filmic SMAA" by Jorge Jimenez at Siggraph 2016
+ * http://advances.realtimerendering.com/s2016/Filmic%20SMAA%20v7.pptx
+ */
+ center_texel += 0.5;
+
+ /* Slide 92. */
+ vec2 weight_12 = weights[1] + weights[2];
+ vec2 uv_12 = (center_texel + weights[2] / weight_12) * film_buf.extent_inv;
+ vec2 uv_0 = (center_texel - 1.0) * film_buf.extent_inv;
+ vec2 uv_3 = (center_texel + 2.0) * film_buf.extent_inv;
+
+ vec4 color;
+ vec4 weight_cross = weight_12.xyyx * vec4(weights[0].yx, weights[3].xy);
+ float weight_center = weight_12.x * weight_12.y;
+
+ color = textureLod(color_tx, uv_12, 0.0) * weight_center;
+ color += textureLod(color_tx, vec2(uv_12.x, uv_0.y), 0.0) * weight_cross.x;
+ color += textureLod(color_tx, vec2(uv_0.x, uv_12.y), 0.0) * weight_cross.y;
+ color += textureLod(color_tx, vec2(uv_3.x, uv_12.y), 0.0) * weight_cross.z;
+ color += textureLod(color_tx, vec2(uv_12.x, uv_3.y), 0.0) * weight_cross.w;
+ /* Re-normalize for the removed corners. */
+ return color / (weight_center + sum(weight_cross));
+
+#else /* Nearest interpolation for debugging. 1 Tap. */
+ ivec2 texel = ivec2(center_texel) + ivec2(greaterThan(inter_texel, vec2(0.5)));
+ texel = clamp(texel, ivec2(0), textureSize(color_tx, 0).xy - 1);
+ return texelFetch(color_tx, texel, 0);
+#endif
+}
+
+/* Return history clipping bounding box in YCoCg color space. */
+void film_combined_neighbor_boundbox(ivec2 texel, out vec4 min_c, out vec4 max_c)
+{
+ /* Plus (+) shape offsets. */
+ const ivec2 plus_offsets[5] = ivec2[5](ivec2(0, 0), /* Center */
+ ivec2(-1, 0),
+ ivec2(0, -1),
+ ivec2(1, 0),
+ ivec2(0, 1));
+#if 0
+ /**
+ * Compute Variance of neighborhood as described in:
+ * "An Excursion in Temporal Supersampling" by Marco Salvi at GDC 2016.
+ * and:
+ * "A Survey of Temporal Antialiasing Techniques" by Yang et al.
+ */
+
+ /* First 2 moments. */
+ vec4 mu1 = vec4(0), mu2 = vec4(0);
+ for (int i = 0; i < 5; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + plus_offsets[i]);
+ mu1 += color;
+ mu2 += sqr(color);
+ }
+ mu1 *= (1.0 / 5.0);
+ mu2 *= (1.0 / 5.0);
+
+ /* Extent scaling. Range [0.75..1.25].
+ * Balance between more flickering (0.75) or more ghosting (1.25). */
+ const float gamma = 1.25;
+ /* Standard deviation. */
+ vec4 sigma = sqrt(abs(mu2 - sqr(mu1)));
+ /* eq. 6 in "A Survey of Temporal Antialiasing Techniques". */
+ min_c = mu1 - gamma * sigma;
+ max_c = mu1 + gamma * sigma;
+#else
+ /**
+ * Simple bounding box calculation in YCoCg as described in:
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014
+ */
+ min_c = vec4(1e16);
+ max_c = vec4(-1e16);
+ for (int i = 0; i < 5; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + plus_offsets[i]);
+ min_c = min(min_c, color);
+ max_c = max(max_c, color);
+ }
+ /* (Slide 32) Simple clamp to min/max of 8 neighbors results in 3x3 box artifacts.
+ * Round bbox shape by averaging 2 different min/max from 2 different neighborhoods. */
+ vec4 min_c_3x3 = min_c;
+ vec4 max_c_3x3 = max_c;
+ const ivec2 corners[4] = ivec2[4](ivec2(-1, -1), ivec2(1, -1), ivec2(-1, 1), ivec2(1, 1));
+ for (int i = 0; i < 4; i++) {
+ vec4 color = film_texelfetch_as_YCoCg_opacity(combined_tx, texel + corners[i]);
+ min_c_3x3 = min(min_c_3x3, color);
+ max_c_3x3 = max(max_c_3x3, color);
+ }
+ min_c = (min_c + min_c_3x3) * 0.5;
+ max_c = (max_c + max_c_3x3) * 0.5;
+#endif
+}
+
+/* 1D equivalent of line_aabb_clipping_dist(). */
+float film_aabb_clipping_dist_alpha(float origin, float direction, float aabb_min, float aabb_max)
+{
+ if (abs(direction) < 1e-5) {
+ return 0.0;
+ }
+ float nearest_plane = (direction > 0.0) ? aabb_min : aabb_max;
+ return (nearest_plane - origin) / direction;
+}
+
+/* Modulate the history color to avoid ghosting artifacts. */
+vec4 film_amend_combined_history(
+ vec4 min_color, vec4 max_color, vec4 color_history, vec4 src_color, ivec2 src_texel)
+{
+ /* Clip instead of clamping to avoid color accumulating in the AABB corners. */
+ vec4 clip_dir = src_color - color_history;
+
+ float t = line_aabb_clipping_dist(color_history.rgb, clip_dir.rgb, min_color.rgb, max_color.rgb);
+ color_history.rgb += clip_dir.rgb * saturate(t);
+
+ /* Clip alpha on its own to avoid interference with other channels. */
+ float t_a = film_aabb_clipping_dist_alpha(color_history.a, clip_dir.a, min_color.a, max_color.a);
+ color_history.a += clip_dir.a * saturate(t_a);
+
+ return color_history;
+}
+
+float film_history_blend_factor(float velocity,
+ vec2 texel,
+ float luma_min,
+ float luma_max,
+ float luma_incoming,
+ float luma_history)
+{
+ /* 5% of incoming color by default. */
+ float blend = 0.05;
+ /* Blend less history if the pixel has substantial velocity. */
+ blend = mix(blend, 0.20, saturate(velocity * 0.02));
+ /**
+ * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014 (Slide 43)
+ * Bias towards history if the incoming pixel is near clamping. Reduces flicker.
+ */
+ float distance_to_luma_clip = min_v2(vec2(luma_history - luma_min, luma_max - luma_history));
+ /* Divide by bbox size to get a factor. The factor of 2 compensates for the min() above, as the
+ * distance to the nearer bound is at most half the range. */
+ distance_to_luma_clip *= 2.0 * safe_rcp(luma_max - luma_min);
+ /* Linearly blend when history gets below 25% of the bbox size. */
+ blend *= saturate(distance_to_luma_clip * 4.0 + 0.1);
+ /* Discard out of view history. */
+ if (any(lessThan(texel, vec2(0))) || any(greaterThanEqual(texel, film_buf.extent))) {
+ blend = 1.0;
+ }
+ /* Discard history if invalid. */
+ if (film_buf.use_history == false) {
+ blend = 1.0;
+ }
+ return blend;
+}
+
+/* Returns resolved final color. */
+void film_store_combined(
+ FilmSample dst, ivec2 src_texel, vec4 color, float color_weight, inout vec4 display)
+{
+ if (film_buf.combined_id == -1) {
+ return;
+ }
+
+ vec4 color_src, color_dst;
+ float weight_src, weight_dst;
+
+ /* Undo the weighting to get the final spatially filtered color. */
+ color_src = color / color_weight;
+
+ if (film_buf.use_reprojection) {
+ /* Interactive accumulation. Do reprojection and Temporal Anti-Aliasing. */
+
+ /* Reproject by finding where this pixel was in the previous frame. */
+ vec2 motion = film_pixel_history_motion_vector(src_texel);
+ vec2 history_texel = vec2(dst.texel) + motion;
+
+ float velocity = length(motion);
+
+ /* Load weight if it is not uniform across the whole buffer (i.e. upsampling, panoramic). */
+ // dst.weight = film_weight_load(texel_combined);
+
+ color_dst = film_sample_catmull_rom(in_combined_tx, history_texel);
+ color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb);
+
+ /* Get local color bounding box of source neighborhood. */
+ vec4 min_color, max_color;
+ film_combined_neighbor_boundbox(src_texel, min_color, max_color);
+
+ float blend = film_history_blend_factor(
+ velocity, history_texel, min_color.x, max_color.x, color_src.x, color_dst.x);
+
+ color_dst = film_amend_combined_history(min_color, max_color, color_dst, color_src, src_texel);
+
+ /* Luma weighted blend to avoid flickering. */
+ weight_dst = film_luma_weight(color_dst.x) * (1.0 - blend);
+ weight_src = film_luma_weight(color_src.x) * (blend);
+ }
+ else {
+ /* Everything is static. Use render accumulation. */
+ color_dst = texelFetch(in_combined_tx, dst.texel, 0);
+ color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb);
+
+ /* Luma weighted blend to avoid flickering. */
+ weight_dst = film_luma_weight(color_dst.x) * dst.weight;
+ weight_src = color_weight;
+ }
+ /* Weighted blend. */
+ color = color_dst * weight_dst + color_src * weight_src;
+ color /= weight_src + weight_dst;
+
+ color.rgb = film_scene_linear_from_YCoCg(color.rgb);
+
+ /* Fix alpha not accumulating to 1 because of float imprecision. */
+ if (color.a > 0.995) {
+ color.a = 1.0;
+ }
+
+ /* Filter NaNs. */
+ if (any(isnan(color))) {
+ color = vec4(0.0, 0.0, 0.0, 1.0);
+ }
+
+ if (film_buf.display_id == -1) {
+ display = color;
+ }
+ imageStore(out_combined_img, dst.texel, color);
+}
+
+void film_store_color(FilmSample dst, int pass_id, vec4 color, inout vec4 display)
+{
+ if (pass_id == -1) {
+ return;
+ }
+
+ vec4 data_film = imageLoad(color_accum_img, ivec3(dst.texel, pass_id));
+
+ color = (data_film * dst.weight + color) * dst.weight_sum_inv;
+
+ /* Filter NaNs. */
+ if (any(isnan(color))) {
+ color = vec4(0.0, 0.0, 0.0, 1.0);
+ }
+
+ if (film_buf.display_id == pass_id) {
+ display = color;
+ }
+ imageStore(color_accum_img, ivec3(dst.texel, pass_id), color);
+}
+
+void film_store_value(FilmSample dst, int pass_id, float value, inout vec4 display)
+{
+ if (pass_id == -1) {
+ return;
+ }
+
+ float data_film = imageLoad(value_accum_img, ivec3(dst.texel, pass_id)).x;
+
+ value = (data_film * dst.weight + value) * dst.weight_sum_inv;
+
+ /* Filter NaNs. */
+ if (isnan(value)) {
+ value = 0.0;
+ }
+
+ if (film_buf.display_id == pass_id) {
+ display = vec4(value, value, value, 1.0);
+ }
+ imageStore(value_accum_img, ivec3(dst.texel, pass_id), vec4(value));
+}
+
+/* Nearest sample variant. Always stores the data. */
+void film_store_data(ivec2 texel_film, int pass_id, vec4 data_sample, inout vec4 display)
+{
+ if (pass_id == -1) {
+ return;
+ }
+
+ if (film_buf.display_id == pass_id) {
+ display = data_sample;
+ }
+ imageStore(color_accum_img, ivec3(texel_film, pass_id), data_sample);
+}
+
+void film_store_depth(ivec2 texel_film, float value, out float out_depth)
+{
+ if (film_buf.depth_id == -1) {
+ return;
+ }
+
+ out_depth = film_depth_convert_to_scene(value);
+
+ imageStore(depth_img, texel_film, vec4(out_depth));
+}
+
+void film_store_distance(ivec2 texel, float value)
+{
+ imageStore(out_weight_img, ivec3(texel, WEIGHT_LAYER_DISTANCE), vec4(value));
+}
+
+void film_store_weight(ivec2 texel, float value)
+{
+ imageStore(out_weight_img, ivec3(texel, WEIGHT_LAYER_ACCUMULATION), vec4(value));
+}
+
+float film_display_depth_amend(ivec2 texel, float depth)
+{
+ /* This effectively offsets the depth of the whole 2x2 region to the lowest value of the region
+ * twice. One for X and one for Y direction. */
+ /* TODO(fclem): This could be improved as it gives flickering result at depth discontinuity.
+ * But this is the quickest stable result I could come with for now. */
+#ifdef GPU_FRAGMENT_SHADER
+ depth += fwidth(depth);
+#endif
+ /* Small offset to avoid the lessEqual depth test failing due to precision loss in the conversions. */
+ depth += 2.4e-7 * 4.0;
+ return saturate(depth);
+}
+
+/** \} */
+
+/** NOTE: out_depth is scene linear depth from the camera origin. */
+void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth)
+{
+ out_color = vec4(0.0);
+ out_depth = 0.0;
+
+ float weight_accum = film_weight_accumulation(texel_film);
+ float film_weight = film_weight_load(texel_film);
+ float weight_sum = film_weight + weight_accum;
+ film_store_weight(texel_film, weight_sum);
+
+ FilmSample dst;
+ dst.texel = texel_film;
+ dst.weight = film_weight;
+ dst.weight_sum_inv = 1.0 / weight_sum;
+
+ /* NOTE: We split the accumulations into separate loops to avoid using too many registers and
+ * maximize occupancy. */
+
+ if (film_buf.combined_id != -1) {
+ /* NOTE: Do weight accumulation again since we use custom weights. */
+ float weight_accum = 0.0;
+ vec4 combined_accum = vec4(0.0);
+
+ FilmSample src;
+ for (int i = film_buf.samples_len - 1; i >= 0; i--) {
+ src = film_sample_get(i, texel_film);
+ film_sample_accum_combined(src, combined_accum, weight_accum);
+ }
+ /* NOTE: src.texel is the center texel in the incoming data buffer. */
+ film_store_combined(dst, src.texel, combined_accum, weight_accum, out_color);
+ }
+
+ if (film_buf.has_data) {
+ float film_distance = film_distance_load(texel_film);
+
+ /* Get sample closest to target texel. It is always sample 0. */
+ FilmSample film_sample = film_sample_get(0, texel_film);
+
+ if (film_buf.use_reprojection || film_sample.weight < film_distance) {
+ vec4 normal = texelFetch(normal_tx, film_sample.texel, 0);
+ float depth = texelFetch(depth_tx, film_sample.texel, 0).x;
+ vec4 vector = velocity_resolve(vector_tx, film_sample.texel, depth);
+
+ film_store_depth(texel_film, depth, out_depth);
+ film_store_data(texel_film, film_buf.normal_id, normal, out_color);
+ film_store_data(texel_film, film_buf.vector_id, vector, out_color);
+ film_store_distance(texel_film, film_sample.weight);
+ }
+ else {
+ out_depth = imageLoad(depth_img, texel_film).r;
+ }
+ }
+
+ if (film_buf.any_render_pass_1) {
+ vec4 diffuse_light_accum = vec4(0.0);
+ vec4 specular_light_accum = vec4(0.0);
+ vec4 volume_light_accum = vec4(0.0);
+ vec4 emission_accum = vec4(0.0);
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, film_buf.diffuse_light_id, diffuse_light_tx, diffuse_light_accum);
+ film_sample_accum(src, film_buf.specular_light_id, specular_light_tx, specular_light_accum);
+ film_sample_accum(src, film_buf.volume_light_id, volume_light_tx, volume_light_accum);
+ film_sample_accum(src, film_buf.emission_id, emission_tx, emission_accum);
+ }
+ film_store_color(dst, film_buf.diffuse_light_id, diffuse_light_accum, out_color);
+ film_store_color(dst, film_buf.specular_light_id, specular_light_accum, out_color);
+ film_store_color(dst, film_buf.volume_light_id, volume_light_accum, out_color);
+ film_store_color(dst, film_buf.emission_id, emission_accum, out_color);
+ }
+
+ if (film_buf.any_render_pass_2) {
+ vec4 diffuse_color_accum = vec4(0.0);
+ vec4 specular_color_accum = vec4(0.0);
+ vec4 environment_accum = vec4(0.0);
+ float mist_accum = 0.0;
+ float shadow_accum = 0.0;
+ float ao_accum = 0.0;
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, film_buf.diffuse_color_id, diffuse_color_tx, diffuse_color_accum);
+ film_sample_accum(src, film_buf.specular_color_id, specular_color_tx, specular_color_accum);
+ film_sample_accum(src, film_buf.environment_id, environment_tx, environment_accum);
+ film_sample_accum(src, film_buf.shadow_id, shadow_tx, shadow_accum);
+ film_sample_accum(src, film_buf.ambient_occlusion_id, ambient_occlusion_tx, ao_accum);
+ film_sample_accum_mist(src, mist_accum);
+ }
+ film_store_color(dst, film_buf.diffuse_color_id, diffuse_color_accum, out_color);
+ film_store_color(dst, film_buf.specular_color_id, specular_color_accum, out_color);
+ film_store_color(dst, film_buf.environment_id, environment_accum, out_color);
+ film_store_value(dst, film_buf.shadow_id, shadow_accum, out_color);
+ film_store_value(dst, film_buf.ambient_occlusion_id, ao_accum, out_color);
+ film_store_value(dst, film_buf.mist_id, mist_accum, out_color);
+ }
+
+ for (int aov = 0; aov < film_buf.aov_color_len; aov++) {
+ vec4 aov_accum = vec4(0.0);
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, aov, aov_color_tx, aov_accum);
+ }
+ film_store_color(dst, film_buf.aov_color_id + aov, aov_accum, out_color);
+ }
+
+ for (int aov = 0; aov < film_buf.aov_value_len; aov++) {
+ float aov_accum = 0.0;
+
+ for (int i = 0; i < film_buf.samples_len; i++) {
+ FilmSample src = film_sample_get(i, texel_film);
+ film_sample_accum(src, aov, aov_value_tx, aov_accum);
+ }
+ film_store_value(dst, film_buf.aov_value_id + aov, aov_accum, out_color);
+ }
+}
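
Two properties this library leans on can be checked in isolation: the YCoCg pair above is an exact inverse (the 0.25 scale in film_scene_linear_from_YCoCg() undoes the unnormalized forward matrix), and the Catmull-Rom weights form a partition of unity, so film_sample_catmull_rom() stays normalized before its explicit corner-tap renormalization. A small sketch of both checks against the functions defined above (the check_* names are illustrative only):

/* Round trip through the YCoCg pair; expect ~0.0 for any input color. */
float check_ycocg_roundtrip(vec3 rgb)
{
  vec3 back = film_scene_linear_from_YCoCg(film_YCoCg_from_scene_linear(rgb));
  vec3 err = abs(back - rgb);
  return max(err.r, max(err.g, err.b));
}

/* Catmull-Rom weights sum to 1 in each dimension for any subpixel offset t. */
vec2 check_catmull_rom_partition_of_unity(vec2 t)
{
  vec2 weights[4];
  film_get_catmull_rom_weights(t, weights);
  return weights[0] + weights[1] + weights[2] + weights[3]; /* Expect vec2(1.0). */
}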
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl
index 0ccf06a9e14..c488216eeac 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_nodetree_lib.glsl
@@ -245,6 +245,20 @@ float F_eta(float a, float b)
}
void output_aov(vec4 color, float value, uint hash)
{
+#if defined(MAT_AOV_SUPPORT) && defined(GPU_FRAGMENT_SHADER)
+ for (int i = 0; i < AOV_MAX && i < aov_buf.color_len; i++) {
+ if (aov_buf.hash_color[i] == hash) {
+ imageStore(aov_color_img, ivec3(gl_FragCoord.xy, i), color);
+ return;
+ }
+ }
+ for (int i = 0; i < AOV_MAX && i < aov_buf.value_len; i++) {
+ if (aov_buf.hash_value[i] == hash) {
+ imageStore(aov_value_img, ivec3(gl_FragCoord.xy, i), vec4(value));
+ return;
+ }
+ }
+#endif
}
#ifdef EEVEE_MATERIAL_STUBS
@@ -255,6 +269,10 @@ void output_aov(vec4 color, float value, uint hash)
# define nodetree_thickness() 0.1
#endif
+#ifdef GPU_VERTEX_SHADER
+# define closure_to_rgba(a) vec4(0.0)
+#endif
+
/* -------------------------------------------------------------------- */
/** \name Fragment Displacement
*
@@ -359,3 +377,43 @@ vec3 coordinate_incoming(vec3 P)
}
/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Volume Attribute post
+ *
+ * TODO(@fclem): These implementation details should concern the DRWManager and not be a fix on
+ * the engine side. But as of now, the engines are responsible for loading the attributes.
+ *
+ * \{ */
+
+#if defined(MAT_GEOM_VOLUME)
+
+float attr_load_temperature_post(float attr)
+{
+ /* Bring the attribute into standard range without having to modify the grid values. */
+ attr = (attr > 0.01) ? (attr * drw_volume.temperature_mul + drw_volume.temperature_bias) : 0.0;
+ return attr;
+}
+vec4 attr_load_color_post(vec4 attr)
+{
+ /* Density is premultiplied for interpolation, divide it out here. */
+ attr.rgb *= safe_rcp(attr.a);
+ attr.rgb *= drw_volume.color_mul.rgb;
+ attr.a = 1.0;
+ return attr;
+}
+
+#else /* Noop for any other surface. */
+
+float attr_load_temperature_post(float attr)
+{
+ return attr;
+}
+vec4 attr_load_color_post(vec4 attr)
+{
+ return attr;
+}
+
+#endif
+
+/** \} */
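
The color post-process above divides the density out only after grid interpolation: interpolating density-weighted (premultiplied) color and un-premultiplying once at the end avoids the dark fringes that plain color interpolation produces across a density falloff. A minimal sketch of the idea, assuming safe_rcp() returns 0.0 for a 0.0 input (the _sketch names are illustrative):

/* Assumed helper: division-safe reciprocal. */
float safe_rcp_sketch(float a)
{
  return (a != 0.0) ? (1.0 / a) : 0.0;
}

/* Interpolate premultiplied samples, then divide the density out once. */
vec4 mix_premultiplied(vec4 a, vec4 b, float t)
{
  vec4 c = mix(a, b, t); /* .rgb is density-weighted, .a is density. */
  c.rgb *= safe_rcp_sketch(c.a);
  c.a = 1.0;
  return c;
}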
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
index 7ddf941df7c..34ea288852a 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
@@ -10,6 +10,18 @@
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
+vec4 closure_to_rgba(Closure cl)
+{
+ vec4 out_color;
+ out_color.rgb = g_emission;
+ out_color.a = saturate(1.0 - avg(g_transmittance));
+
+ /* Reset for the next closure tree. */
+ closure_weights_reset();
+
+ return out_color;
+}
+
/* From the paper "Hashed Alpha Testing" by Chris Wyman and Morgan McGuire. */
float hash(vec2 a)
{
@@ -72,14 +84,7 @@ void main()
#endif
#ifdef MAT_VELOCITY
- vec4 out_velocity_camera; /* TODO(fclem): Panoramic cameras. */
- velocity_camera(interp.P + motion.prev,
- interp.P,
- interp.P - motion.next,
- out_velocity_camera,
- out_velocity_view);
-
- /* For testing in viewport. */
- out_velocity_view.zw = vec2(0.0);
+ out_velocity = velocity_surface(interp.P + motion.prev, interp.P, interp.P - motion.next);
+ out_velocity = velocity_pack(out_velocity);
#endif
}
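
The hash() above comes from "Hashed Alpha Testing" by Wyman and McGuire: instead of a fixed 0.5 cutoff, each pixel tests its alpha against a stable pseudo-random threshold, which keeps alpha-tested edges from shimmering under jittered sampling. A hedged sketch of how such a hash is typically consumed (the wiring below is illustrative, not this commit's exact discard logic):

/* Illustrative stochastic alpha test using the screen-space hash as threshold. */
void alpha_hash_discard(float alpha, vec2 frag_coord)
{
  float threshold = hash(frag_coord); /* Stable per pixel, in [0..1). */
  if (alpha < threshold) {
    discard;
  }
}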
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
index 143e88dbe68..48ced4e5374 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
@@ -53,21 +53,45 @@ void main()
g_holdout = saturate(g_holdout);
+ vec3 diffuse_light = vec3(saturate(g_diffuse_data.N.z * 0.5 + 0.5));
+ vec3 reflection_light = vec3(spec_light(g_reflection_data));
+ vec3 refraction_light = vec3(saturate(g_refraction_data.N.z * 0.5 + 0.5));
+
+ g_diffuse_data.color *= g_diffuse_data.weight;
+ g_reflection_data.color *= g_reflection_data.weight;
+ g_refraction_data.color *= g_refraction_data.weight;
+ diffuse_light *= step(1e-5, g_diffuse_data.weight);
+ reflection_light *= step(1e-5, g_reflection_data.weight);
+ refraction_light *= step(1e-5, g_refraction_data.weight);
+
out_radiance.rgb = g_emission;
- out_radiance.rgb += g_diffuse_data.color * g_diffuse_data.weight *
- saturate(g_diffuse_data.N.z * 0.5 + 0.5);
- out_radiance.rgb += g_reflection_data.color * g_reflection_data.weight *
- spec_light(g_reflection_data);
- out_radiance.rgb += g_refraction_data.color * g_refraction_data.weight *
- saturate(g_refraction_data.N.z * 0.5 + 0.5);
+ out_radiance.rgb += g_diffuse_data.color * diffuse_light;
+ out_radiance.rgb += g_reflection_data.color * reflection_light;
+ out_radiance.rgb += g_refraction_data.color * refraction_light;
out_radiance.a = 0.0;
+ vec3 specular_light = reflection_light + refraction_light;
+ vec3 specular_color = g_reflection_data.color + g_refraction_data.color;
+
+ /* TODO(fclem): This feels way too complex for what it is. */
+ bool has_any_bsdf_weight = g_diffuse_data.weight != 0.0 || g_reflection_data.weight != 0.0 ||
+ g_refraction_data.weight != 0.0;
+ vec3 out_normal = has_any_bsdf_weight ? vec3(0.0) : g_data.N;
+ out_normal += g_diffuse_data.N * g_diffuse_data.weight;
+ out_normal += g_reflection_data.N * g_reflection_data.weight;
+ out_normal += g_refraction_data.N * g_refraction_data.weight;
+ out_normal = safe_normalize(out_normal);
+
+ ivec2 out_texel = ivec2(gl_FragCoord.xy);
+ imageStore(rp_normal_img, out_texel, vec4(out_normal, 1.0));
+ imageStore(rp_diffuse_light_img, out_texel, vec4(diffuse_light, 1.0));
+ imageStore(rp_diffuse_color_img, out_texel, vec4(g_diffuse_data.color, 1.0));
+ imageStore(rp_specular_light_img, out_texel, vec4(specular_light, 1.0));
+ imageStore(rp_specular_color_img, out_texel, vec4(specular_color, 1.0));
+ imageStore(rp_emission_img, out_texel, vec4(g_emission, 1.0));
+
out_radiance.rgb *= 1.0 - g_holdout;
out_transmittance.rgb = g_transmittance;
out_transmittance.a = saturate(avg(g_transmittance));
-
- /* Test */
- out_transmittance.a = 1.0 - out_transmittance.a;
- out_radiance.a = 1.0 - out_radiance.a;
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl
index ac657afc922..ed75282a550 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_surf_world_frag.glsl
@@ -24,6 +24,17 @@ void main()
g_holdout = saturate(g_holdout);
+ ivec2 out_texel = ivec2(gl_FragCoord.xy);
+ imageStore(rp_normal_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_diffuse_light_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_diffuse_color_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_specular_light_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_specular_color_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+ imageStore(rp_emission_img, out_texel, vec4(0.0, 0.0, 0.0, 1.0));
+
out_background.rgb = safe_color(g_emission) * (1.0 - g_holdout);
out_background.a = saturate(avg(g_transmittance)) * g_holdout;
+
+ /* World opacity. */
+ out_background = mix(vec4(0.0, 0.0, 0.0, 1.0), out_background, world_opacity_fade);
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
index 435ae6658c9..c21456b7a5c 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_lib.glsl
@@ -4,21 +4,49 @@
#ifdef VELOCITY_CAMERA
+vec4 velocity_pack(vec4 data)
+{
+ return data * 0.01;
+}
+
+vec4 velocity_unpack(vec4 data)
+{
+ return data * 100.0;
+}
+
/**
* Given a triple of position, compute the previous and next motion vectors.
- * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy)
+ * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy).
*/
-vec4 velocity_view(vec3 P_prev, vec3 P, vec3 P_next)
+vec4 velocity_surface(vec3 P_prv, vec3 P, vec3 P_nxt)
{
- vec2 prev_uv, curr_uv, next_uv;
+ /* NOTE: We don't use the drw_view.persmat to avoid adding the TAA jitter to the velocity. */
+ vec2 prev_uv = project_point(camera_prev.persmat, P_prv).xy;
+ vec2 curr_uv = project_point(camera_curr.persmat, P).xy;
+ vec2 next_uv = project_point(camera_next.persmat, P_nxt).xy;
+
+ vec4 motion = vec4(prev_uv - curr_uv, curr_uv - next_uv);
+ /* Convert NDC velocity to UV velocity */
+ motion *= 0.5;
+
+ return motion;
+}
- prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
- curr_uv = transform_point(ViewProjectionMatrix, P).xy;
- next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
+/**
+ * Given a view space view vector \a vV, compute the previous and next motion vectors for
+ * background pixels.
+ * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy).
+ */
+vec4 velocity_background(vec3 vV)
+{
+ /* Only transform the direction to avoid losing precision. */
+ vec3 V = transform_direction(camera_curr.viewinv, vV);
+ /* NOTE: We don't use the drw_view.winmat to avoid adding the TAA jitter to the velocity. */
+ vec2 prev_uv = project_point(camera_prev.winmat, V).xy;
+ vec2 curr_uv = project_point(camera_curr.winmat, V).xy;
+ vec2 next_uv = project_point(camera_next.winmat, V).xy;
- vec4 motion;
- motion.xy = prev_uv - curr_uv;
- motion.zw = curr_uv - next_uv;
+ vec4 motion = vec4(prev_uv - curr_uv, curr_uv - next_uv);
/* Convert NDC velocity to UV velocity */
motion *= 0.5;
@@ -26,37 +54,28 @@ vec4 velocity_view(vec3 P_prev, vec3 P, vec3 P_next)
}
/**
- * Given a triple of position, compute the previous and next motion vectors.
- * Returns uv space motion vectors in pairs (motion_prev.xy, motion_next.xy)
- * \a velocity_camera is the motion in film UV space after camera projection.
- * \a velocity_view is the motion in ShadingView UV space. It is different
- * from velocity_camera for multi-view rendering.
+ * Load and resolve correct velocity as some pixels might still not have correct
+ * motion data for performance reasons.
*/
-void velocity_camera(vec3 P_prev, vec3 P, vec3 P_next, out vec4 vel_camera, out vec4 vel_view)
+vec4 velocity_resolve(sampler2D vector_tx, ivec2 texel, float depth)
{
- vec2 prev_uv, curr_uv, next_uv;
- prev_uv = camera_uv_from_world(camera_prev, P_prev);
- curr_uv = camera_uv_from_world(camera_curr, P);
- next_uv = camera_uv_from_world(camera_next, P_next);
-
- vel_camera.xy = prev_uv - curr_uv;
- vel_camera.zw = curr_uv - next_uv;
-
- if (is_panoramic(camera_curr.type)) {
- /* This path is only used if using using panoramic projections. Since the views always have
- * the same 45° aperture angle, we can safely reuse the projection matrix. */
- prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
- curr_uv = transform_point(ViewProjectionMatrix, P).xy;
- next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
-
- vel_view.xy = prev_uv - curr_uv;
- vel_view.zw = curr_uv - next_uv;
- /* Convert NDC velocity to UV velocity */
- vel_view *= 0.5;
- }
- else {
- vel_view = vel_camera;
+ vec2 uv = (vec2(texel) + 0.5) / vec2(textureSize(vector_tx, 0).xy);
+ vec4 vector = texelFetch(vector_tx, texel, 0);
+
+ if (vector.x == VELOCITY_INVALID) {
+ bool is_background = (depth == 1.0);
+ if (is_background) {
+ /* NOTE: Use viewCameraVec to avoid imprecision if camera is far from origin. */
+ vec3 vV = viewCameraVec(get_view_space_from_depth(uv, 1.0));
+ return velocity_background(vV);
+ }
+ else {
+ /* Static geometry. No translation in world space. */
+ vec3 P = get_world_space_from_depth(uv, depth);
+ return velocity_surface(P, P, P);
+ }
}
+ return velocity_unpack(vector);
}
#endif
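
velocity_pack() and velocity_unpack() are exact inverses up to float rounding (x * 0.01 * 100.0 == x), and the 0.01 scale keeps large pixel motions within the comfortable range of a low-precision target such as the RG16F velocity images in the create info removed later in this diff. The 0.5 factor in velocity_surface() converts an NDC-space delta (range [-1..1]) into a UV-space delta (range [0..1]). A small sketch of both facts (the _sketch names are illustrative):

/* Pack round trip; expect ~vec4(0.0) for any velocity. */
vec4 velocity_pack_roundtrip_sketch(vec4 v)
{
  return velocity_unpack(velocity_pack(v)) - v;
}

/* NDC spans 2 units where UV spans 1, so the same motion is half as large in UV. */
vec2 ndc_delta_to_uv_delta_sketch(vec2 ndc_delta)
{
  return ndc_delta * 0.5;
}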
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
deleted file mode 100644
index b68b2eaf117..00000000000
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
+++ /dev/null
@@ -1,58 +0,0 @@
-
-/**
- * Fullscreen pass that compute motion vector for static geometry.
- * Animated geometry has already written correct motion vectors.
- */
-
-#pragma BLENDER_REQUIRE(common_view_lib.glsl)
-#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
-
-#define is_valid_output(img_) (imageSize(img_).x > 1)
-
-void main()
-{
- ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
- vec4 motion = imageLoad(velocity_view_img, texel);
-
- bool pixel_has_valid_motion = (motion.x != VELOCITY_INVALID);
- float depth = texelFetch(depth_tx, texel, 0).r;
- bool is_background = (depth == 1.0f);
-
- vec2 uv = vec2(texel) * drw_view.viewport_size_inverse;
- vec3 P_next, P_prev, P_curr;
-
- if (pixel_has_valid_motion) {
- /* Animated geometry. View motion already computed during prepass. Convert only to camera. */
- // P_prev = get_world_space_from_depth(uv + motion.xy, 0.5);
- // P_curr = get_world_space_from_depth(uv, 0.5);
- // P_next = get_world_space_from_depth(uv + motion.zw, 0.5);
- return;
- }
- else if (is_background) {
- /* NOTE: Use viewCameraVec to avoid imprecision if camera is far from origin. */
- vec3 vV = viewCameraVec(get_view_space_from_depth(uv, 1.0));
- vec3 V = transform_direction(ViewMatrixInverse, vV);
- /* Background has no motion under camera translation. Translate view vector with the camera. */
- /* WATCH(fclem): Might create precision issues. */
- P_next = camera_next.viewinv[3].xyz + V;
- P_curr = camera_curr.viewinv[3].xyz + V;
- P_prev = camera_prev.viewinv[3].xyz + V;
- }
- else {
- /* Static geometry. No translation in world space. */
- P_curr = get_world_space_from_depth(uv, depth);
- P_prev = P_curr;
- P_next = P_curr;
- }
-
- vec4 vel_camera, vel_view;
- velocity_camera(P_prev, P_curr, P_next, vel_camera, vel_view);
-
- if (in_texture_range(texel, depth_tx)) {
- imageStore(velocity_view_img, texel, vel_view);
-
- if (is_valid_output(velocity_camera_img)) {
- imageStore(velocity_camera_img, texel, vel_camera);
- }
- }
-}
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh
new file mode 100644
index 00000000000..a5baaca51f9
--- /dev/null
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_film_info.hh
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "eevee_defines.hh"
+#include "gpu_shader_create_info.hh"
+
+GPU_SHADER_CREATE_INFO(eevee_film)
+ .uniform_buf(4, "FilmData", "film_buf")
+ .sampler(0, ImageType::DEPTH_2D, "depth_tx")
+ .sampler(1, ImageType::FLOAT_2D, "combined_tx")
+ .sampler(2, ImageType::FLOAT_2D, "normal_tx")
+ .sampler(3, ImageType::FLOAT_2D, "vector_tx")
+ .sampler(4, ImageType::FLOAT_2D, "diffuse_light_tx")
+ .sampler(5, ImageType::FLOAT_2D, "diffuse_color_tx")
+ .sampler(6, ImageType::FLOAT_2D, "specular_light_tx")
+ .sampler(7, ImageType::FLOAT_2D, "specular_color_tx")
+ .sampler(8, ImageType::FLOAT_2D, "volume_light_tx")
+ .sampler(9, ImageType::FLOAT_2D, "emission_tx")
+ .sampler(10, ImageType::FLOAT_2D, "environment_tx")
+ .sampler(11, ImageType::FLOAT_2D, "shadow_tx")
+ .sampler(12, ImageType::FLOAT_2D, "ambient_occlusion_tx")
+ .sampler(13, ImageType::FLOAT_2D_ARRAY, "aov_color_tx")
+ .sampler(14, ImageType::FLOAT_2D_ARRAY, "aov_value_tx")
+ /* Color history for TAA needs to be a sampler to leverage bilinear sampling. */
+ .sampler(15, ImageType::FLOAT_2D, "in_combined_tx")
+ // .sampler(15, ImageType::FLOAT_2D, "cryptomatte_tx") /* TODO */
+ .image(0, GPU_R32F, Qualifier::READ, ImageType::FLOAT_2D_ARRAY, "in_weight_img")
+ .image(1, GPU_R32F, Qualifier::WRITE, ImageType::FLOAT_2D_ARRAY, "out_weight_img")
+ /* Color history for TAA needs to be a sampler to leverage bilinear sampling. */
+ //.image(2, GPU_RGBA16F, Qualifier::READ, ImageType::FLOAT_2D, "in_combined_img")
+ .image(3, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_combined_img")
+ .image(4, GPU_R32F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "depth_img")
+ .image(5, GPU_RGBA16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "color_accum_img")
+ .image(6, GPU_R16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D_ARRAY, "value_accum_img")
+ .additional_info("eevee_shared")
+ .additional_info("eevee_velocity_camera")
+ .additional_info("draw_view");
+
+GPU_SHADER_CREATE_INFO(eevee_film_frag)
+ .do_static_compilation(true)
+ .fragment_out(0, Type::VEC4, "out_color")
+ .fragment_source("eevee_film_frag.glsl")
+ .additional_info("draw_fullscreen", "eevee_film");
+
+GPU_SHADER_CREATE_INFO(eevee_film_comp)
+ .do_static_compilation(true)
+ .local_group_size(FILM_GROUP_SIZE, FILM_GROUP_SIZE)
+ .compute_source("eevee_film_comp.glsl")
+ .additional_info("eevee_film");
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
index a944bea402e..2368061402c 100644
--- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
@@ -70,6 +70,14 @@ GPU_SHADER_INTERFACE_INFO(eevee_surf_iface, "interp")
#define image_out(slot, qualifier, format, name) \
image(slot, format, qualifier, ImageType::FLOAT_2D, name, Frequency::PASS)
+#define image_array_out(slot, qualifier, format, name) \
+ image(slot, format, qualifier, ImageType::FLOAT_2D_ARRAY, name, Frequency::PASS)
+
+GPU_SHADER_CREATE_INFO(eevee_aov_out)
+ .define("MAT_AOV_SUPPORT")
+ .image_array_out(6, Qualifier::WRITE, GPU_RGBA16F, "aov_color_img")
+ .image_array_out(7, Qualifier::WRITE, GPU_R16F, "aov_value_img")
+ .storage_buf(7, Qualifier::READ, "AOVsInfoData", "aov_buf");
GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
.vertex_out(eevee_surf_iface)
@@ -85,31 +93,38 @@ GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
// .image_out(3, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_reflection_color")
// .image_out(4, Qualifier::WRITE, GPU_RGBA16F, "gbuff_reflection_normal")
// .image_out(5, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_emission")
- /* Renderpasses. */
+ /* Render-passes. */
// .image_out(6, Qualifier::READ_WRITE, GPU_RGBA16F, "rpass_volume_light")
/* TODO: AOVs maybe? */
.fragment_source("eevee_surf_deferred_frag.glsl")
- // .additional_info("eevee_sampling_data", "eevee_utility_texture")
+ // .additional_info("eevee_aov_out", "eevee_sampling_data", "eevee_utility_texture")
;
-#undef image_out
-
GPU_SHADER_CREATE_INFO(eevee_surf_forward)
.auto_resource_location(true)
.vertex_out(eevee_surf_iface)
+ /* Early fragment test is needed for render-pass support on forward surfaces. */
+ /* NOTE: This removes the possibility of using gl_FragDepth. */
+ .early_fragment_test(true)
.fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1)
.fragment_source("eevee_surf_forward_frag.glsl")
- // .additional_info("eevee_sampling_data",
- // "eevee_lightprobe_data",
- /* Optionally added depending on the material. */
- // "eevee_raytrace_data",
- // "eevee_transmittance_data",
- // "eevee_utility_texture",
- // "eevee_light_data",
- // "eevee_shadow_data"
- // )
- ;
+ .image_out(0, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
+ .image_out(1, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_light_img")
+ .image_out(2, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
+ .image_out(3, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_light_img")
+ .image_out(4, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
+ .image_out(5, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img")
+ .additional_info("eevee_aov_out"
+ // "eevee_sampling_data",
+ // "eevee_lightprobe_data",
+ /* Optionally added depending on the material. */
+ // "eevee_raytrace_data",
+ // "eevee_transmittance_data",
+ // "eevee_utility_texture",
+ // "eevee_light_data",
+ // "eevee_shadow_data"
+ );
GPU_SHADER_CREATE_INFO(eevee_surf_depth)
.vertex_out(eevee_surf_iface)
@@ -119,10 +134,21 @@ GPU_SHADER_CREATE_INFO(eevee_surf_depth)
GPU_SHADER_CREATE_INFO(eevee_surf_world)
.vertex_out(eevee_surf_iface)
+ .image_out(0, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
+ .image_out(1, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_light_img")
+ .image_out(2, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
+ .image_out(3, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_light_img")
+ .image_out(4, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
+ .image_out(5, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img")
+ .push_constant(Type::FLOAT, "world_opacity_fade")
.fragment_out(0, Type::VEC4, "out_background")
.fragment_source("eevee_surf_world_frag.glsl")
- // .additional_info("eevee_utility_texture")
- ;
+ .additional_info("eevee_aov_out"
+ //"eevee_utility_texture"
+ );
+
+#undef image_out
+#undef image_array_out
/** \} */
diff --git a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
index a5f16363466..6e8e8fb020a 100644
--- a/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
+++ b/source/blender/draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
@@ -30,26 +31,7 @@ GPU_SHADER_CREATE_INFO(eevee_velocity_geom)
.storage_buf(
7, Qualifier::READ, "VelocityIndex", "velocity_indirection_buf[]", Frequency::PASS)
.vertex_out(eevee_velocity_surface_iface)
- .fragment_out(0, Type::VEC4, "out_velocity_view")
+ .fragment_out(0, Type::VEC4, "out_velocity")
.additional_info("eevee_velocity_camera");
/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Velocity Resolve
- *
- * Computes velocity for static objects.
- * Also converts motion to camera space (as opposed to view space) if needed.
- * \{ */
-
-GPU_SHADER_CREATE_INFO(eevee_velocity_resolve)
- .do_static_compilation(true)
- .local_group_size(8, 8)
- .sampler(0, ImageType::DEPTH_2D, "depth_tx")
- .image(0, GPU_RG16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "velocity_view_img")
- .image(1, GPU_RG16F, Qualifier::WRITE, ImageType::FLOAT_2D, "velocity_camera_img")
- .additional_info("eevee_shared")
- .compute_source("eevee_velocity_resolve_comp.glsl")
- .additional_info("draw_view", "eevee_velocity_camera");
-
-/** \} */