git.blender.org/blender.git

Diffstat (limited to 'source/blender/draw/engines/eevee/shaders')
 source/blender/draw/engines/eevee/shaders/bsdf_common_lib.glsl              |  15
 source/blender/draw/engines/eevee/shaders/effect_motion_blur_frag.glsl      | 247
 source/blender/draw/engines/eevee/shaders/effect_temporal_aa.glsl           |  19
 source/blender/draw/engines/eevee/shaders/effect_velocity_resolve_frag.glsl |  18
 source/blender/draw/engines/eevee/shaders/effect_velocity_tile_frag.glsl    | 151
 source/blender/draw/engines/eevee/shaders/object_motion_frag.glsl           |  27
 source/blender/draw/engines/eevee/shaders/object_motion_vert.glsl           |  54
 7 files changed, 473 insertions(+), 58 deletions(-)
diff --git a/source/blender/draw/engines/eevee/shaders/bsdf_common_lib.glsl b/source/blender/draw/engines/eevee/shaders/bsdf_common_lib.glsl
index 402d306df45..393ecaf1fc5 100644
--- a/source/blender/draw/engines/eevee/shaders/bsdf_common_lib.glsl
+++ b/source/blender/draw/engines/eevee/shaders/bsdf_common_lib.glsl
@@ -877,6 +877,14 @@ Closure closure_mix(Closure cl1, Closure cl2, float fac)
{
Closure cl;
cl.holdout = mix(cl1.holdout, cl2.holdout, fac);
+
+ if (FLAG_TEST(cl1.flag, CLOSURE_HOLDOUT_FLAG)) {
+ fac = 1.0;
+ }
+ else if (FLAG_TEST(cl2.flag, CLOSURE_HOLDOUT_FLAG)) {
+ fac = 0.0;
+ }
+
cl.transmittance = mix(cl1.transmittance, cl2.transmittance, fac);
cl.radiance = mix(cl1.radiance, cl2.radiance, fac);
cl.flag = cl1.flag | cl2.flag;
@@ -958,7 +966,7 @@ void main()
{
Closure cl = nodetree_exec();
- float holdout = 1.0 - saturate(cl.holdout);
+ float holdout = saturate(1.0 - cl.holdout);
float transmit = saturate(avg(cl.transmittance));
float alpha = 1.0 - transmit;
@@ -972,8 +980,9 @@ void main()
* Since we do that using the blending pipeline we need to account for material transmittance. */
vol_scatter -= vol_scatter * cl.transmittance;
- outRadiance = vec4(cl.radiance * vol_transmit + vol_scatter, alpha * holdout);
- outTransmittance = vec4(cl.transmittance, transmit * holdout);
+ cl.radiance = cl.radiance * holdout * vol_transmit + vol_scatter;
+ outRadiance = vec4(cl.radiance, alpha * holdout);
+ outTransmittance = vec4(cl.transmittance, transmit) * holdout;
# else
outRadiance = vec4(cl.radiance, holdout);
ssrNormals = cl.ssr_normal;
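
The closure_mix() change makes a holdout input forfeit the radiance/transmittance blend: if cl1 carries CLOSURE_HOLDOUT_FLAG the factor snaps to 1.0 so cl2 wins outright (and vice versa), while cl.holdout itself still interpolates with the caller's factor. A minimal standalone C sketch of that logic, using scalar stand-ins for the vec3 fields (the trimmed struct is illustrative, not the full EEVEE Closure):

#include <stdio.h>

#define CLOSURE_HOLDOUT_FLAG (1 << 0)
#define FLAG_TEST(flag, val) (((flag) & (val)) != 0)

typedef struct {
  float radiance;      /* scalar stand-in for vec3 */
  float transmittance; /* scalar stand-in for vec3 */
  float holdout;
  int flag;
} Closure;

static float mixf(float a, float b, float fac) { return a + (b - a) * fac; }

static Closure closure_mix(Closure cl1, Closure cl2, float fac)
{
  Closure cl;
  cl.holdout = mixf(cl1.holdout, cl2.holdout, fac);
  /* A holdout input forfeits the radiance/transmittance blend to the other input. */
  if (FLAG_TEST(cl1.flag, CLOSURE_HOLDOUT_FLAG)) {
    fac = 1.0f;
  }
  else if (FLAG_TEST(cl2.flag, CLOSURE_HOLDOUT_FLAG)) {
    fac = 0.0f;
  }
  cl.transmittance = mixf(cl1.transmittance, cl2.transmittance, fac);
  cl.radiance = mixf(cl1.radiance, cl2.radiance, fac);
  cl.flag = cl1.flag | cl2.flag;
  return cl;
}

int main(void)
{
  Closure holdout = {0.0f, 1.0f, 1.0f, CLOSURE_HOLDOUT_FLAG};
  Closure diffuse = {0.8f, 0.0f, 0.0f, 0};
  Closure cl = closure_mix(holdout, diffuse, 0.5f);
  /* Radiance comes entirely from the diffuse input; holdout still interpolates. */
  printf("radiance=%.2f holdout=%.2f\n", cl.radiance, cl.holdout); /* 0.80 0.50 */
  return 0;
}
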
diff --git a/source/blender/draw/engines/eevee/shaders/effect_motion_blur_frag.glsl b/source/blender/draw/engines/eevee/shaders/effect_motion_blur_frag.glsl
index b7935235d06..fbf507a2e40 100644
--- a/source/blender/draw/engines/eevee/shaders/effect_motion_blur_frag.glsl
+++ b/source/blender/draw/engines/eevee/shaders/effect_motion_blur_frag.glsl
@@ -1,64 +1,235 @@
+/*
+ * Based on:
+ * A Fast and Stable Feature-Aware Motion Blur Filter
+ * by Jean-Philippe Guertin, Morgan McGuire, Derek Nowrouzezahrai
+ *
+ * With modification from the presentation:
+ * Next Generation Post Processing in Call of Duty Advanced Warfare
+ * by Jorge Jimenez
+ */
uniform sampler2D colorBuffer;
uniform sampler2D depthBuffer;
+uniform sampler2D velocityBuffer;
+uniform sampler2D tileMaxBuffer;
-/* current frame */
-uniform mat4 currInvViewProjMatrix;
+#define KERNEL 8
-/* past frame frame */
-uniform mat4 pastViewProjMatrix;
+/* TODO(fclem) deduplicate this code. */
+uniform sampler2DArray utilTex;
+#define LUT_SIZE 64
+#define texelfetch_noise_tex(coord) texelFetch(utilTex, ivec3(ivec2(coord) % LUT_SIZE, 2.0), 0)
+
+uniform float depthScale;
+uniform ivec2 tileBufferSize;
+uniform vec2 viewportSize;
+uniform vec2 viewportSizeInv;
+uniform bool isPerspective;
+uniform vec2 nearFar; /* Near & far view depth values. */
+
+#define linear_depth(z) \
+ ((isPerspective) ? (nearFar.x * nearFar.y) / (z * (nearFar.x - nearFar.y) + nearFar.y) : \
+ z * (nearFar.y - nearFar.x) + nearFar.x) /* Only true for camera view! */
in vec4 uvcoordsvar;
-out vec4 FragColor;
+out vec4 fragColor;
-#define MAX_SAMPLE 64
+#define saturate(a) clamp(a, 0.0, 1.0)
-uniform int samples;
+vec2 spread_compare(float center_motion_length, float sample_motion_length, float offset_length)
+{
+ return saturate(vec2(center_motion_length, sample_motion_length) - offset_length + 1.0);
+}
-float wang_hash_noise(uint s)
+vec2 depth_compare(float center_depth, float sample_depth)
{
- uint seed = (uint(gl_FragCoord.x) * 1664525u + uint(gl_FragCoord.y)) + s;
+ return saturate(0.5 + vec2(depthScale, -depthScale) * (sample_depth - center_depth));
+}
- seed = (seed ^ 61u) ^ (seed >> 16u);
- seed *= 9u;
- seed = seed ^ (seed >> 4u);
- seed *= 0x27d4eb2du;
- seed = seed ^ (seed >> 15u);
+/* Kill contribution if not going in the same direction. */
+float dir_compare(vec2 offset, vec2 sample_motion, float sample_motion_length)
+{
+ if (sample_motion_length < 0.5) {
+ return 1.0;
+ }
+ return (dot(offset, sample_motion) > 0.0) ? 1.0 : 0.0;
+}
- float value = float(seed);
- value *= 1.0 / 4294967296.0;
- return fract(value);
+/* Return background (x) and foreground (y) weights. */
+vec2 sample_weights(float center_depth,
+ float sample_depth,
+ float center_motion_length,
+ float sample_motion_length,
+ float offset_length)
+{
+ /* Classify foreground/background. */
+ vec2 depth_weight = depth_compare(center_depth, sample_depth);
+ /* Weight if the sample overlaps or lies under the center pixel. */
+ vec2 spread_weight = spread_compare(center_motion_length, sample_motion_length, offset_length);
+ return depth_weight * spread_weight;
}
-void main()
+vec4 decode_velocity(vec4 velocity)
{
- vec3 ndc_pos;
- ndc_pos.xy = uvcoordsvar.xy;
- ndc_pos.z = texture(depthBuffer, uvcoordsvar.xy).x;
+ velocity = velocity * 2.0 - 1.0;
+ /* Needed to match Cycles. Can't find why... (fclem) */
+ velocity *= 0.5;
+ /* Transform to pixel space. */
+ velocity *= viewportSize.xyxy;
+ return velocity;
+}
- float inv_samples = 1.0 / float(samples);
- float noise = 2.0 * wang_hash_noise(0u) * inv_samples;
+vec4 sample_velocity(vec2 uv)
+{
+ vec4 data = texture(velocityBuffer, uv);
+ return decode_velocity(data);
+}
- /* Normalize Device Coordinates are [-1, +1]. */
- ndc_pos = ndc_pos * 2.0 - 1.0;
+vec2 sample_velocity(vec2 uv, const bool next)
+{
+ vec4 data = sample_velocity(uv);
+ data.xy = (next ? data.zw : data.xy);
+ return data.xy;
+}
- vec4 p = currInvViewProjMatrix * vec4(ndc_pos, 1.0);
- vec3 world_pos = p.xyz / p.w; /* Perspective divide */
+void gather_sample(vec2 screen_uv,
+ float center_depth,
+ float center_motion_len,
+ vec2 offset,
+ float offset_len,
+ const bool next,
+ inout vec4 accum,
+ inout vec4 accum_bg,
+ inout vec3 w_accum)
+{
+ vec2 sample_uv = screen_uv - offset * viewportSizeInv;
+ vec2 sample_motion = sample_velocity(sample_uv, next);
+ float sample_motion_len = length(sample_motion);
+ float sample_depth = linear_depth(texture(depthBuffer, sample_uv).r);
+ vec4 col = textureLod(colorBuffer, sample_uv, 0.0);
+
+ vec3 weights;
+ weights.xy = sample_weights(
+ center_depth, sample_depth, center_motion_len, sample_motion_len, offset_len);
+ weights.z = dir_compare(offset, sample_motion, sample_motion_len);
+ weights.xy *= weights.z;
+
+ accum += col * weights.y;
+ accum_bg += col * weights.x;
+ w_accum += weights;
+}
- /* Now find where was this pixel position
- * inside the past camera viewport */
- vec4 old_ndc = pastViewProjMatrix * vec4(world_pos, 1.0);
- old_ndc.xyz /= old_ndc.w; /* Perspective divide */
+void gather_blur(vec2 screen_uv,
+ vec2 center_motion,
+ float center_depth,
+ vec2 max_motion,
+ float ofs,
+ const bool next,
+ inout vec4 accum,
+ inout vec4 accum_bg,
+ inout vec3 w_accum)
+{
+ float center_motion_len = length(center_motion);
+ float max_motion_len = length(max_motion);
+
+ /* Tile boundary randomization can fetch a tile with less motion than this pixel.
+ * Fix this by overriding the max_motion. */
+ if (max_motion_len < center_motion_len) {
+ max_motion_len = center_motion_len;
+ max_motion = center_motion;
+ }
- vec2 motion = (ndc_pos.xy - old_ndc.xy) * 0.25; /* 0.25 fit cycles ref */
+ if (max_motion_len < 0.5) {
+ return;
+ }
- float inc = 2.0 * inv_samples;
- float i = -1.0 + noise;
+ int i;
+ float t, inc = 1.0 / float(KERNEL);
+ for (i = 0, t = ofs * inc; i < KERNEL; i++, t += inc) {
+ gather_sample(screen_uv,
+ center_depth,
+ center_motion_len,
+ max_motion * t,
+ max_motion_len * t,
+ next,
+ accum,
+ accum_bg,
+ w_accum);
+ }
- FragColor = vec4(0.0);
- for (int j = 0; j < samples && j < MAX_SAMPLE; j++) {
- FragColor += textureLod(colorBuffer, uvcoordsvar.xy + motion * i, 0.0) * inv_samples;
- i += inc;
+ if (center_motion_len < 0.5) {
+ return;
}
+
+ for (i = 0, t = ofs * inc; i < KERNEL; i++, t += inc) {
+ /* Also sample in the center motion direction.
+ * This allows recovering motion where foreground
+ * and background motion conflict. */
+ gather_sample(screen_uv,
+ center_depth,
+ center_motion_len,
+ center_motion * t,
+ center_motion_len * t,
+ next,
+ accum,
+ accum_bg,
+ w_accum);
+ }
+}
+
+void main()
+{
+ vec2 uv = uvcoordsvar.xy;
+
+ /* Data of the center pixel of the gather (target). */
+ float center_depth = linear_depth(texture(depthBuffer, uv).r);
+ vec4 center_motion = sample_velocity(uv);
+ vec4 center_color = textureLod(colorBuffer, uv, 0.0);
+
+ vec2 rand = texelfetch_noise_tex(gl_FragCoord.xy).xy;
+
+ /* Randomize the tile boundary to avoid ugly discontinuities. Randomize 1/4th of the tile.
+ * Note this randomizes only in one direction, but in practice it's enough. */
+ rand.x = rand.x * 2.0 - 1.0;
+ ivec2 tile = ivec2(gl_FragCoord.xy + rand.x * float(EEVEE_VELOCITY_TILE_SIZE) * 0.25) /
+ EEVEE_VELOCITY_TILE_SIZE;
+ tile = clamp(tile, ivec2(0), tileBufferSize - 1);
+ vec4 max_motion = decode_velocity(texelFetch(tileMaxBuffer, tile, 0));
+
+ /* First (center) sample: time = T */
+ /* x: Background, y: Foreground, z: dir. */
+ vec3 w_accum = vec3(0.0, 0.0, 1.0);
+ vec4 accum_bg = vec4(0.0);
+ vec4 accum = vec4(0.0);
+ /* First linear gather. time = [T - delta, T] */
+ gather_blur(
+ uv, center_motion.xy, center_depth, max_motion.xy, rand.y, false, accum, accum_bg, w_accum);
+ /* Second linear gather. time = [T, T + delta] */
+ gather_blur(
+ uv, center_motion.zw, center_depth, max_motion.zw, rand.y, true, accum, accum_bg, w_accum);
+
+#if 1
+ /* Avoid division by 0.0. */
+ float w = 1.0 / (50.0 * float(KERNEL) * 4.0);
+ accum_bg += center_color * w;
+ w_accum.x += w;
+ /* Note: In Jimenez's presentation, they used the center sample.
+ * We use the background color as it contains more information for foreground
+ * elements that do not have enough weight.
+ * This yields better blur in complex motion. */
+ center_color = accum_bg / w_accum.x;
+#endif
+ /* Merge background. */
+ accum += accum_bg;
+ w_accum.y += w_accum.x;
+ /* Balance accumulation for failed samples.
+ * We replace the missing foreground with the background. */
+ float blend_fac = saturate(1.0 - w_accum.y / w_accum.z);
+ fragColor = (accum / w_accum.z) + center_color * blend_fac;
+
+#if 0 /* For debugging. */
+ fragColor.rgb = fragColor.ggg;
+ fragColor.rg += max_motion.xy;
+#endif
}
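
The gather above splits every sample's contribution with two tests: depth_compare() classifies it as foreground or background relative to the center pixel, and spread_compare() checks that the motion vector is actually long enough to smear across the sampling offset. A standalone C sketch of just that weight math; the input values and depth_scale = 1.0 are arbitrary picks for the example:

#include <stdio.h>

/* Clamp to [0, 1], like GLSL saturate(). */
static float saturatef(float x) { return x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x); }

/* Background (bg) / foreground (fg) split based on relative depth. */
static void depth_compare(float center_depth, float sample_depth, float depth_scale,
                          float *bg, float *fg)
{
  *bg = saturatef(0.5f + depth_scale * (sample_depth - center_depth));
  *fg = saturatef(0.5f - depth_scale * (sample_depth - center_depth));
}

/* Weight stays 1 while the motion vector is long enough to cover the offset. */
static void spread_compare(float center_len, float sample_len, float offset_len,
                           float *bg, float *fg)
{
  *bg = saturatef(center_len - offset_len + 1.0f);
  *fg = saturatef(sample_len - offset_len + 1.0f);
}

int main(void)
{
  float bg_d, fg_d, bg_s, fg_s;
  /* Example: sample 2.0 units behind the center, both moving ~6 px,
   * gathered 4 px away. The deeper sample classifies as background. */
  depth_compare(10.0f, 12.0f, 1.0f, &bg_d, &fg_d);
  spread_compare(6.0f, 6.0f, 4.0f, &bg_s, &fg_s);
  printf("bg weight = %.2f, fg weight = %.2f\n", bg_d * bg_s, fg_d * fg_s);
  return 0;
}
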
diff --git a/source/blender/draw/engines/eevee/shaders/effect_temporal_aa.glsl b/source/blender/draw/engines/eevee/shaders/effect_temporal_aa.glsl
index 428318e3c68..b44645174bd 100644
--- a/source/blender/draw/engines/eevee/shaders/effect_temporal_aa.glsl
+++ b/source/blender/draw/engines/eevee/shaders/effect_temporal_aa.glsl
@@ -1,6 +1,6 @@
uniform sampler2D colorHistoryBuffer;
-uniform sampler2D velocityBuffer;
+uniform mat4 prevViewProjectionMatrix;
out vec4 FragColor;
@@ -38,16 +38,19 @@ vec3 clip_to_aabb(vec3 color, vec3 minimum, vec3 maximum, vec3 average)
*/
void main()
{
+ vec2 screen_res = vec2(textureSize(colorBuffer, 0).xy);
+ vec2 uv = gl_FragCoord.xy / screen_res;
ivec2 texel = ivec2(gl_FragCoord.xy);
- vec2 motion = texelFetch(velocityBuffer, texel, 0).rg;
-
- /* Decode from unsigned normalized 16bit texture. */
- motion = motion * 2.0 - 1.0;
/* Compute pixel position in previous frame. */
- vec2 screen_res = vec2(textureSize(colorBuffer, 0).xy);
- vec2 uv = gl_FragCoord.xy / screen_res;
- vec2 uv_history = uv - motion;
+ float depth = textureLod(depthBuffer, uv, 0.0).r;
+ vec3 pos = get_world_space_from_depth(uv, depth);
+ vec2 uv_history = project_point(prevViewProjectionMatrix, pos).xy * 0.5 + 0.5;
+
+ /* HACK: Reject lookdev spheres from TAA reprojection. */
+ if (depth == 0.0) {
+ uv_history = uv;
+ }
ivec2 texel_history = ivec2(uv_history * screen_res);
vec4 color_history = textureLod(colorHistoryBuffer, uv_history, 0.0);
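
The TAA pass now reprojects the scene depth directly with prevViewProjectionMatrix instead of reading the velocity buffer. project_point(), defined elsewhere in the EEVEE shader library, amounts to a homogeneous transform followed by a perspective divide; here is a plain-C sketch of that reprojection step under that assumption, with an identity matrix standing in for a real previous view-projection:

#include <stdio.h>

typedef struct { float m[4][4]; } mat4; /* column-major: m[col][row], like GLSL */
typedef struct { float x, y, z; } vec3;

/* Transform a point by a 4x4 matrix and apply the perspective divide. */
static vec3 project_point(const mat4 *M, vec3 p)
{
  const float in[4] = {p.x, p.y, p.z, 1.0f};
  float out[4];
  for (int row = 0; row < 4; row++) {
    out[row] = 0.0f;
    for (int col = 0; col < 4; col++) {
      out[row] += M->m[col][row] * in[col];
    }
  }
  vec3 r = {out[0] / out[3], out[1] / out[3], out[2] / out[3]};
  return r;
}

int main(void)
{
  /* Identity "previous view-projection" as a stand-in. */
  mat4 prev_vp = {{{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}}};
  vec3 world_pos = {0.25f, -0.5f, 0.0f};
  vec3 ndc = project_point(&prev_vp, world_pos);
  /* NDC [-1, 1] to UV [0, 1], as in the shader. */
  printf("uv_history = (%.3f, %.3f)\n", ndc.x * 0.5f + 0.5f, ndc.y * 0.5f + 0.5f);
  return 0;
}
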
diff --git a/source/blender/draw/engines/eevee/shaders/effect_velocity_resolve_frag.glsl b/source/blender/draw/engines/eevee/shaders/effect_velocity_resolve_frag.glsl
index 7d701bce5cb..d927fd78d30 100644
--- a/source/blender/draw/engines/eevee/shaders/effect_velocity_resolve_frag.glsl
+++ b/source/blender/draw/engines/eevee/shaders/effect_velocity_resolve_frag.glsl
@@ -1,8 +1,9 @@
-uniform mat4 currPersinv;
-uniform mat4 pastPersmat;
+uniform mat4 prevViewProjMatrix;
+uniform mat4 currViewProjMatrixInv;
+uniform mat4 nextViewProjMatrix;
-out vec2 outData;
+out vec4 outData;
void main()
{
@@ -12,13 +13,12 @@ void main()
float depth = texelFetch(depthBuffer, texel, 0).r;
- vec3 world_position = project_point(currPersinv, vec3(uv, depth) * 2.0 - 1.0);
- vec2 uv_history = project_point(pastPersmat, world_position).xy * 0.5 + 0.5;
+ vec3 world_position = project_point(currViewProjMatrixInv, vec3(uv, depth) * 2.0 - 1.0);
+ vec2 uv_prev = project_point(prevViewProjMatrix, world_position).xy * 0.5 + 0.5;
+ vec2 uv_next = project_point(nextViewProjMatrix, world_position).xy * 0.5 + 0.5;
- outData = uv - uv_history;
-
- /* HACK: Reject lookdev spheres from TAA reprojection. */
- outData = (depth > 0.0) ? outData : vec2(0.0);
+ outData.xy = uv_prev - uv;
+ outData.zw = uv_next - uv;
/* Encode to unsigned normalized 16bit texture. */
outData = outData * 0.5 + 0.5;
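
Both this resolve pass and the motion blur shader round-trip motion vectors through an unsigned normalized 16-bit texture: encode with x * 0.5 + 0.5, decode with x * 2.0 - 1.0 (decode_velocity() then rescales to pixels). A C sketch of that round trip, assuming a UNORM16 target (65535 steps):

#include <math.h>
#include <stdio.h>

/* Pack a delta in [-1, 1] into a 16-bit unsigned normalized value. */
static unsigned short encode_unorm16(float v)
{
  float packed = v * 0.5f + 0.5f; /* [-1, 1] -> [0, 1] */
  return (unsigned short)lroundf(packed * 65535.0f);
}

static float decode_unorm16(unsigned short u)
{
  float unpacked = (float)u / 65535.0f;
  return unpacked * 2.0f - 1.0f; /* [0, 1] -> [-1, 1] */
}

int main(void)
{
  float delta = -0.1234f; /* example UV-space motion */
  unsigned short stored = encode_unorm16(delta);
  float restored = decode_unorm16(stored);
  /* Max round-trip error is half of one quantization step, i.e. 1/65535. */
  printf("in=%.6f out=%.6f err=%.2e\n", delta, restored, fabsf(restored - delta));
  return 0;
}
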
diff --git a/source/blender/draw/engines/eevee/shaders/effect_velocity_tile_frag.glsl b/source/blender/draw/engines/eevee/shaders/effect_velocity_tile_frag.glsl
new file mode 100644
index 00000000000..0eb598521af
--- /dev/null
+++ b/source/blender/draw/engines/eevee/shaders/effect_velocity_tile_frag.glsl
@@ -0,0 +1,151 @@
+/**
+ * Shaders that down-sample the velocity buffer.
+ *
+ * Based on:
+ * A Fast and Stable Feature-Aware Motion Blur Filter
+ * by Jean-Philippe Guertin, Morgan McGuire, Derek Nowrouzezahrai
+ *
+ * Adapted from G3D Innovation Engine implementation.
+ */
+
+uniform sampler2D velocityBuffer;
+uniform vec2 viewportSize;
+uniform vec2 viewportSizeInv;
+uniform ivec2 velocityBufferSize;
+
+out vec4 tileMaxVelocity;
+
+vec4 sample_velocity(ivec2 texel)
+{
+ texel = clamp(texel, ivec2(0), velocityBufferSize - 1);
+ vec4 data = texelFetch(velocityBuffer, texel, 0);
+ /* Decode data. */
+ return (data * 2.0 - 1.0) * viewportSize.xyxy;
+}
+
+vec4 encode_velocity(vec4 velocity)
+{
+ return velocity * viewportSizeInv.xyxy * 0.5 + 0.5;
+}
+
+#ifdef TILE_GATHER
+
+uniform ivec2 gatherStep;
+
+void main()
+{
+ vec4 max_motion = vec4(0.0);
+ float max_motion_len_sqr_prev = 0.0;
+ float max_motion_len_sqr_next = 0.0;
+ ivec2 texel = ivec2(gl_FragCoord.xy);
+ texel = texel * gatherStep.yx + texel * EEVEE_VELOCITY_TILE_SIZE * gatherStep;
+
+ for (int i = 0; i < EEVEE_VELOCITY_TILE_SIZE; ++i) {
+ vec4 motion = sample_velocity(texel + i * gatherStep);
+ float motion_len_sqr_prev = dot(motion.xy, motion.xy);
+ float motion_len_sqr_next = dot(motion.zw, motion.zw);
+
+ if (motion_len_sqr_prev > max_motion_len_sqr_prev) {
+ max_motion_len_sqr_prev = motion_len_sqr_prev;
+ max_motion.xy = motion.xy;
+ }
+ if (motion_len_sqr_next > max_motion_len_sqr_next) {
+ max_motion_len_sqr_next = motion_len_sqr_next;
+ max_motion.zw = motion.zw;
+ }
+ }
+
+ tileMaxVelocity = encode_velocity(max_motion);
+}
+
+#else /* TILE_EXPANSION */
+
+bool neighbor_affect_this_tile(ivec2 offset, vec2 velocity)
+{
+ /* Manhattan distance to the tiles, which is used for
+ * differentiating corners versus middle blocks */
+ float displacement = float(abs(offset.x) + abs(offset.y));
+ /**
+ * Relative sign on each axis of the offset compared
+ * to the velocity for that tile. In order for a tile
+ * to affect the center tile, it must have a
+ * neighborhood velocity in which x and y both have
+ * identical or both have opposite signs relative to
+ * offset. If the offset coordinate is zero then
+ * velocity is irrelevant.
+ **/
+ vec2 point = sign(offset * velocity);
+
+ float dist = (point.x + point.y);
+ /**
+ * Here's an example of the logic for this code.
+ * In this diagram, the upper-left tile has offset = (-1, -1).
+ * V1 is velocity = (1, -2). point in this case = (-1, 1), and therefore dist = 0,
+ * so the upper-left tile does not affect the center.
+ *
+ * Now, look at another case. V2 = (-1, -2). point = (1, 1), so dist = 2 and the tile
+ * does affect the center.
+ *
+ * V2(-1,-2) V1(1, -2)
+ * \ /
+ * \ /
+ * \/___ ____ ____
+ * (-1, -1)| | | |
+ * |____|____|____|
+ * | | | |
+ * |____|____|____|
+ * | | | |
+ * |____|____|____|
+ **/
+ return (abs(dist) == displacement);
+}
+
+/**
+ * Only gather neighborhood velocity into tiles that could be affected by it.
+ * In the general case, only six of the eight neighbors contribute:
+ *
+ * This tile can't possibly be affected by the center one
+ * |
+ * v
+ * ____ ____ ____
+ * | | ///|/// |
+ * |____|////|//__|
+ * | |////|/ |
+ * |___/|////|____|
+ * | //|////| | <--- This tile can't possibly be affected by the center one
+ * |_///|///_|____|
+ **/
+void main()
+{
+ vec4 max_motion = vec4(0.0);
+ float max_motion_len_sqr_prev = -1.0;
+ float max_motion_len_sqr_next = -1.0;
+
+ ivec2 tile = ivec2(gl_FragCoord.xy);
+ ivec2 offset = ivec2(0);
+ for (offset.y = -1; offset.y <= 1; ++offset.y) {
+ for (offset.x = -1; offset.x <= 1; ++offset.x) {
+ vec4 motion = sample_velocity(tile + offset);
+ float motion_len_sqr_prev = dot(motion.xy, motion.xy);
+ float motion_len_sqr_next = dot(motion.zw, motion.zw);
+
+ if (motion_len_sqr_prev > max_motion_len_sqr_prev) {
+ if (neighbor_affect_this_tile(offset, motion.xy)) {
+ max_motion_len_sqr_prev = motion_len_sqr_prev;
+ max_motion.xy = motion.xy;
+ }
+ }
+
+ if (motion_len_sqr_next > max_motion_len_sqr_next) {
+ if (neighbor_affect_this_tile(offset, motion.zw)) {
+ max_motion_len_sqr_next = motion_len_sqr_next;
+ max_motion.zw = motion.zw;
+ }
+ }
+ }
+ }
+
+ tileMaxVelocity = encode_velocity(max_motion);
+}
+
+#endif
\ No newline at end of file
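
The sign trick in neighbor_affect_this_tile() can be checked in isolation: a neighbor at offset o only affects the center when its velocity's sign agrees with o on every nonzero axis, or disagrees on every nonzero axis, detected by comparing |sum of per-axis signs| to the Manhattan distance. A plain-C check reproducing the V1/V2 example from the comment above:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static float signf(float v) { return (float)((v > 0.0f) - (v < 0.0f)); }

/* Does a neighbor tile at integer offset (ox, oy), carrying velocity (vx, vy),
 * smear into the center tile? Mirrors neighbor_affect_this_tile() above. */
static int neighbor_affects_center(int ox, int oy, float vx, float vy)
{
  float displacement = (float)(abs(ox) + abs(oy));
  /* Per-axis sign agreement between the offset and the velocity. */
  float dist = signf((float)ox * vx) + signf((float)oy * vy);
  return fabsf(dist) == displacement;
}

int main(void)
{
  /* The two cases from the diagram: upper-left tile, offset (-1, -1). */
  printf("V1 (1, -2):  %d\n", neighbor_affects_center(-1, -1, 1.0f, -2.0f));  /* 0 */
  printf("V2 (-1, -2): %d\n", neighbor_affects_center(-1, -1, -1.0f, -2.0f)); /* 1 */
  return 0;
}
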
diff --git a/source/blender/draw/engines/eevee/shaders/object_motion_frag.glsl b/source/blender/draw/engines/eevee/shaders/object_motion_frag.glsl
new file mode 100644
index 00000000000..66b098ef6c5
--- /dev/null
+++ b/source/blender/draw/engines/eevee/shaders/object_motion_frag.glsl
@@ -0,0 +1,27 @@
+
+uniform mat4 prevViewProjMatrix;
+uniform mat4 currViewProjMatrix;
+uniform mat4 nextViewProjMatrix;
+
+in vec3 prevWorldPos;
+in vec3 currWorldPos;
+in vec3 nextWorldPos;
+
+out vec4 outData;
+
+void main()
+{
+ vec4 prev_wpos = prevViewProjMatrix * vec4(prevWorldPos, 1.0);
+ vec4 curr_wpos = currViewProjMatrix * vec4(currWorldPos, 1.0);
+ vec4 next_wpos = nextViewProjMatrix * vec4(nextWorldPos, 1.0);
+
+ vec2 prev_uv = (prev_wpos.xy / prev_wpos.w);
+ vec2 curr_uv = (curr_wpos.xy / curr_wpos.w);
+ vec2 next_uv = (next_wpos.xy / next_wpos.w);
+
+ outData.xy = prev_uv - curr_uv;
+ outData.zw = next_uv - curr_uv;
+
+ /* Encode to unsigned normalized 16bit texture. */
+ outData = outData * 0.5 + 0.5;
+}
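
object_motion_frag.glsl produces the same dual (previous/next) motion vectors as the camera-only resolve pass, but from per-vertex positions projected with each frame's view-projection matrix; the perspective divide has to happen before the subtraction, since w generally differs between frames. A small C sketch of that per-fragment math with made-up clip-space positions:

#include <stdio.h>

/* Clip-space position (matrix multiply assumed done); only xyw matter here. */
typedef struct { float x, y, w; } clip_pos;

/* NDC-space delta between two clip-space positions: divide, then subtract. */
static void ndc_delta(clip_pos a, clip_pos b, float *dx, float *dy)
{
  *dx = (a.x / a.w) - (b.x / b.w);
  *dy = (a.y / a.w) - (b.y / b.w);
}

int main(void)
{
  /* Hypothetical clip-space positions of one fragment at t-1, t, t+1. */
  clip_pos prev = {0.20f, 0.00f, 2.0f};
  clip_pos curr = {0.30f, 0.05f, 2.0f};
  clip_pos next = {0.40f, 0.10f, 2.0f};
  float xy[4];
  ndc_delta(prev, curr, &xy[0], &xy[1]); /* outData.xy */
  ndc_delta(next, curr, &xy[2], &xy[3]); /* outData.zw */
  for (int i = 0; i < 4; i++) {
    xy[i] = xy[i] * 0.5f + 0.5f; /* encode to unorm, as in the shader */
  }
  printf("encoded: %.4f %.4f %.4f %.4f\n", xy[0], xy[1], xy[2], xy[3]);
  return 0;
}
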
diff --git a/source/blender/draw/engines/eevee/shaders/object_motion_vert.glsl b/source/blender/draw/engines/eevee/shaders/object_motion_vert.glsl
new file mode 100644
index 00000000000..95cd69ba310
--- /dev/null
+++ b/source/blender/draw/engines/eevee/shaders/object_motion_vert.glsl
@@ -0,0 +1,54 @@
+
+uniform mat4 currModelMatrix;
+uniform mat4 prevModelMatrix;
+uniform mat4 nextModelMatrix;
+uniform bool useDeform;
+
+#ifdef HAIR
+uniform samplerBuffer prvBuffer; /* RGBA32F */
+uniform samplerBuffer nxtBuffer; /* RGBA32F */
+#else
+in vec3 pos;
+in vec3 prv; /* Previous frame position. */
+in vec3 nxt; /* Next frame position. */
+#endif
+
+out vec3 currWorldPos;
+out vec3 prevWorldPos;
+out vec3 nextWorldPos;
+
+void main()
+{
+#ifdef HAIR
+ bool is_persp = (ProjectionMatrix[3][3] == 0.0);
+ float time, thick_time, thickness;
+ vec3 tan, binor;
+ vec3 wpos;
+
+ hair_get_pos_tan_binor_time(is_persp,
+ ModelMatrixInverse,
+ ViewMatrixInverse[3].xyz,
+ ViewMatrixInverse[2].xyz,
+ wpos,
+ tan,
+ binor,
+ time,
+ thickness,
+ thick_time);
+
+ int id = hair_get_base_id();
+ vec3 pos = texelFetch(hairPointBuffer, id).point_position;
+ vec3 prv = texelFetch(prvBuffer, id).point_position;
+ vec3 nxt = texelFetch(nxtBuffer, id).point_position;
+#endif
+ prevWorldPos = (prevModelMatrix * vec4(useDeform ? prv : pos, 1.0)).xyz;
+ currWorldPos = (currModelMatrix * vec4(pos, 1.0)).xyz;
+ nextWorldPos = (nextModelMatrix * vec4(useDeform ? nxt : pos, 1.0)).xyz;
+ /* Use jittered projmatrix to be able to match exact sample depth (depth equal test).
+ * Note that currModelMatrix needs to also be equal to ModelMatrix for the samples to match. */
+#ifndef HAIR
+ gl_Position = ViewProjectionMatrix * vec4(currWorldPos, 1.0);
+#else
+ gl_Position = ViewProjectionMatrix * vec4(wpos, 1.0);
+#endif
+}