github.com/videolan/dav1d.git
 src/fg_apply_tmpl.c           |    4
 src/meson.build               |    3
 src/x86/filmgrain16_avx2.asm  |   23
 src/x86/filmgrain16_sse.asm   |   23
 src/x86/filmgrain_avx2.asm    |   40
 src/x86/filmgrain_avx512.asm  | 1079
 src/x86/filmgrain_common.asm  |   46
 src/x86/filmgrain_init_tmpl.c |   51
 src/x86/filmgrain_sse.asm     |   23
 tests/checkasm/filmgrain.c    |    8
 10 files changed, 1172 insertions(+), 128 deletions(-)
diff --git a/src/fg_apply_tmpl.c b/src/fg_apply_tmpl.c
index 18dc4cc..ee14db9 100644
--- a/src/fg_apply_tmpl.c
+++ b/src/fg_apply_tmpl.c
@@ -223,7 +223,11 @@ void bitfn(dav1d_apply_grain)(const Dav1dFilmGrainDSPContext *const dsp,
const Dav1dPicture *const in)
{
ALIGN_STK_16(entry, grain_lut, 3,[GRAIN_HEIGHT + 1][GRAIN_WIDTH]);
+#if ARCH_X86_64 && BITDEPTH == 8
+ ALIGN_STK_64(uint8_t, scaling, 3,[SCALING_SIZE]);
+#else
uint8_t scaling[3][SCALING_SIZE];
+#endif
const int rows = (out->p.h + 31) >> 5;
bitfn(dav1d_prep_grain)(dsp, out, in, scaling, grain_lut);
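
Why the stricter alignment: the new AVX-512 path loads the entire 8 bpc
scaling LUT (4 x 64 = 256 bytes) into four zmm registers with aligned
64-byte moves (mova m0..m3, [scalingq+64*0..3] below), which fault on a
buffer that is only 16-byte aligned. A minimal standalone sketch of the
idea, assuming a C11 compiler (ALIGN_STK_64 itself is dav1d's
stack-alignment macro and is not reproduced here):

    #include <stdalign.h>
    #include <stdint.h>

    #define SCALING_SIZE 256  /* 1 << BITDEPTH for 8 bpc */

    /* 64-byte alignment lets the asm fetch scaling[0..255] with four
     * aligned 64-byte vector loads; since 256 is a multiple of 64,
     * each of the three per-plane LUTs stays aligned as well. */
    typedef struct {
        alignas(64) uint8_t scaling[3][SCALING_SIZE];
    } FgScalingLuts;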
diff --git a/src/meson.build b/src/meson.build
index ce5c7f5..b06aee6 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -206,15 +206,16 @@ if is_asm_enabled
if dav1d_bitdepths.contains('8')
libdav1d_sources_asm += files(
'x86/cdef_avx512.asm',
+ 'x86/filmgrain_avx512.asm',
'x86/ipred_avx512.asm',
'x86/itx_avx512.asm',
'x86/loopfilter_avx512.asm',
'x86/looprestoration_avx512.asm',
'x86/mc_avx512.asm',
- 'x86/mc_avx2.asm',
'x86/filmgrain_avx2.asm',
'x86/ipred_avx2.asm',
'x86/loopfilter_avx2.asm',
+ 'x86/mc_avx2.asm',
'x86/filmgrain_sse.asm',
'x86/ipred_sse.asm',
'x86/loopfilter_sse.asm',
diff --git a/src/x86/filmgrain16_avx2.asm b/src/x86/filmgrain16_avx2.asm
index e98a731..5c2e386 100644
--- a/src/x86/filmgrain16_avx2.asm
+++ b/src/x86/filmgrain16_avx2.asm
@@ -25,6 +25,7 @@
%include "config.asm"
%include "ext/x86/x86inc.asm"
+%include "x86/filmgrain_common.asm"
%if ARCH_X86_64
@@ -81,28 +82,6 @@ JMP_TABLE generate_grain_uv_420_16bpc_avx2, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_422_16bpc_avx2, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_444_16bpc_avx2, 0, 1, 2, 3
-struc FGData
- .seed: resd 1
- .num_y_points: resd 1
- .y_points: resb 14 * 2
- .chroma_scaling_from_luma: resd 1
- .num_uv_points: resd 2
- .uv_points: resb 2 * 10 * 2
- .scaling_shift: resd 1
- .ar_coeff_lag: resd 1
- .ar_coeffs_y: resb 24
- .ar_coeffs_uv: resb 2 * 28 ; includes padding
- .ar_coeff_shift: resq 1
- .grain_scale_shift: resd 1
- .uv_mult: resd 2
- .uv_luma_mult: resd 2
- .uv_offset: resd 2
- .overlap_flag: resd 1
- .clip_to_restricted_range: resd 1
-endstruc
-
-cextern gaussian_sequence
-
SECTION .text
%define m(x) mangle(private_prefix %+ _ %+ x %+ SUFFIX)
diff --git a/src/x86/filmgrain16_sse.asm b/src/x86/filmgrain16_sse.asm
index 401c72d..6b0daaa 100644
--- a/src/x86/filmgrain16_sse.asm
+++ b/src/x86/filmgrain16_sse.asm
@@ -25,6 +25,7 @@
%include "config.asm"
%include "ext/x86/x86inc.asm"
+%include "x86/filmgrain_common.asm"
SECTION_RODATA 16
pd_16: times 4 dd 16
@@ -66,28 +67,6 @@ JMP_TABLE generate_grain_uv_420_16bpc_ssse3, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_422_16bpc_ssse3, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_444_16bpc_ssse3, 0, 1, 2, 3
-struc FGData
- .seed: resd 1
- .num_y_points: resd 1
- .y_points: resb 14 * 2
- .chroma_scaling_from_luma: resd 1
- .num_uv_points: resd 2
- .uv_points: resb 2 * 10 * 2
- .scaling_shift: resd 1
- .ar_coeff_lag: resd 1
- .ar_coeffs_y: resb 24
- .ar_coeffs_uv: resb 2 * 28 ; includes padding
- .ar_coeff_shift: resq 1
- .grain_scale_shift: resd 1
- .uv_mult: resd 2
- .uv_luma_mult: resd 2
- .uv_offset: resd 2
- .overlap_flag: resd 1
- .clip_to_restricted_range: resd 1
-endstruc
-
-cextern gaussian_sequence
-
SECTION .text
%if ARCH_X86_32
diff --git a/src/x86/filmgrain_avx2.asm b/src/x86/filmgrain_avx2.asm
index 1141b2c..7da8105 100644
--- a/src/x86/filmgrain_avx2.asm
+++ b/src/x86/filmgrain_avx2.asm
@@ -25,6 +25,7 @@
%include "config.asm"
%include "ext/x86/x86inc.asm"
+%include "x86/filmgrain_common.asm"
%if ARCH_X86_64
@@ -74,28 +75,6 @@ JMP_TABLE generate_grain_uv_420, avx2, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_422, avx2, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_444, avx2, 0, 1, 2, 3
-struc FGData
- .seed: resd 1
- .num_y_points: resd 1
- .y_points: resb 14 * 2
- .chroma_scaling_from_luma: resd 1
- .num_uv_points: resd 2
- .uv_points: resb 2 * 10 * 2
- .scaling_shift: resd 1
- .ar_coeff_lag: resd 1
- .ar_coeffs_y: resb 24
- .ar_coeffs_uv: resb 2 * 28 ; includes padding
- .ar_coeff_shift: resq 1
- .grain_scale_shift: resd 1
- .uv_mult: resd 2
- .uv_luma_mult: resd 2
- .uv_offset: resd 2
- .overlap_flag: resd 1
- .clip_to_restricted_range: resd 1
-endstruc
-
-cextern gaussian_sequence
-
SECTION .text
INIT_YMM avx2
@@ -1089,9 +1068,6 @@ cglobal fgy_32x32xn_8bpc, 6, 13, 15, dst, src, stride, fg_data, w, scaling, \
jne .loop_x_hv_overlap
jmp .loop_x_h_overlap
-.end:
- RET
-
.vertical_overlap:
DEFINE_ARGS dst, src, stride, fg_data, w, scaling, grain_lut, \
unused, sby, see, overlap
@@ -1206,7 +1182,7 @@ cglobal fgy_32x32xn_8bpc, 6, 13, 15, dst, src, stride, fg_data, w, scaling, \
jmp .loop_y
.end_y_v_overlap:
add wq, 32
- jge .end_hv
+ jge .end
lea srcq, [src_bakq+wq]
; since fg_dataq.overlap is guaranteed to be set, we never jump
@@ -1326,7 +1302,7 @@ cglobal fgy_32x32xn_8bpc, 6, 13, 15, dst, src, stride, fg_data, w, scaling, \
add wq, 32
lea srcq, [src_bakq+wq]
jl .loop_x_hv_overlap
-.end_hv:
+.end:
RET
%macro FGUV_FN 3 ; name, ss_hor, ss_ver
@@ -1683,9 +1659,6 @@ cglobal fguv_32x32xn_i%1_8bpc, 6, 15, 16, dst, src, stride, fg_data, w, scaling,
jne %%loop_x_hv_overlap
jmp %%loop_x_h_overlap
-%%end:
- RET
-
%%vertical_overlap:
DEFINE_ARGS dst, src, stride, fg_data, w, scaling, grain_lut, unused, \
sby, see, overlap, unused1, unused2, lstride
@@ -1879,7 +1852,7 @@ cglobal fguv_32x32xn_i%1_8bpc, 6, 15, 16, dst, src, stride, fg_data, w, scaling,
%%end_y_v_overlap:
add wq, 32>>%2
- jge %%end_hv
+ jge %%end
mov srcq, r11mp
mov dstq, r12mp
lea lumaq, [r14+wq*(1+%2)]
@@ -2108,15 +2081,14 @@ cglobal fguv_32x32xn_i%1_8bpc, 6, 15, 16, dst, src, stride, fg_data, w, scaling,
%%end_y_hv_overlap:
add wq, 32>>%2
- jge %%end_hv
+ jge %%end
mov srcq, r11mp
mov dstq, r12mp
lea lumaq, [r14+wq*(1+%2)]
add srcq, wq
add dstq, wq
jmp %%loop_x_hv_overlap
-
-%%end_hv:
+%%end:
RET
%endmacro
diff --git a/src/x86/filmgrain_avx512.asm b/src/x86/filmgrain_avx512.asm
new file mode 100644
index 0000000..6d27746
--- /dev/null
+++ b/src/x86/filmgrain_avx512.asm
@@ -0,0 +1,1079 @@
+; Copyright © 2022, VideoLAN and dav1d authors
+; Copyright © 2022, Two Orioles, LLC
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; 1. Redistributions of source code must retain the above copyright notice, this
+; list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+; ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+%include "config.asm"
+%include "ext/x86/x86inc.asm"
+%include "x86/filmgrain_common.asm"
+
+%if ARCH_X86_64
+
+SECTION_RODATA 64
+
+pb_even: db 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+ db 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62
+ db 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94
+ db 96, 98,100,102,104,106,108,110,112,114,116,118,120,122,124,126
+pb_odd: db 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
+ db 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63
+ db 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95
+ db 97, 99,101,103,105,107,109,111,113,115,117,119,121,123,125,127
+interleave_hl: db 8, 0, 9, 1, 10, 2, 11, 3, 12, 4, 13, 5, 14, 6, 15, 7
+pb_27_17_17_27: db 27, 17, 17, 27, 0, 32, 0, 32
+pb_23_22_0_32: db 23, 22, 0, 32, 0, 32, 0, 32
+pb_27_17: times 2 db 27, 17
+pb_23_22: times 2 db 23, 22
+pw_8: times 2 dw 8
+pw_1024: times 2 dw 1024
+pb_17_27: times 2 db 17, 27
+fg_max: times 4 db 255
+ times 4 db 240
+ times 4 db 235
+fg_min: times 4 db 0
+ times 4 db 16
+noise_rnd: times 2 dw 128
+ times 2 dw 64
+ times 2 dw 32
+ times 2 dw 16
+
+SECTION .text
+
+INIT_ZMM avx512icl
+cglobal fgy_32x32xn_8bpc, 6, 13, 22, dst, src, stride, fg_data, w, scaling, \
+ grain_lut, h, sby, see, overlap
+%define base r11-fg_min
+ lea r11, [fg_min]
+ mov r6d, [fg_dataq+FGData.scaling_shift]
+ mov r7d, [fg_dataq+FGData.clip_to_restricted_range]
+ mov sbyd, sbym
+ mov overlapd, [fg_dataq+FGData.overlap_flag]
+ mov r12, 0x0000000f0000000f ; h_overlap mask
+ mova m0, [scalingq+64*0]
+ mova m1, [scalingq+64*1]
+ mova m2, [scalingq+64*2]
+ mova m3, [scalingq+64*3]
+ kmovq k1, r12
+ vbroadcasti32x4 m4, [base+interleave_hl]
+ vpbroadcastd ym16, [base+pb_27_17]
+ vpbroadcastd m12, [base+pb_17_27]
+ vpbroadcastd m6, [base+noise_rnd+r6*4-32]
+ test sbyd, sbyd
+ setnz r6b
+ vpbroadcastd m7, [base+fg_min+r7*4]
+ vpbroadcastd m8, [base+fg_max+r7*8]
+ pxor m5, m5
+ vpbroadcastd m9, [base+pw_1024]
+ vpbroadcastq m10, [base+pb_27_17_17_27]
+ vmovdqa64 m12{k1}, m16
+ test r6b, overlapb
+ jnz .v_overlap
+
+ imul seed, sbyd, (173 << 24) | 37
+ add seed, (105 << 24) | 178
+ rorx seed, seed, 24
+ movzx seed, seew
+ xor seed, [fg_dataq+FGData.seed]
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offx, offy, \
+ h, sby, see, overlap
+
+ lea src_bakq, [srcq+wq]
+ neg wq
+ sub dstq, srcq
+.loop_x:
+ rorx r6, seeq, 1
+ or seed, 0xeff4
+ test seeb, seeh
+ lea seed, [r6+0x8000]
+ cmovp seed, r6d ; updated seed
+ rorx offyd, seed, 8
+ rorx offxq, seeq, 12
+ and offyd, 0xf
+ imul offyd, 164
+ lea offxd, [offyq+offxq*2+829] ; offy*stride+offx
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offxy, grain_lut, \
+ h, sby, see, overlap
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+.loop_y:
+ mova ym18, [srcq+strideq*0]
+ vinserti32x8 m18, [srcq+strideq*1], 1
+ movu ym21, [grain_lutq+offxyq-82]
+ vinserti32x8 m21, [grain_lutq+offxyq+ 0], 1
+ mova m19, m0
+ vpmovb2m k2, m18
+ punpcklbw m16, m18, m5
+ punpckhbw m17, m18, m5
+ vpermt2b m19, m18, m1 ; scaling[ 0..127]
+ vpermi2b m18, m2, m3 ; scaling[128..255]
+ punpcklbw m20, m5, m21 ; grain
+ punpckhbw m21, m5
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+ pshufb m19, m4
+ pmaddubsw m18, m19, m20
+ pmaddubsw m19, m21
+ add grain_lutq, 82*2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ paddw m16, m18
+ paddw m17, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+ mova [dstq+srcq], ym16
+ add srcq, strideq
+ vextracti32x8 [dstq+srcq], m16, 1
+ add srcq, strideq
+ sub hb, 2
+ jg .loop_y
+ add wq, 32
+ jge .end
+ lea srcq, [src_bakq+wq]
+ test overlapd, overlapd
+ jz .loop_x
+ test sbyd, sbyd
+ jnz .hv_overlap
+
+.loop_x_h_overlap:
+ rorx r6, seeq, 1
+ or seed, 0xeff4
+ test seeb, seeh
+ lea seed, [r6+0x8000]
+ cmovp seed, r6d ; updated seed
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offx, offy, \
+ h, sby, see, left_offxy
+
+ rorx offyd, seed, 8
+ mov left_offxyd, offxd ; previous column's offy*stride+offx
+ rorx offxq, seeq, 12
+ and offyd, 0xf
+ imul offyd, 164
+ lea offxd, [offyq+offxq*2+829] ; offy*stride+offx
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offxy, grain_lut, \
+ h, sby, see, left_offxy
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+.loop_y_h_overlap:
+ movu ym20, [grain_lutq+offxyq-82]
+ vinserti32x8 m20, [grain_lutq+offxyq+ 0], 1
+ movd xm21, [grain_lutq+left_offxyq-50]
+ vinserti32x4 m21, [grain_lutq+left_offxyq+32], 2
+ mova ym18, [srcq+strideq*0]
+ vinserti32x8 m18, [srcq+strideq*1], 1
+ mova m19, m0
+ punpcklbw m21, m20
+ vpmovb2m k2, m18
+ punpcklbw m16, m18, m5
+ punpckhbw m17, m18, m5
+ pmaddubsw m21, m10, m21
+ vpermt2b m19, m18, m1
+ vpermi2b m18, m2, m3
+ pmulhrsw m21, m9
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+ punpckhbw m18, m20, m5
+ pshufb m19, m4
+ packsswb m20{k1}, m21, m21
+ punpcklbw m20, m5, m20 ; grain
+ pmaddubsw m18, m19, m18
+ pmaddubsw m19, m20
+ add grain_lutq, 82*2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ paddw m17, m18
+ paddw m16, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+ mova [dstq+srcq], ym16
+ add srcq, strideq
+ vextracti32x8 [dstq+srcq], m16, 1
+ add srcq, strideq
+ sub hb, 2
+ jg .loop_y_h_overlap
+ add wq, 32
+ jge .end
+ lea srcq, [src_bakq+wq]
+ test sbyd, sbyd
+ jnz .hv_overlap
+ jmp .loop_x_h_overlap
+
+.v_overlap:
+ DEFINE_ARGS dst, src, stride, fg_data, w, offy, offx, \
+ h, sby, see, overlap
+
+ movzx r6d, sbyb
+ imul seed, [fg_dataq+FGData.seed], 0x00010001
+ imul r7d, r6d, 173 * 0x00010001
+ imul r6d, 37 * 0x01000100
+ add r7d, (105 << 16) | 188
+ add r6d, (178 << 24) | (141 << 8)
+ and r7d, 0x00ff00ff
+ and r6d, 0xff00ff00
+ xor seed, r7d
+ xor seed, r6d ; (cur_seed << 16) | top_seed
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offx, offy, \
+ h, sby, see, overlap
+
+ lea src_bakq, [srcq+wq]
+ neg wq
+ sub dstq, srcq
+
+ ; we assume from the block above that bits 8-15 of r7d are zero'ed
+ mov r6d, seed
+ or seed, 0xeff4eff4
+ test seeb, seeh
+ setp r7b ; parity of top_seed
+ shr seed, 16
+ shl r7d, 16
+ test seeb, seeh
+ setp r7b ; parity of cur_seed
+ or r6d, 0x00010001
+ xor r7d, r6d
+ rorx seed, r7d, 1 ; updated (cur_seed << 16) | top_seed
+ rorx offyd, seed, 8
+ rorx offxd, seed, 12
+ and offyd, 0xf000f
+ and offxd, 0xf000f
+ imul offyd, 164
+ ; offxy=offy*stride+offx, (cur_offxy << 16) | top_offxy
+ lea offxd, [offyq+offxq*2+0x10001*829+32*82]
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offxy, grain_lut, \
+ h, sby, see, overlap, top_offxy
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+ movzx top_offxyd, offxyw
+ shr offxyd, 16
+ movu ym16, [grain_lutq+offxyq-82]
+ vinserti32x8 m16, [grain_lutq+offxyq+ 0], 1
+ movu ym21, [grain_lutq+top_offxyq-82]
+ vinserti32x8 m21, [grain_lutq+top_offxyq+ 0], 1
+ mova ym18, [srcq+strideq*0]
+ vinserti32x8 m18, [srcq+strideq*1], 1
+ mova m19, m0
+ punpcklbw m20, m21, m16
+ punpckhbw m21, m16
+ vpmovb2m k2, m18
+ pmaddubsw m20, m12, m20
+ pmaddubsw m21, m12, m21
+ punpcklbw m16, m18, m5
+ punpckhbw m17, m18, m5
+ vpermt2b m19, m18, m1
+ vpermi2b m18, m2, m3
+ pmulhrsw m20, m9
+ pmulhrsw m21, m9
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+ pshufb m19, m4
+ packsswb m20, m21
+ punpcklbw m18, m5, m20 ; grain
+ punpckhbw m20, m5
+ pmaddubsw m18, m19, m18
+ pmaddubsw m19, m20
+ add grain_lutq, 82*2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ paddw m16, m18
+ paddw m17, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+ mova [dstq+srcq], ym16
+ add srcq, strideq
+ vextracti32x8 [dstq+srcq], m16, 1
+ add srcq, strideq
+ sub hb, 2
+ jg .loop_y
+ add wq, 32
+ jge .end
+ lea srcq, [src_bakq+wq]
+
+ ; since fg_dataq.overlap is guaranteed to be set, we never jump back
+ ; to .v_overlap, and instead always fall-through to h+v overlap
+.hv_overlap:
+ ; we assume from the block above that bits 8-15 of r7d are zero'ed
+ mov r6d, seed
+ or seed, 0xeff4eff4
+ test seeb, seeh
+ setp r7b ; parity of top_seed
+ shr seed, 16
+ shl r7d, 16
+ test seeb, seeh
+ setp r7b ; parity of cur_seed
+ or r6d, 0x00010001
+ xor r7d, r6d
+ rorx seed, r7d, 1 ; updated (cur_seed << 16) | top_seed
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offx, offy, \
+ h, sby, see, left_offxy, top_offxy, topleft_offxy
+
+ mov topleft_offxyd, top_offxyd
+ rorx offyd, seed, 8
+ mov left_offxyd, offxd
+ rorx offxd, seed, 12
+ and offyd, 0xf000f
+ and offxd, 0xf000f
+ imul offyd, 164
+ ; offxy=offy*stride+offx, (cur_offxy << 16) | top_offxy
+ lea offxd, [offyq+offxq*2+0x10001*829+32*82]
+
+ DEFINE_ARGS dst, src, stride, src_bak, w, offxy, grain_lut, \
+ h, sby, see, left_offxy, top_offxy, topleft_offxy
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+ movzx top_offxyd, offxyw
+ shr offxyd, 16
+ movu ym19, [grain_lutq+offxyq-82]
+ vinserti32x8 m19, [grain_lutq+offxyq+ 0], 1
+ movd xm16, [grain_lutq+left_offxyq-50]
+ vinserti32x4 m16, [grain_lutq+left_offxyq+32], 2
+ movu ym21, [grain_lutq+top_offxyq-82]
+ vinserti32x8 m21, [grain_lutq+top_offxyq+ 0], 1
+ movd xm17, [grain_lutq+topleft_offxyq-50]
+ vinserti32x4 m17, [grain_lutq+topleft_offxyq+32], 2
+ mova ym18, [srcq+strideq*0]
+ vinserti32x8 m18, [srcq+strideq*1], 1
+ ; do h interpolation first (so top | top/left -> top, left | cur -> cur)
+ punpcklbw m16, m19
+ punpcklbw m17, m21
+ pmaddubsw m16, m10, m16
+ pmaddubsw m17, m10, m17
+ punpckhbw m20, m21, m19
+ vpmovb2m k2, m18
+ pmulhrsw m16, m9
+ pmulhrsw m17, m9
+ packsswb m19{k1}, m16, m16
+ packsswb m21{k1}, m17, m17
+ ; followed by v interpolation (top | cur -> cur)
+ punpcklbw m21, m19
+ mova m19, m0
+ pmaddubsw m20, m12, m20
+ pmaddubsw m21, m12, m21
+ punpcklbw m16, m18, m5
+ punpckhbw m17, m18, m5
+ vpermt2b m19, m18, m1 ; scaling[ 0..127]
+ vpermi2b m18, m2, m3 ; scaling[128..255]
+ pmulhrsw m20, m9
+ pmulhrsw m21, m9
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+ pshufb m19, m4
+ packsswb m21, m20
+ punpcklbw m20, m5, m21
+ punpckhbw m21, m5
+ pmaddubsw m18, m19, m20
+ pmaddubsw m19, m21
+ add grain_lutq, 82*2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ paddw m16, m18
+ paddw m17, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+ mova [dstq+srcq], ym16
+ add srcq, strideq
+ vextracti32x8 [dstq+srcq], m16, 1
+ add srcq, strideq
+ sub hb, 2
+ jg .loop_y_h_overlap
+ add wq, 32
+ lea srcq, [src_bakq+wq]
+ jl .hv_overlap
+.end:
+ RET
+
+%macro FGUV_FN 3 ; name, ss_hor, ss_ver
+cglobal fguv_32x32xn_i%1_8bpc, 6, 14+%2, 22, dst, src, stride, fg_data, w, \
+ scaling, grain_lut, h, sby, luma, \
+ overlap, uv_pl, is_id, _, stride3
+ lea r11, [fg_min]
+ mov r6d, [fg_dataq+FGData.scaling_shift]
+ mov r7d, [fg_dataq+FGData.clip_to_restricted_range]
+ mov r9d, is_idm
+ mov sbyd, sbym
+ mov overlapd, [fg_dataq+FGData.overlap_flag]
+%if %2
+ mov r12, 0x000f000f000f000f ; h_overlap mask
+ vpbroadcastq m10, [base+pb_23_22_0_32]
+ lea stride3q, [strideq*3]
+%else
+ mov r12, 0x0000000f0000000f
+ vpbroadcastq m10, [base+pb_27_17_17_27]
+%endif
+ mova m0, [scalingq+64*0]
+ mova m1, [scalingq+64*1]
+ mova m2, [scalingq+64*2]
+ mova m3, [scalingq+64*3]
+ kmovq k1, r12
+ vbroadcasti32x4 m4, [base+interleave_hl]
+ vpbroadcastd m6, [base+noise_rnd+r6*4-32]
+ vpbroadcastd m7, [base+fg_min+r7*4]
+ shlx r7d, r7d, r9d
+ vpbroadcastd m8, [base+fg_max+r7*4]
+ test sbyd, sbyd
+ setnz r7b
+ vpbroadcastd m9, [base+pw_1024]
+ mova m11, [base+pb_even]
+ mova m12, [base+pb_odd]
+ pxor m5, m5
+ mov r5, r10mp ; lstride
+ cmp byte [fg_dataq+FGData.chroma_scaling_from_luma], 0
+ jne .csfl
+
+%macro %%FGUV_32x32xN_LOOP 3 ; not-csfl, ss_hor, ss_ver
+ DEFINE_ARGS dst, src, stride, fg_data, w, lstride, grain_lut, \
+ h, sby, see, overlap, uv_pl, _, _, stride3
+%if %1
+ mov r6d, uv_plm
+ vpbroadcastd m16, [base+pw_8]
+ vbroadcasti32x4 m14, [fg_dataq+FGData.uv_mult+r6*4]
+ vpbroadcastw m15, [fg_dataq+FGData.uv_offset+r6*4]
+ pshufb m14, m16 ; uv_luma_mult, uv_mult
+%endif
+ test r7b, overlapb
+ jnz %%v_overlap
+
+ imul seed, sbyd, (173 << 24) | 37
+ add seed, (105 << 24) | 178
+ rorx seed, seed, 24
+ movzx seed, seew
+ xor seed, [fg_dataq+FGData.seed]
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ offx, offy, see, overlap, _, _, _, stride3
+
+ mov lumaq, r9mp
+ lea r11, [srcq+wq]
+ lea r12, [dstq+wq]
+ lea r13, [lumaq+wq*(1+%2)]
+ mov r11mp, r11
+ mov r12mp, r12
+ neg wq
+
+%%loop_x:
+ rorx r6, seeq, 1
+ or seed, 0xeff4
+ test seeb, seeh
+ lea seed, [r6+0x8000]
+ cmovp seed, r6d ; updated seed
+ rorx offyd, seed, 8
+ rorx offxq, seeq, 12
+ and offyd, 0xf
+ imul offyd, 164>>%3
+ lea offyd, [offyq+offxq*(2-%2)+(3+(6>>%3))*82+3+(6>>%2)] ; offy*stride+offx
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ h, offxy, see, overlap, _, _, _, stride3
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+%%loop_y:
+ mova ym18, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m18, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+%if %2
+ mova ym20, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m20, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+ mova xm17, [srcq+strideq*0]
+ movu xm21, [grain_lutq+offxyq+82*0]
+ vinserti128 ym17, [srcq+strideq*1], 1
+ vinserti128 ym21, [grain_lutq+offxyq+82*1], 1
+ mova m19, m11
+ vpermi2b m19, m18, m20
+ vpermt2b m18, m12, m20
+ vinserti32x4 m17, [srcq+strideq*2], 2
+ vinserti32x4 m21, [grain_lutq+offxyq+82*2], 2
+ pavgb m18, m19
+ vinserti32x4 m17, [srcq+stride3q ], 3
+ vinserti32x4 m21, [grain_lutq+offxyq+82*3], 3
+%else
+ mova ym17, [srcq+strideq*0]
+ vinserti32x8 m17, [srcq+strideq*1], 1
+ movu ym21, [grain_lutq+offxyq+82*0]
+ vinserti32x8 m21, [grain_lutq+offxyq+82*1], 1
+%endif
+ lea srcq, [srcq+strideq*(2<<%2)]
+%if %1
+ punpckhbw m19, m18, m17
+ punpcklbw m18, m17 ; { luma, chroma }
+ pmaddubsw m19, m14
+ pmaddubsw m18, m14
+ psraw m19, 6
+ psraw m18, 6
+ paddw m19, m15
+ paddw m18, m15
+ packuswb m18, m19
+%endif
+ mova m19, m0
+ vpmovb2m k2, m18
+ vpermt2b m19, m18, m1 ; scaling[ 0..127]
+ vpermi2b m18, m2, m3 ; scaling[128..255]
+ punpcklbw m20, m5, m21 ; grain
+ punpckhbw m21, m5
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+ pshufb m19, m4
+ pmaddubsw m18, m19, m20
+ pmaddubsw m19, m21
+ add grain_lutq, 82*2<<%2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ punpcklbw m16, m17, m5 ; chroma
+ punpckhbw m17, m5
+ paddw m16, m18
+ paddw m17, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+%if %2
+ mova [dstq+strideq*0], xm16
+ vextracti128 [dstq+strideq*1], ym16, 1
+ vextracti32x4 [dstq+strideq*2], m16, 2
+ vextracti32x4 [dstq+stride3q ], m16, 3
+%else
+ mova [dstq+strideq*0], ym16
+ vextracti32x8 [dstq+strideq*1], m16, 1
+%endif
+ lea dstq, [dstq+strideq*(2<<%2)]
+ sub hb, 2<<%2
+ jg %%loop_y
+ add wq, 32>>%2
+ jge %%end
+ mov srcq, r11mp
+ mov dstq, r12mp
+ lea lumaq, [r13+wq*(1<<%2)]
+ add srcq, wq
+ add dstq, wq
+ test overlapd, overlapd
+ jz %%loop_x
+ cmp dword r8m, 0 ; sby
+ jne %%hv_overlap
+
+ ; horizontal overlap (without vertical overlap)
+%%loop_x_h_overlap:
+ rorx r6, seeq, 1
+ or seed, 0xeff4
+ test seeb, seeh
+ lea seed, [r6+0x8000]
+ cmovp seed, r6d ; updated seed
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ offx, offy, see, left_offxy, _, _, _, stride3
+
+ lea left_offxyd, [offyq+(32>>%2)] ; previous column's offy*stride+offx
+ rorx offyd, seed, 8
+ rorx offxq, seeq, 12
+ and offyd, 0xf
+ imul offyd, 164>>%3
+ lea offyd, [offyq+offxq*(2-%2)+(3+(6>>%3))*82+3+(6>>%2)] ; offy*stride+offx
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ h, offxy, see, left_offxy, _, _, _, stride3
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+%%loop_y_h_overlap:
+ ; src
+%if %2
+ mova ym18, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m18, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+ mova ym20, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m20, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+ mova xm17, [srcq+strideq*0]
+ vinserti128 ym17, [srcq+strideq*1], 1
+ mova m19, m11
+ vpermi2b m19, m18, m20
+ vpermt2b m18, m12, m20
+ vinserti32x4 m17, [srcq+strideq*2], 2
+ pavgb m18, m19
+ vinserti32x4 m17, [srcq+stride3q ], 3
+%else
+ mova ym18, [lumaq+lstrideq*0]
+ vinserti32x8 m18, [lumaq+lstrideq*1], 1
+ mova ym17, [srcq+strideq*0]
+ vinserti32x8 m17, [srcq+strideq*1], 1
+ lea lumaq, [lumaq+lstrideq*2]
+%endif
+ lea srcq, [srcq+strideq*(2<<%2)]
+%if %1
+ punpckhbw m19, m18, m17
+ punpcklbw m18, m17 ; { luma, chroma }
+ pmaddubsw m19, m14
+ pmaddubsw m18, m14
+ psraw m19, 6
+ psraw m18, 6
+ paddw m19, m15
+ paddw m18, m15
+ packuswb m18, m19
+%endif
+ mova m19, m0
+ vpmovb2m k2, m18
+ vpermt2b m19, m18, m1 ; scaling[ 0..127]
+ vpermi2b m18, m2, m3 ; scaling[128..255]
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+%if %2
+ movu xm20, [grain_lutq+offxyq +82*0]
+ movd xm18, [grain_lutq+left_offxyq+82*0]
+ vinserti32x4 ym20, [grain_lutq+offxyq +82*1], 1
+ vinserti32x4 ym18, [grain_lutq+left_offxyq+82*1], 1
+ vinserti32x4 m20, [grain_lutq+offxyq +82*2], 2
+ vinserti32x4 m18, [grain_lutq+left_offxyq+82*2], 2
+ vinserti32x4 m20, [grain_lutq+offxyq +82*3], 3
+ vinserti32x4 m18, [grain_lutq+left_offxyq+82*3], 3
+%else
+ movu ym20, [grain_lutq+offxyq + 0]
+ movd xm18, [grain_lutq+left_offxyq+ 0]
+ vinserti32x8 m20, [grain_lutq+offxyq +82], 1
+ vinserti32x4 m18, [grain_lutq+left_offxyq+82], 2
+%endif
+ punpcklbw m18, m20
+ pmaddubsw m18, m10, m18
+ punpckhbw m21, m20, m5
+ pshufb m19, m4
+ pmulhrsw m18, m9
+ vpacksswb m20{k1}, m18, m18
+ punpcklbw m20, m5, m20
+ pmaddubsw m18, m19, m20
+ pmaddubsw m19, m21
+ add grain_lutq, 82*2<<%2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ punpcklbw m16, m17, m5 ; chroma
+ punpckhbw m17, m5
+ paddw m16, m18
+ paddw m17, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+%if %2
+ mova [dstq+strideq*0], xm16
+ vextracti128 [dstq+strideq*1], ym16, 1
+ vextracti32x4 [dstq+strideq*2], m16, 2
+ vextracti32x4 [dstq+stride3q ], m16, 3
+%else
+ mova [dstq+strideq*0], ym16
+ vextracti32x8 [dstq+strideq*1], m16, 1
+%endif
+ lea dstq, [dstq+strideq*(2<<%2)]
+ sub hb, 2<<%2
+ jg %%loop_y_h_overlap
+ add wq, 32>>%2
+ jge %%end
+ mov srcq, r11mp
+ mov dstq, r12mp
+ lea lumaq, [r13+wq*(1<<%2)]
+ add srcq, wq
+ add dstq, wq
+ cmp dword r8m, 0 ; sby
+ jne %%hv_overlap
+ jmp %%loop_x_h_overlap
+
+%%v_overlap:
+ DEFINE_ARGS dst, src, stride, fg_data, w, lstride, grain_lut, \
+ _, sby, see, overlap, _, _, _, stride3
+
+ movzx sbyd, sbyb
+ imul seed, [fg_dataq+FGData.seed], 0x00010001
+ imul r7d, sbyd, 173 * 0x00010001
+ imul sbyd, 37 * 0x01000100
+ add r7d, (105 << 16) | 188
+ add sbyd, (178 << 24) | (141 << 8)
+ and r7d, 0x00ff00ff
+ and sbyd, 0xff00ff00
+ xor seed, r7d
+ xor seed, sbyd ; (cur_seed << 16) | top_seed
+
+%if %3
+ vpbroadcastd m13, [base+pb_23_22]
+ kxnorw k3, k3, k3 ; v_overlap mask
+%elif %2
+ vbroadcasti32x8 m13, [base+pb_27_17]
+ kxnord k3, k3, k3
+ pshufd m13, m13, q0000 ; 8x27_17, 8x17_27
+%else
+ vpbroadcastd ym16, [base+pb_27_17]
+ vpbroadcastd m13, [base+pb_17_27]
+ vmovdqa64 m13{k1}, m16
+%endif
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ offx, offy, see, overlap, top_offxy, _, _, stride3
+
+ mov lumaq, r9mp
+ lea r11, [srcq+wq]
+ lea r12, [dstq+wq]
+ lea r13, [lumaq+wq*(1<<%2)]
+ mov r11mp, r11
+ mov r12mp, r12
+ neg wq
+
+ ; we assume from the block above that bits 8-15 of r7d are zero'ed
+ mov r6d, seed
+ or seed, 0xeff4eff4
+ test seeb, seeh
+ setp r7b ; parity of top_seed
+ shr seed, 16
+ shl r7d, 16
+ test seeb, seeh
+ setp r7b ; parity of cur_seed
+ or r6d, 0x00010001
+ xor r7d, r6d
+ rorx seed, r7d, 1 ; updated (cur_seed << 16) | top_seed
+ rorx offyd, seed, 8
+ rorx offxd, seed, 12
+ and offyd, 0x000f000f
+ and offxd, 0x000f000f
+ imul offyd, 164>>%3
+ ; offxy=offy*stride+offx, (cur_offxy << 16) | top_offxy
+ lea offyd, [offyq+offxq*(2-%2)+0x10001*((3+(6>>%3))*82+3+(6>>%2))+(32>>%3)*82]
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ h, offxy, see, overlap, top_offxy, _, _, stride3
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+ movzx top_offxyd, offxyw
+ shr offxyd, 16
+
+%if %2
+ mova ym18, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m18, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+ mova ym20, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m20, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+ mova xm17, [srcq+strideq*0]
+ vinserti128 ym17, [srcq+strideq*1], 1
+ mova m19, m11
+ vpermi2b m19, m18, m20
+ vpermt2b m18, m12, m20
+ vinserti32x4 m17, [srcq+strideq*2], 2
+ pavgb m18, m19
+ vinserti32x4 m17, [srcq+stride3q ], 3
+%else
+ mova ym18, [lumaq+lstrideq*0]
+ vinserti32x8 m18, [lumaq+lstrideq*1], 1
+ mova ym17, [srcq+strideq*0]
+ vinserti32x8 m17, [srcq+strideq*1], 1
+ lea lumaq, [lumaq+lstrideq*2]
+%endif
+ lea srcq, [srcq+strideq*(2<<%2)]
+%if %1
+ punpckhbw m19, m18, m17
+ punpcklbw m18, m17 ; { luma, chroma }
+ pmaddubsw m19, m14
+ pmaddubsw m18, m14
+ psraw m19, 6
+ psraw m18, 6
+ paddw m19, m15
+ paddw m18, m15
+ packuswb m18, m19
+%endif
+ mova m19, m0
+ vpmovb2m k2, m18
+ vpermt2b m19, m18, m1 ; scaling[ 0..127]
+ vpermi2b m18, m2, m3 ; scaling[128..255]
+%if %3
+ movu xm21, [grain_lutq+offxyq+82*0]
+ movu xm16, [grain_lutq+top_offxyq+82*0]
+ punpcklbw xm20, xm16, xm21
+ punpckhbw xm16, xm21
+ pmaddubsw xm20, xm13, xm20
+ pmaddubsw xm16, xm13, xm16
+ ; only interpolate first line, insert remaining line unmodified
+ vbroadcasti128 ym21, [grain_lutq+offxyq+82*1]
+ vinserti32x4 m21, [grain_lutq+offxyq+82*2], 2
+ vinserti32x4 m21, [grain_lutq+offxyq+82*3], 3
+ pmulhrsw xm20, xm9
+ pmulhrsw xm16, xm9
+ vpacksswb m21{k3}, m20, m16
+%elif %2
+ movu xm21, [grain_lutq+offxyq+82*0]
+ vinserti128 ym21, [grain_lutq+offxyq+82*1], 1
+ movu xm16, [grain_lutq+top_offxyq+82*0]
+ vinserti32x4 ym16, [grain_lutq+top_offxyq+82*1], 1
+ punpcklbw ym20, ym16, ym21
+ punpckhbw ym16, ym21
+ pmaddubsw ym20, ym13, ym20
+ pmaddubsw ym16, ym13, ym16
+ vbroadcasti32x4 m21, [grain_lutq+offxyq+82*2]
+ vinserti32x4 m21, [grain_lutq+offxyq+82*3], 3
+ pmulhrsw ym20, ym9
+ pmulhrsw ym16, ym9
+ packsswb m21{k3}, m20, m16
+%else
+ movu ym16, [grain_lutq+offxyq+82*0]
+ vinserti32x8 m16, [grain_lutq+offxyq+82*1], 1
+ movu ym20, [grain_lutq+top_offxyq+82*0]
+ vinserti32x8 m20, [grain_lutq+top_offxyq+82*1], 1
+ punpcklbw m21, m20, m16
+ punpckhbw m20, m16
+ pmaddubsw m21, m13, m21
+ pmaddubsw m20, m13, m20
+ pmulhrsw m21, m9
+ pmulhrsw m20, m9
+ packsswb m21, m20
+%endif
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+ pshufb m19, m4
+ punpcklbw m20, m5, m21
+ punpckhbw m21, m5
+ pmaddubsw m18, m19, m20
+ pmaddubsw m19, m21
+ add grain_lutq, 82*2<<%2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ punpcklbw m16, m17, m5 ; chroma
+ punpckhbw m17, m5
+ paddw m16, m18
+ paddw m17, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+%if %2
+ mova [dstq+strideq*0], xm16
+ vextracti128 [dstq+strideq*1], ym16, 1
+ vextracti32x4 [dstq+strideq*2], m16, 2
+ vextracti32x4 [dstq+stride3q ], m16, 3
+%else
+ mova [dstq+strideq*0], ym16
+ vextracti32x8 [dstq+strideq*1], m16, 1
+%endif
+ lea dstq, [dstq+strideq*(2<<%2)]
+ sub hb, 2<<%2
+ jg %%loop_y
+ add wq, 32>>%2
+ jge %%end
+ mov srcq, r11mp
+ mov dstq, r12mp
+ lea lumaq, [r13+wq*(1<<%2)]
+ add srcq, wq
+ add dstq, wq
+
+%%hv_overlap:
+ ; we assume from the block above that bits 8-15 of r7d are zero'ed
+ mov r6d, seed
+ or seed, 0xeff4eff4
+ test seeb, seeh
+ setp r7b ; parity of top_seed
+ shr seed, 16
+ shl r7d, 16
+ test seeb, seeh
+ setp r7b ; parity of cur_seed
+ or r6d, 0x00010001
+ xor r7d, r6d
+ rorx seed, r7d, 1 ; updated (cur_seed << 16) | top_seed
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ offx, offy, see, left_offxy, top_offxy, topleft_offxy, _, stride3
+
+ lea topleft_offxyd, [top_offxyq+(32>>%2)]
+ lea left_offxyd, [offyq+(32>>%2)]
+ rorx offyd, seed, 8
+ rorx offxd, seed, 12
+ and offyd, 0x000f000f
+ and offxd, 0x000f000f
+ imul offyd, 164>>%3
+ ; offxy=offy*stride+offx, (cur_offxy << 16) | top_offxy
+ lea offyd, [offyq+offxq*(2-%2)+0x10001*((3+(6>>%3))*82+3+(6>>%2))+(32>>%3)*82]
+
+ DEFINE_ARGS dst, src, stride, luma, w, lstride, grain_lut, \
+ h, offxy, see, left_offxy, top_offxy, topleft_offxy, _, stride3
+
+ mov grain_lutq, grain_lutmp
+ mov hd, hm
+ movzx top_offxyd, offxyw
+ shr offxyd, 16
+
+%if %2
+ movu xm21, [grain_lutq+offxyq+82*0]
+ movd xm16, [grain_lutq+left_offxyq+82*0]
+ vinserti128 ym21, [grain_lutq+offxyq+82*1], 1
+ vinserti128 ym16, [grain_lutq+left_offxyq+82*1], 1
+ vinserti32x4 m21, [grain_lutq+offxyq+82*2], 2
+ vinserti32x4 m16, [grain_lutq+left_offxyq+82*2], 2
+ vinserti32x4 m21, [grain_lutq+offxyq+82*3], 3
+ vinserti32x4 m16, [grain_lutq+left_offxyq+82*3], 3
+ movd xm18, [grain_lutq+topleft_offxyq+82*0]
+ movu xm20, [grain_lutq+top_offxyq]
+ ; do h interpolation first (so top | top/left -> top, left | cur -> cur)
+ punpcklbw m16, m21
+%if %3
+ punpcklbw xm18, xm20
+%else
+ vinserti128 ym18, [grain_lutq+topleft_offxyq+82*1], 1
+ vinserti128 ym20, [grain_lutq+top_offxyq+82*1], 1
+ punpcklbw ym18, ym20
+%endif
+ punpcklqdq m16, m18
+ pmaddubsw m16, m10, m16
+ pmulhrsw m16, m9
+ packsswb m16, m16
+%if %3
+ vpalignr xm20{k1}, xm16, xm16, 4
+%else
+ vpalignr ym20{k1}, ym16, ym16, 4
+%endif
+ vmovdqu8 m21{k1}, m16
+%else
+ movu ym21, [grain_lutq+offxyq+82*0]
+ vinserti32x8 m21, [grain_lutq+offxyq+82*1], 1
+ movd xm16, [grain_lutq+left_offxyq+82*0]
+ vinserti32x4 m16, [grain_lutq+left_offxyq+82*1], 2
+ movu ym20, [grain_lutq+top_offxyq+82*0]
+ vinserti32x8 m20, [grain_lutq+top_offxyq+82*1], 1
+ movd xm18, [grain_lutq+topleft_offxyq+82*0]
+ vinserti32x4 m18, [grain_lutq+topleft_offxyq+82*1], 2
+ punpcklbw m16, m21
+ punpcklbw m18, m20
+ punpcklqdq m16, m18
+ pmaddubsw m16, m10, m16
+ pmulhrsw m16, m9
+ packsswb m16, m16
+ vpalignr m20{k1}, m16, m16, 4
+ vmovdqu8 m21{k1}, m16
+%endif
+%if %2
+ mova ym18, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m18, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+ mova ym16, [lumaq+lstrideq*(0<<%3)]
+ vinserti32x8 m16, [lumaq+lstrideq*(1<<%3)], 1
+ lea lumaq, [lumaq+lstrideq*(2<<%3)]
+ mova xm17, [srcq+strideq*0]
+ vinserti128 ym17, [srcq+strideq*1], 1
+ mova m19, m11
+ vpermi2b m19, m18, m16
+ vpermt2b m18, m12, m16
+ vinserti32x4 m17, [srcq+strideq*2], 2
+ pavgb m18, m19
+ vinserti32x4 m17, [srcq+stride3q ], 3
+%else
+ mova ym18, [lumaq+lstrideq*0]
+ vinserti32x8 m18, [lumaq+lstrideq*1], 1
+ mova ym17, [srcq+strideq*0]
+ vinserti32x8 m17, [srcq+strideq*1], 1
+ lea lumaq, [lumaq+lstrideq*2]
+%endif
+ lea srcq, [srcq+strideq*(2<<%2)]
+%if %1
+ punpckhbw m19, m18, m17
+ punpcklbw m18, m17 ; { luma, chroma }
+ pmaddubsw m19, m14
+ pmaddubsw m18, m14
+ psraw m19, 6
+ psraw m18, 6
+ paddw m19, m15
+ paddw m18, m15
+ packuswb m18, m19
+%endif
+ mova m19, m0
+ vpmovb2m k2, m18
+ vpermt2b m19, m18, m1 ; scaling[ 0..127]
+ vpermi2b m18, m2, m3 ; scaling[128..255]
+ ; followed by v interpolation (top | cur -> cur)
+%if %3
+ punpcklbw xm16, xm20, xm21
+ punpckhbw xm20, xm21
+ pmaddubsw xm16, xm13, xm16
+ pmaddubsw xm20, xm13, xm20
+ pmulhrsw xm16, xm9
+ pmulhrsw xm20, xm9
+ vpacksswb m21{k3}, m16, m20
+%elif %2
+ punpcklbw ym16, ym20, ym21
+ punpckhbw ym20, ym21
+ pmaddubsw ym16, ym13, ym16
+ pmaddubsw ym20, ym13, ym20
+ pmulhrsw ym16, ym9
+ pmulhrsw ym20, ym9
+ vpacksswb m21{k3}, m16, m20
+%else
+ punpcklbw m16, m20, m21
+ punpckhbw m20, m21
+ pmaddubsw m16, m13, m16
+ pmaddubsw m20, m13, m20
+ pmulhrsw m16, m9
+ pmulhrsw m20, m9
+ packsswb m21, m16, m20
+%endif
+ vmovdqu8 m19{k2}, m18 ; scaling[src]
+ pshufb m19, m4
+ punpcklbw m20, m5, m21
+ punpckhbw m21, m5
+ pmaddubsw m18, m19, m20
+ pmaddubsw m19, m21
+ add grain_lutq, 82*2<<%2
+ pmulhrsw m18, m6 ; noise
+ pmulhrsw m19, m6
+ punpcklbw m16, m17, m5 ; chroma
+ punpckhbw m17, m5
+ paddw m16, m18
+ paddw m17, m19
+ packuswb m16, m17
+ pmaxub m16, m7
+ pminub m16, m8
+%if %2
+ mova [dstq+strideq*0], xm16
+ vextracti128 [dstq+strideq*1], ym16, 1
+ vextracti32x4 [dstq+strideq*2], m16, 2
+ vextracti32x4 [dstq+stride3q ], m16, 3
+%else
+ mova [dstq+strideq*0], ym16
+ vextracti32x8 [dstq+strideq*1], m16, 1
+%endif
+ lea dstq, [dstq+strideq*(2<<%2)]
+ sub hb, 2<<%2
+ jg %%loop_y_h_overlap
+ add wq, 32>>%2
+ jge %%end
+ mov srcq, r11mp
+ mov dstq, r12mp
+ lea lumaq, [r13+wq*(1<<%2)]
+ add srcq, wq
+ add dstq, wq
+ jmp %%hv_overlap
+%%end:
+ RET
+%endmacro
+
+ %%FGUV_32x32xN_LOOP 1, %2, %3
+.csfl:
+ %%FGUV_32x32xN_LOOP 0, %2, %3
+%endmacro
+
+FGUV_FN 420, 1, 1
+FGUV_FN 422, 1, 0
+FGUV_FN 444, 0, 0
+
+%endif ; ARCH_X86_64
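
For reference, the mova/vpmovb2m/vpermt2b/vpermi2b/vmovdqu8 sequence
that recurs in the kernels above implements a full 256-entry byte LUT
without gather instructions: m0..m3 hold scaling[0..255], the two
permutes look up the low and high 128 entries, and the per-byte sign
mask from vpmovb2m blends the halves. A scalar C model of what the
sequence computes (function name is illustrative, not part of the
patch):

    #include <stdint.h>

    static void scaling_lookup(uint8_t *dst, const uint8_t *src,
                               const uint8_t scaling[256], int n)
    {
        for (int i = 0; i < n; i++) {
            const uint8_t s = src[i];
            /* vpermt2b: {m0,m1}[s & 127] -> scaling[s & 0x7f]
             * vpermi2b: {m2,m3}[s & 127] -> scaling[128 + (s & 0x7f)]
             * vmovdqu8 with k2 (the sign bit of s) picks the high
             * half, collapsing to a plain 256-entry table lookup: */
            dst[i] = (s & 0x80) ? scaling[128 + (s & 0x7f)]
                                : scaling[s & 0x7f];
        }
    }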
diff --git a/src/x86/filmgrain_common.asm b/src/x86/filmgrain_common.asm
new file mode 100644
index 0000000..74f7044
--- /dev/null
+++ b/src/x86/filmgrain_common.asm
@@ -0,0 +1,46 @@
+; Copyright © 2019-2022, VideoLAN and dav1d authors
+; Copyright © 2019-2022, Two Orioles, LLC
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; 1. Redistributions of source code must retain the above copyright notice, this
+; list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+; ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+struc FGData
+ .seed: resd 1
+ .num_y_points: resd 1
+ .y_points: resb 14 * 2
+ .chroma_scaling_from_luma: resd 1
+ .num_uv_points: resd 2
+ .uv_points: resb 2 * 10 * 2
+ .scaling_shift: resd 1
+ .ar_coeff_lag: resd 1
+ .ar_coeffs_y: resb 24
+ .ar_coeffs_uv: resb 2 * 28 ; includes padding
+ .ar_coeff_shift: resq 1
+ .grain_scale_shift: resd 1
+ .uv_mult: resd 2
+ .uv_luma_mult: resd 2
+ .uv_offset: resd 2
+ .overlap_flag: resd 1
+ .clip_to_restricted_range: resd 1
+endstruc
+
+cextern gaussian_sequence
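
The FGData struc is a byte-for-byte mirror of the public
Dav1dFilmGrainData C struct, now shared by every x86 film-grain file so
the asm can address fields as [fg_dataq+FGData.field]. If the C struct
ever changes, these offsets go stale silently; a hypothetical
compile-time check (not part of this patch) could pin one of them:

    #include <stddef.h>
    #include "dav1d/headers.h"

    /* seed(4) + num_y_points(4) + y_points(28) + chroma_scaling_from_luma(4)
     * + num_uv_points(8) + uv_points(40) = 88 bytes before scaling_shift,
     * matching the resd/resb layout of the asm struc above. */
    _Static_assert(offsetof(Dav1dFilmGrainData, scaling_shift) == 88,
                   "asm FGData layout out of sync with Dav1dFilmGrainData");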
diff --git a/src/x86/filmgrain_init_tmpl.c b/src/x86/filmgrain_init_tmpl.c
index 95a5e97..0b783d1 100644
--- a/src/x86/filmgrain_init_tmpl.c
+++ b/src/x86/filmgrain_init_tmpl.c
@@ -1,6 +1,6 @@
/*
- * Copyright © 2018-2021, VideoLAN and dav1d authors
- * Copyright © 2018, Two Orioles, LLC
+ * Copyright © 2018-2022, VideoLAN and dav1d authors
+ * Copyright © 2018-2022, Two Orioles, LLC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,23 +28,19 @@
#include "src/cpu.h"
#include "src/filmgrain.h"
-decl_generate_grain_y_fn(BF(dav1d_generate_grain_y, ssse3));
-decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_420, ssse3));
-decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_422, ssse3));
-decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_444, ssse3));
-decl_fgy_32x32xn_fn(BF(dav1d_fgy_32x32xn, ssse3));
-decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i420, ssse3));
-decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i422, ssse3));
-decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i444, ssse3));
+#define decl_fg_fns(ext) \
+decl_generate_grain_y_fn(BF(dav1d_generate_grain_y, ext)); \
+decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_420, ext)); \
+decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_422, ext)); \
+decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_444, ext)); \
+decl_fgy_32x32xn_fn(BF(dav1d_fgy_32x32xn, ext)); \
+decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i420, ext)); \
+decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i422, ext)); \
+decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i444, ext))
-decl_generate_grain_y_fn(BF(dav1d_generate_grain_y, avx2));
-decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_420, avx2));
-decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_422, avx2));
-decl_generate_grain_uv_fn(BF(dav1d_generate_grain_uv_444, avx2));
-decl_fgy_32x32xn_fn(BF(dav1d_fgy_32x32xn, avx2));
-decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i420, avx2));
-decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i422, avx2));
-decl_fguv_32x32xn_fn(BF(dav1d_fguv_32x32xn_i444, avx2));
+decl_fg_fns(ssse3);
+decl_fg_fns(avx2);
+decl_fg_fns(avx512icl);
COLD void bitfn(dav1d_film_grain_dsp_init_x86)(Dav1dFilmGrainDSPContext *const c) {
const unsigned flags = dav1d_get_cpu_flags();
@@ -68,11 +64,20 @@ COLD void bitfn(dav1d_film_grain_dsp_init_x86)(Dav1dFilmGrainDSPContext *const c
c->generate_grain_uv[DAV1D_PIXEL_LAYOUT_I422 - 1] = BF(dav1d_generate_grain_uv_422, avx2);
c->generate_grain_uv[DAV1D_PIXEL_LAYOUT_I444 - 1] = BF(dav1d_generate_grain_uv_444, avx2);
- if (flags & DAV1D_X86_CPU_FLAG_SLOW_GATHER) return;
+ if (!(flags & DAV1D_X86_CPU_FLAG_SLOW_GATHER)) {
+ c->fgy_32x32xn = BF(dav1d_fgy_32x32xn, avx2);
+ c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I420 - 1] = BF(dav1d_fguv_32x32xn_i420, avx2);
+ c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I422 - 1] = BF(dav1d_fguv_32x32xn_i422, avx2);
+ c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I444 - 1] = BF(dav1d_fguv_32x32xn_i444, avx2);
+ }
- c->fgy_32x32xn = BF(dav1d_fgy_32x32xn, avx2);
- c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I420 - 1] = BF(dav1d_fguv_32x32xn_i420, avx2);
- c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I422 - 1] = BF(dav1d_fguv_32x32xn_i422, avx2);
- c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I444 - 1] = BF(dav1d_fguv_32x32xn_i444, avx2);
+#if BITDEPTH == 8
+ if (!(flags & DAV1D_X86_CPU_FLAG_AVX512ICL)) return;
+
+ c->fgy_32x32xn = BF(dav1d_fgy_32x32xn, avx512icl);
+ c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I420 - 1] = BF(dav1d_fguv_32x32xn_i420, avx512icl);
+ c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I422 - 1] = BF(dav1d_fguv_32x32xn_i422, avx512icl);
+ c->fguv_32x32xn[DAV1D_PIXEL_LAYOUT_I444 - 1] = BF(dav1d_fguv_32x32xn_i444, avx512icl);
+#endif
#endif
}
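
After this change the init function follows a three-tier dispatch: the
AVX2 fgy/fguv pointers are installed only on CPUs with fast gathers,
while the new AVX-512 (Ice Lake) kernels, which avoid gathers entirely
via the vpermi2b LUT trick, are installed for 8 bpc regardless of the
gather flag. A condensed sketch of the resulting control flow (comments
stand in for the pointer assignments shown above; the ARCH_X86_64 guard
around the AVX2+ tiers is omitted):

    COLD void bitfn(dav1d_film_grain_dsp_init_x86)
                   (Dav1dFilmGrainDSPContext *const c)
    {
        const unsigned flags = dav1d_get_cpu_flags();

        if (!(flags & DAV1D_X86_CPU_FLAG_SSSE3)) return;
        /* ssse3: grain generation + fgy/fguv apply */

        if (!(flags & DAV1D_X86_CPU_FLAG_AVX2)) return;
        /* avx2: grain generation */

        if (!(flags & DAV1D_X86_CPU_FLAG_SLOW_GATHER)) {
            /* avx2: fgy/fguv apply (relies on fast vpgatherdd) */
        }
    #if BITDEPTH == 8
        if (!(flags & DAV1D_X86_CPU_FLAG_AVX512ICL)) return;
        /* avx512icl: fgy/fguv apply, gather-free */
    #endif
    }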
diff --git a/src/x86/filmgrain_sse.asm b/src/x86/filmgrain_sse.asm
index 2a16722..0172f98 100644
--- a/src/x86/filmgrain_sse.asm
+++ b/src/x86/filmgrain_sse.asm
@@ -25,6 +25,7 @@
%include "config.asm"
%include "ext/x86/x86inc.asm"
+%include "x86/filmgrain_common.asm"
SECTION_RODATA
@@ -66,28 +67,6 @@ JMP_TABLE generate_grain_uv_420, ssse3, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_422, ssse3, 0, 1, 2, 3
JMP_TABLE generate_grain_uv_444, ssse3, 0, 1, 2, 3
-struc FGData
- .seed: resd 1
- .num_y_points: resd 1
- .y_points: resb 14 * 2
- .chroma_scaling_from_luma: resd 1
- .num_uv_points: resd 2
- .uv_points: resb 2 * 10 * 2
- .scaling_shift: resd 1
- .ar_coeff_lag: resd 1
- .ar_coeffs_y: resb 24
- .ar_coeffs_uv: resb 2 * 28 ; includes padding
- .ar_coeff_shift: resq 1
- .grain_scale_shift: resd 1
- .uv_mult: resd 2
- .uv_luma_mult: resd 2
- .uv_offset: resd 2
- .overlap_flag: resd 1
- .clip_to_restricted_range: resd 1
-endstruc
-
-cextern gaussian_sequence
-
SECTION .text
%if ARCH_X86_32
diff --git a/tests/checkasm/filmgrain.c b/tests/checkasm/filmgrain.c
index 305a4f7..ff7ffc3 100644
--- a/tests/checkasm/filmgrain.c
+++ b/tests/checkasm/filmgrain.c
@@ -155,6 +155,7 @@ static void check_fgy_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
if (check_func(dsp->fgy_32x32xn, "fgy_32x32xn_%dbpc", BITDEPTH)) {
ALIGN_STK_16(Dav1dFilmGrainData, fg_data, 16,);
+ ALIGN_STK_64(uint8_t, scaling, SCALING_SIZE,);
fg_data[0].seed = rnd() & 0xFFFF;
#if BITDEPTH == 16
@@ -163,7 +164,6 @@ static void check_fgy_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
const int bitdepth_max = 0xff;
#endif
- uint8_t scaling[SCALING_SIZE];
entry grain_lut[GRAIN_HEIGHT + 1][GRAIN_WIDTH];
fg_data[0].grain_scale_shift = rnd() & 3;
fg_data[0].ar_coeff_shift = (rnd() & 3) + 6;
@@ -267,6 +267,7 @@ static void check_fguv_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
BITDEPTH, ss_name[layout_idx], csfl))
{
ALIGN_STK_16(Dav1dFilmGrainData, fg_data, 1,);
+ ALIGN_STK_64(uint8_t, scaling, SCALING_SIZE,);
fg_data[0].seed = rnd() & 0xFFFF;
@@ -278,7 +279,6 @@ static void check_fguv_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
const int uv_pl = rnd() & 1;
const int is_identity = rnd() & 1;
- uint8_t scaling[SCALING_SIZE];
entry grain_lut[2][GRAIN_HEIGHT + 1][GRAIN_WIDTH];
fg_data[0].grain_scale_shift = rnd() & 3;
fg_data[0].ar_coeff_shift = (rnd() & 3) + 6;
@@ -368,7 +368,7 @@ static void check_fguv_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
checkasm_check_pixel_padded_align(c_dst, stride,
a_dst, stride,
w, h, "dst",
- 32 >> ss_x, 2);
+ 32 >> ss_x, 4);
}
}
@@ -380,7 +380,7 @@ static void check_fguv_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
luma_src[y * PXSTRIDE(lstride) + x] &= bitdepth_max;
}
}
- bench_new(a_dst, src, stride, fg_data, 32, scaling, grain_lut[1], 16,
+ bench_new(a_dst, src, stride, fg_data, 64 >> ss_x, scaling, grain_lut[1], 32 >> ss_y,
1, luma_src, lstride, uv_pl, is_identity HIGHBD_TAIL_SUFFIX);
}
}