cdef.asm « x86 « src - github.com/videolan/dav1d.git

; Copyright © 2018, VideoLAN and dav1d authors
; Copyright © 2018, Two Orioles, LLC
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are met:
;
; 1. Redistributions of source code must retain the above copyright notice, this
;    list of conditions and the following disclaimer.
;
; 2. Redistributions in binary form must reproduce the above copyright notice,
;    this list of conditions and the following disclaimer in the documentation
;    and/or other materials provided with the distribution.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
; ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

%include "config.asm"
%include "ext/x86/x86inc.asm"

%if ARCH_X86_64

SECTION_RODATA 32
pd_02564713: dd 0, 2, 5, 6, 4, 7, 1, 3
pd_47130256: dd 4, 7, 1, 3, 0, 2, 5, 6
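; 840/n (n=1..8) and 420/n (n=1..4): weights used in cdef_dir to normalize
; each squared partial sum by the number of pixels in its line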
div_table: dd 840, 420, 280, 210, 168, 140, 120, 105
           dd 420, 210, 140, 105
shufw_6543210x: db 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1, 14, 15
shufw_210xxxxx: db 4, 5, 2, 3, 0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
pw_128: times 2 dw 128
pw_2048: times 2 dw 2048
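; tap_table: 6 words of filter taps (pri_taps {4,2} or {3,3}, selected by
; pri_strength & 1, then sec_taps {2,1}), followed by one byte per direction
; and tap giving the pixel offset encoded as y*16 + x (the padded pixel
; buffer is 16 words, i.e. 32 bytes, per row)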
tap_table: dw 4, 2, 3, 3, 2, 1
           db -1 * 16 + 1, -2 * 16 + 2
           db  0 * 16 + 1, -1 * 16 + 2
           db  0 * 16 + 1,  0 * 16 + 2
           db  0 * 16 + 1,  1 * 16 + 2
           db  1 * 16 + 1,  2 * 16 + 2
           db  1 * 16 + 0,  2 * 16 + 1
           db  1 * 16 + 0,  2 * 16 + 0
           db  1 * 16 + 0,  2 * 16 - 1
           ; the last 6 are repeats of the first 6 so we don't need to & 7
           db -1 * 16 + 1, -2 * 16 + 2
           db  0 * 16 + 1, -1 * 16 + 2
           db  0 * 16 + 1,  0 * 16 + 2
           db  0 * 16 + 1,  1 * 16 + 2
           db  1 * 16 + 1,  2 * 16 + 2
           db  1 * 16 + 0,  2 * 16 + 1

SECTION .text

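; For one direction offset %1, load the two pixels at +off and -off around
; the center, fold them into the running max (m7) and min (m8) while
; ignoring 0x7fff padding, and add constrain(p - px) * tap to the sum (m15).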
%macro ACCUMULATE_TAP 6 ; tap_offset, shift, strength, mul_tap, w, stride
    ; load p0/p1
    movsx         offq, byte [dirq+kq+%1]       ; off1
%if %5 == 4
    movq           xm5, [stkq+offq*2+%6*0]      ; p0
    movq           xm6, [stkq+offq*2+%6*2]
    movhps         xm5, [stkq+offq*2+%6*1]
    movhps         xm6, [stkq+offq*2+%6*3]
    vinserti128     m5, xm6, 1
%else
    movu           xm5, [stkq+offq*2+%6*0]      ; p0
    vinserti128     m5, [stkq+offq*2+%6*1], 1
%endif
    neg           offq                          ; -off1
%if %5 == 4
    movq           xm6, [stkq+offq*2+%6*0]      ; p1
    movq           xm9, [stkq+offq*2+%6*2]
    movhps         xm6, [stkq+offq*2+%6*1]
    movhps         xm9, [stkq+offq*2+%6*3]
    vinserti128     m6, xm9, 1
%else
    movu           xm6, [stkq+offq*2+%6*0]      ; p1
    vinserti128     m6, [stkq+offq*2+%6*1], 1
%endif
    pcmpeqw         m9, m14, m5
    pcmpeqw        m10, m14, m6
    pandn           m9, m5
    pandn          m10, m6
    pmaxsw          m7, m9                      ; max after p0
    pminsw          m8, m5                      ; min after p0
    pmaxsw          m7, m10                     ; max after p1
    pminsw          m8, m6                      ; min after p1

    ; accumulate sum[m15] over p0/p1
    psubw           m5, m4                      ; diff_p0(p0 - px)
    psubw           m6, m4                      ; diff_p1(p1 - px)
    pabsw           m9, m5
    pabsw          m10, m6
    psignw         m11, %4, m5
    psignw         m12, %4, m6
    psrlw           m5, m9, %2
    psrlw           m6, m10, %2
    psubusw         m5, %3, m5
    psubusw         m6, %3, m6
    pminsw          m5, m9                      ; constrain(diff_p0)
    pminsw          m6, m10                     ; constrain(diff_p1)
    pmullw          m5, m11                     ; constrain(diff_p0) * taps
    pmullw          m6, m12                     ; constrain(diff_p1) * taps
    paddw          m15, m5
    paddw          m15, m6
%endmacro

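; Stage 1 of each filter function copies the %1x%2 block plus a 2-pixel
; border into a 16-bit stack buffer, substituting 0x7fff on any side that
; the edge flags (bit 0 = left, 1 = right, 2 = top, 3 = bottom) mark as
; unavailable; stage 2 then filters entirely from that padded buffer.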
%macro cdef_filter_fn 3 ; w, h, stride
INIT_YMM avx2
%if %1 != 4 || %2 != 8
cglobal cdef_filter_%1x%2, 4, 9, 16, 2 * 16 + (%2+4)*%3, \
                           dst, stride, left, top, pri, sec, stride3, dst4, edge
%else
cglobal cdef_filter_%1x%2, 4, 10, 16, 2 * 16 + (%2+4)*%3, \
                           dst, stride, left, top, pri, sec, stride3, dst4, edge
%endif
%define px rsp+2*16+2*%3 ; padded pixel buffer, past the 2 top-padding rows
    pcmpeqw        m14, m14
    psrlw          m14, 1                   ; 0x7fff, sentinel for unavailable edge pixels
    mov          edged, r8m

    ; prepare pixel buffers - body/right
%if %1 == 4
    INIT_XMM avx2
%endif
%if %2 == 8
    lea          dst4q, [dstq+strideq*4]
%endif
    lea       stride3q, [strideq*3]
    test         edged, 2                   ; have_right
    jz .no_right
    pmovzxbw        m1, [dstq+strideq*0]
    pmovzxbw        m2, [dstq+strideq*1]
    pmovzxbw        m3, [dstq+strideq*2]
    pmovzxbw        m4, [dstq+stride3q]
    mova     [px+0*%3], m1
    mova     [px+1*%3], m2
    mova     [px+2*%3], m3
    mova     [px+3*%3], m4
%if %2 == 8
    pmovzxbw        m1, [dst4q+strideq*0]
    pmovzxbw        m2, [dst4q+strideq*1]
    pmovzxbw        m3, [dst4q+strideq*2]
    pmovzxbw        m4, [dst4q+stride3q]
    mova     [px+4*%3], m1
    mova     [px+5*%3], m2
    mova     [px+6*%3], m3
    mova     [px+7*%3], m4
%endif
    jmp .body_done
.no_right:
%if %1 == 4
    movd           xm1, [dstq+strideq*0]
    movd           xm2, [dstq+strideq*1]
    movd           xm3, [dstq+strideq*2]
    movd           xm4, [dstq+stride3q]
    pmovzxbw       xm1, xm1
    pmovzxbw       xm2, xm2
    pmovzxbw       xm3, xm3
    pmovzxbw       xm4, xm4
    movq     [px+0*%3], xm1
    movq     [px+1*%3], xm2
    movq     [px+2*%3], xm3
    movq     [px+3*%3], xm4
%else
    pmovzxbw       xm1, [dstq+strideq*0]
    pmovzxbw       xm2, [dstq+strideq*1]
    pmovzxbw       xm3, [dstq+strideq*2]
    pmovzxbw       xm4, [dstq+stride3q]
    mova     [px+0*%3], xm1
    mova     [px+1*%3], xm2
    mova     [px+2*%3], xm3
    mova     [px+3*%3], xm4
%endif
    movd [px+0*%3+%1*2], xm14
    movd [px+1*%3+%1*2], xm14
    movd [px+2*%3+%1*2], xm14
    movd [px+3*%3+%1*2], xm14
%if %2 == 8
 %if %1 == 4
    movd           xm1, [dst4q+strideq*0]
    movd           xm2, [dst4q+strideq*1]
    movd           xm3, [dst4q+strideq*2]
    movd           xm4, [dst4q+stride3q]
    pmovzxbw       xm1, xm1
    pmovzxbw       xm2, xm2
    pmovzxbw       xm3, xm3
    pmovzxbw       xm4, xm4
    movq     [px+4*%3], xm1
    movq     [px+5*%3], xm2
    movq     [px+6*%3], xm3
    movq     [px+7*%3], xm4
 %else
    pmovzxbw       xm1, [dst4q+strideq*0]
    pmovzxbw       xm2, [dst4q+strideq*1]
    pmovzxbw       xm3, [dst4q+strideq*2]
    pmovzxbw       xm4, [dst4q+stride3q]
    mova     [px+4*%3], xm1
    mova     [px+5*%3], xm2
    mova     [px+6*%3], xm3
    mova     [px+7*%3], xm4
 %endif
    movd [px+4*%3+%1*2], xm14
    movd [px+5*%3+%1*2], xm14
    movd [px+6*%3+%1*2], xm14
    movd [px+7*%3+%1*2], xm14
%endif
.body_done:

    ; top
    DEFINE_ARGS dst, stride, left, top2, pri, sec, stride3, top1, edge
    test         edged, 4                    ; have_top
    jz .no_top
    mov          top1q, [top2q+0*gprsize]
    mov          top2q, [top2q+1*gprsize]
    test         edged, 1                    ; have_left
    jz .top_no_left
    test         edged, 2                    ; have_right
    jz .top_no_right
    pmovzxbw        m1, [top1q-(%1/2)]
    pmovzxbw        m2, [top2q-(%1/2)]
    movu  [px-2*%3-%1], m1
    movu  [px-1*%3-%1], m2
    jmp .top_done
.top_no_right:
    pmovzxbw        m1, [top1q-%1]
    pmovzxbw        m2, [top2q-%1]
    movu [px-2*%3-%1*2], m1
    movu [px-1*%3-%1*2], m2
    movd [px-2*%3+%1*2], xm14
    movd [px-1*%3+%1*2], xm14
    jmp .top_done
.top_no_left:
    test         edged, 2                   ; have_right
    jz .top_no_left_right
    pmovzxbw        m1, [top1q]
    pmovzxbw        m2, [top2q]
    mova   [px-2*%3+0], m1
    mova   [px-1*%3+0], m2
    movd   [px-2*%3-4], xm14
    movd   [px-1*%3-4], xm14
    jmp .top_done
.top_no_left_right:
%if %1 == 4
    movd           xm1, [top1q]
    pinsrd         xm1, [top2q], 1
    pmovzxbw       xm1, xm1
    movq   [px-2*%3+0], xm1
    movhps [px-1*%3+0], xm1
%else
    pmovzxbw       xm1, [top1q]
    pmovzxbw       xm2, [top2q]
    mova   [px-2*%3+0], xm1
    mova   [px-1*%3+0], xm2
%endif
    movd   [px-2*%3-4], xm14
    movd   [px-1*%3-4], xm14
    movd [px-2*%3+%1*2], xm14
    movd [px-1*%3+%1*2], xm14
    jmp .top_done
.no_top:
    movu   [px-2*%3-%1], m14
    movu   [px-1*%3-%1], m14
.top_done:

    ; left
    test         edged, 1                   ; have_left
    jz .no_left
    pmovzxbw       xm1, [leftq+ 0]
%if %2 == 8
    pmovzxbw       xm2, [leftq+ 8]
%endif
    movd   [px+0*%3-4], xm1
    pextrd [px+1*%3-4], xm1, 1
    pextrd [px+2*%3-4], xm1, 2
    pextrd [px+3*%3-4], xm1, 3
%if %2 == 8
    movd   [px+4*%3-4], xm2
    pextrd [px+5*%3-4], xm2, 1
    pextrd [px+6*%3-4], xm2, 2
    pextrd [px+7*%3-4], xm2, 3
%endif
    jmp .left_done
.no_left:
    movd   [px+0*%3-4], xm14
    movd   [px+1*%3-4], xm14
    movd   [px+2*%3-4], xm14
    movd   [px+3*%3-4], xm14
%if %2 == 8
    movd   [px+4*%3-4], xm14
    movd   [px+5*%3-4], xm14
    movd   [px+6*%3-4], xm14
    movd   [px+7*%3-4], xm14
%endif
.left_done:

    ; bottom
    DEFINE_ARGS dst, stride, dst8, dummy1, pri, sec, stride3, dummy3, edge
    test         edged, 8                   ; have_bottom
    jz .no_bottom
    lea          dst8q, [dstq+%2*strideq]
    test         edged, 1                   ; have_left
    jz .bottom_no_left
    test         edged, 2                   ; have_right
    jz .bottom_no_right
    pmovzxbw        m1, [dst8q-(%1/2)]
    pmovzxbw        m2, [dst8q+strideq-(%1/2)]
    movu   [px+(%2+0)*%3-%1], m1
    movu   [px+(%2+1)*%3-%1], m2
    jmp .bottom_done
.bottom_no_right:
    pmovzxbw        m1, [dst8q-%1]
    pmovzxbw        m2, [dst8q+strideq-%1]
    movu  [px+(%2+0)*%3-%1*2], m1
    movu  [px+(%2+1)*%3-%1*2], m2
%if %1 == 8
    movd  [px+(%2-1)*%3+%1*2], xm14                ; overwritten by previous movu
%endif
    movd  [px+(%2+0)*%3+%1*2], xm14
    movd  [px+(%2+1)*%3+%1*2], xm14
    jmp .bottom_done
.bottom_no_left:
    test          edged, 2                  ; have_right
    jz .bottom_no_left_right
    pmovzxbw        m1, [dst8q]
    pmovzxbw        m2, [dst8q+strideq]
    mova   [px+(%2+0)*%3+0], m1
    mova   [px+(%2+1)*%3+0], m2
    movd   [px+(%2+0)*%3-4], xm14
    movd   [px+(%2+1)*%3-4], xm14
    jmp .bottom_done
.bottom_no_left_right:
%if %1 == 4
    movd           xm1, [dst8q]
    pinsrd         xm1, [dst8q+strideq], 1
    pmovzxbw       xm1, xm1
    movq   [px+(%2+0)*%3+0], xm1
    movhps [px+(%2+1)*%3+0], xm1
%else
    pmovzxbw       xm1, [dst8q]
    pmovzxbw       xm2, [dst8q+strideq]
    mova   [px+(%2+0)*%3+0], xm1
    mova   [px+(%2+1)*%3+0], xm2
%endif
    movd   [px+(%2+0)*%3-4], xm14
    movd   [px+(%2+1)*%3-4], xm14
    movd  [px+(%2+0)*%3+%1*2], xm14
    movd  [px+(%2+1)*%3+%1*2], xm14
    jmp .bottom_done
.no_bottom:
    movu   [px+(%2+0)*%3-%1], m14
    movu   [px+(%2+1)*%3-%1], m14
.bottom_done:

    ; actual filter
    INIT_YMM avx2
    DEFINE_ARGS dst, stride, pridmp, damping, pri, sec, stride3, secdmp
%undef edged
    movifnidn     prid, prim
    movifnidn     secd, secm
    mov       dampingd, r7m

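    ; pri_shift = max(0, damping - log2(pri_strength)), same for sec_shift;
    ; lzcnt(x|1) gives 31 - floor(log2(x|1)), so lzcnt + damping - 31 is the
    ; shift, and the cmovl below clamps negative results to zero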
    mov        pridmpd, prid
    mov        secdmpd, secd
    or         pridmpd, 1
    or         secdmpd, 1
    lzcnt      pridmpd, pridmpd
    lzcnt      secdmpd, secdmpd
    lea        pridmpd, [pridmpd+dampingd-31]
    lea        secdmpd, [secdmpd+dampingd-31]
    xor       dampingd, dampingd
    test       pridmpd, pridmpd
    cmovl      pridmpd, dampingd
    test       secdmpd, secdmpd
    cmovl      secdmpd, dampingd
    mov        [rsp+0], pridmpq                 ; pri_shift
    mov        [rsp+8], secdmpq                 ; sec_shift

    ; pri/sec_taps[k] [4 total]
    DEFINE_ARGS dst, stride, tap, dummy, pri, sec, stride3
    movd           xm0, prid
    movd           xm1, secd
    vpbroadcastw    m0, xm0                     ; pri_strength
    vpbroadcastw    m1, xm1                     ; sec_strength
    and           prid, 1
    lea           tapq, [tap_table]
    lea           priq, [tapq+priq*4]           ; pri_taps
    lea           secq, [tapq+8]                ; sec_taps

    ; off1/2/3[k] [6 total] from [tapq+12+(dir+0/2/6)*2+k]
    DEFINE_ARGS dst, stride, tap, dir, pri, sec, stride3
    mov           dird, r6m
    lea           tapq, [tapq+dirq*2+12]
%if %1*%2*2/mmsize > 1
 %if %1 == 4
    DEFINE_ARGS dst, stride, dir, stk, pri, sec, stride3, h, off, k
 %else
    DEFINE_ARGS dst, stride, dir, stk, pri, sec, h, off, k
 %endif
    mov             hd, %1*%2*2/mmsize
%else
    DEFINE_ARGS dst, stride, dir, stk, pri, sec, stride3, off, k
%endif
    lea           stkq, [px]
    pxor           m13, m13
%if %1*%2*2/mmsize > 1
.v_loop:
%endif
    mov             kd, 1
%if %1 == 4
    movq           xm4, [stkq+%3*0]
    movhps         xm4, [stkq+%3*1]
    movq           xm5, [stkq+%3*2]
    movhps         xm5, [stkq+%3*3]
    vinserti128     m4, xm5, 1
%else
    mova           xm4, [stkq+%3*0]             ; px
    vinserti128     m4, [stkq+%3*1], 1
%endif
    pxor           m15, m15                     ; sum
    mova            m7, m4                      ; max
    mova            m8, m4                      ; min
.k_loop:
    vpbroadcastw    m2, [priq+kq*2]             ; pri_taps
    vpbroadcastw    m3, [secq+kq*2]             ; sec_taps

    ACCUMULATE_TAP 0*2, [rsp+0], m0, m2, %1, %3
    ACCUMULATE_TAP 2*2, [rsp+8], m1, m3, %1, %3
    ACCUMULATE_TAP 6*2, [rsp+8], m1, m3, %1, %3

    dec             kq
    jge .k_loop

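    ; dst = clamp(px + ((sum - (sum < 0) + 8) >> 4), min, max);
    ; pmulhrsw against 2048 implements the rounded shift by 4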
    vpbroadcastd   m12, [pw_2048]
    pcmpgtw        m11, m13, m15
    paddw          m15, m11
    pmulhrsw       m15, m12
    paddw           m4, m15
    pminsw          m4, m7
    pmaxsw          m4, m8
    packuswb        m4, m4
    vextracti128   xm5, m4, 1
%if %1 == 4
    movd [dstq+strideq*0], xm4
    pextrd [dstq+strideq*1], xm4, 1
    movd [dstq+strideq*2], xm5
    pextrd [dstq+stride3q], xm5, 1
%else
    movq [dstq+strideq*0], xm4
    movq [dstq+strideq*1], xm5
%endif

%if %1*%2*2/mmsize > 1
 %define vloop_lines (mmsize/(%1*2))
    lea           dstq, [dstq+strideq*vloop_lines]
    add           stkq, %3*vloop_lines
    dec             hd
    jg .v_loop
%endif

    RET
%endmacro

cdef_filter_fn 8, 8, 32
cdef_filter_fn 4, 8, 32
cdef_filter_fn 4, 4, 32

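; cdef_dir: find the dominant direction of an 8x8 block. Pixels are summed
; along 8 candidate directions, each partial sum is squared and normalized
; by its line length (div_table), and the direction with the highest cost
; wins; (best_cost - cost[dir ^ 4]) >> 10 is stored as the variance.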
INIT_YMM avx2
cglobal cdef_dir, 3, 4, 15, src, stride, var, stride3
    lea       stride3q, [strideq*3]
    movq           xm0, [srcq+strideq*0]
    movq           xm1, [srcq+strideq*1]
    movq           xm2, [srcq+strideq*2]
    movq           xm3, [srcq+stride3q]
    lea           srcq, [srcq+strideq*4]
    vpbroadcastq    m4, [srcq+strideq*0]
    vpbroadcastq    m5, [srcq+strideq*1]
    vpbroadcastq    m6, [srcq+strideq*2]
    vpbroadcastq    m7, [srcq+stride3q]
    vpbroadcastd    m8, [pw_128]
    pxor            m9, m9

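    ; interleave rows across ymm lanes (m0 = row 0 | row 7, m1 = row 1 | row 6,
    ; m2 = row 2 | row 5, m3 = row 3 | row 4), widen to 16-bit and subtract
    ; 128 so that all partial sums are centered around zero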
    vpblendd        m0, m0, m7, 0xf0
    vpblendd        m1, m1, m6, 0xf0
    vpblendd        m2, m2, m5, 0xf0
    vpblendd        m3, m3, m4, 0xf0

    punpcklbw       m0, m9
    punpcklbw       m1, m9
    punpcklbw       m2, m9
    punpcklbw       m3, m9

    psubw           m0, m8
    psubw           m1, m8
    psubw           m2, m8
    psubw           m3, m8

    ; shuffle registers to generate partial_sum_diag[0-1] together
    vpermq          m7, m0, q1032
    vpermq          m6, m1, q1032
    vpermq          m5, m2, q1032
    vpermq          m4, m3, q1032

    ; start with partial_sum_hv[0-1]
    paddw           m8, m0, m1
    paddw           m9, m2, m3
    phaddw         m10, m0, m1
    phaddw         m11, m2, m3
    paddw           m8, m9
    phaddw         m10, m11
    vextracti128   xm9, m8, 1
    vextracti128  xm11, m10, 1
    paddw          xm8, xm9                 ; partial_sum_hv[1]
    phaddw        xm10, xm11                ; partial_sum_hv[0]
    vinserti128     m8, xm10, 1
    vpbroadcastd    m9, [div_table+44]      ; 105 = 840/8, each hv line sums 8 pixels
    pmaddwd         m8, m8
    pmulld          m8, m9                  ; cost6[2a-d] | cost2[a-d]

    ; create aggregates [lower half]:
    ; m9 = m0:01234567+m1:x0123456+m2:xx012345+m3:xxx01234+
    ;      m4:xxxx0123+m5:xxxxx012+m6:xxxxxx01+m7:xxxxxxx0
    ; m10=             m1:7xxxxxxx+m2:67xxxxxx+m3:567xxxxx+
    ;      m4:4567xxxx+m5:34567xxx+m6:234567xx+m7:1234567x
    ; and [upper half]:
    ; m9 = m0:xxxxxxx0+m1:xxxxxx01+m2:xxxxx012+m3:xxxx0123+
    ;      m4:xxx01234+m5:xx012345+m6:x0123456+m7:01234567
    ; m10= m0:1234567x+m1:234567xx+m2:34567xxx+m3:4567xxxx+
    ;      m4:567xxxxx+m5:67xxxxxx+m6:7xxxxxxx
    ; and then shuffle m11 [shufw_6543210x], unpcklwd, pmaddwd, pmulld, paddd

    pslldq          m9, m1, 2
    psrldq         m10, m1, 14
    pslldq         m11, m2, 4
    psrldq         m12, m2, 12
    pslldq         m13, m3, 6
    psrldq         m14, m3, 10
    paddw           m9, m11
    paddw          m10, m12
    paddw           m9, m13
    paddw          m10, m14
    pslldq         m11, m4, 8
    psrldq         m12, m4, 8
    pslldq         m13, m5, 10
    psrldq         m14, m5, 6
    paddw           m9, m11
    paddw          m10, m12
    paddw           m9, m13
    paddw          m10, m14
    pslldq         m11, m6, 12
    psrldq         m12, m6, 4
    pslldq         m13, m7, 14
    psrldq         m14, m7, 2
    paddw           m9, m11
    paddw          m10, m12
    paddw           m9, m13
    paddw          m10, m14                 ; partial_sum_diag[0/1][8-14,zero]
    vbroadcasti128 m14, [shufw_6543210x]
    vbroadcasti128 m13, [div_table+16]      ; 840/5..840/8
    vbroadcasti128 m12, [div_table+0]       ; 840/1..840/4
    paddw           m9, m0                  ; partial_sum_diag[0/1][0-7]
    pshufb         m10, m14
    punpckhwd      m11, m9, m10
    punpcklwd       m9, m10
    pmaddwd        m11, m11
    pmaddwd         m9, m9
    pmulld         m11, m13
    pmulld          m9, m12
    paddd           m9, m11                 ; cost0[a-d] | cost4[a-d]

    ; merge horizontally and vertically for partial_sum_alt[0-3]
    paddw          m10, m0, m1
    paddw          m11, m2, m3
    paddw          m12, m4, m5
    paddw          m13, m6, m7
    phaddw          m0, m4
    phaddw          m1, m5
    phaddw          m2, m6
    phaddw          m3, m7

    ; create aggregates [lower half]:
    ; m4 = m10:01234567+m11:x0123456+m12:xx012345+m13:xxx01234
    ; m11=              m11:7xxxxxxx+m12:67xxxxxx+m13:567xxxxx
    ; and [upper half]:
    ; m4 = m10:xxx01234+m11:xx012345+m12:x0123456+m13:01234567
    ; m11= m10:567xxxxx+m11:67xxxxxx+m12:7xxxxxxx
    ; and then shuffle m11 [shufw_210xxxxx], unpcklwd, pmaddwd, pmulld, paddd

    vbroadcasti128 m14, [shufw_210xxxxx]
    pslldq          m4, m11, 2
    psrldq         m11, 14
    pslldq          m5, m12, 4
    psrldq         m12, 12
    pslldq          m6, m13, 6
    psrldq         m13, 10
    paddw           m4, m10
    paddw          m11, m12
    vpbroadcastd   m12, [div_table+44]      ; 105
    paddw           m5, m6
    paddw          m11, m13                 ; partial_sum_alt[3/2] right
    vbroadcasti128 m13, [div_table+32]      ; 420/1..420/4
    paddw           m4, m5                  ; partial_sum_alt[3/2] left
    pshufb         m11, m14
    punpckhwd       m6, m4, m11
    punpcklwd       m4, m11
    pmaddwd         m6, m6
    pmaddwd         m4, m4
    pmulld          m6, m12
    pmulld          m4, m13
    paddd           m4, m6                  ; cost7[a-d] | cost5[a-d]

    ; create aggregates [lower half]:
    ; m5 = m0:01234567+m1:x0123456+m2:xx012345+m3:xxx01234
    ; m1 =             m1:7xxxxxxx+m2:67xxxxxx+m3:567xxxxx
    ; and [upper half]:
    ; m5 = m0:xxx01234+m1:xx012345+m2:x0123456+m3:01234567
    ; m1 = m0:567xxxxx+m1:67xxxxxx+m2:7xxxxxxx
    ; and then shuffle m11 [shufw_210xxxxx], unpcklwd, pmaddwd, pmulld, paddd

    pslldq          m5, m1, 2
    psrldq          m1, 14
    pslldq          m6, m2, 4
    psrldq          m2, 12
    pslldq          m7, m3, 6
    psrldq          m3, 10
    paddw           m5, m0
    paddw           m1, m2
    paddw           m6, m7
    paddw           m1, m3                  ; partial_sum_alt[0/1] right
    paddw           m5, m6                  ; partial_sum_alt[0/1] left
    pshufb          m1, m14
    punpckhwd       m6, m5, m1
    punpcklwd       m5, m1
    pmaddwd         m6, m6
    pmaddwd         m5, m5
    pmulld          m6, m12
    pmulld          m5, m13
    paddd           m5, m6                  ; cost1[a-d] | cost3[a-d]

    mova           xm0, [pd_47130256+ 16]
    mova            m1, [pd_47130256]
    phaddd          m9, m8
    phaddd          m5, m4
    phaddd          m9, m5
    vpermd          m0, m9                  ; cost[0-3]
    vpermd          m1, m9                  ; cost[4-7] | cost[0-3]

    ; now find the best cost
    pmaxsd         xm2, xm0, xm1
    pshufd         xm3, xm2, q3232
    pmaxsd         xm2, xm3
    pshufd         xm3, xm2, q1111
    pmaxsd         xm2, xm3
    pshufd         xm2, xm2, q0000 ; best cost

    ; find the idx using minpos
    ; make everything other than the best cost negative via subtraction
    ; find the min of unsigned 16-bit ints to sort out the negative values
    psubd          xm4, xm1, xm2
    psubd          xm3, xm0, xm2
    packssdw       xm3, xm4
    phminposuw     xm3, xm3

    ; convert idx to 32-bits
    psrldq         xm3, 2
    movd           eax, xm3

    ; get idx^4 complement; var = (best_cost - cost[idx ^ 4]) >> 10
    vpermd          m3, m1
    psubd          xm2, xm3
    psrld          xm2, 10
    movd        [varq], xm2
    RET
%endif ; ARCH_X86_64