github.com/mono/boringssl.git
author     David Benjamin <davidben@google.com>   2016-02-24 00:25:40 +0300
committer  Adam Langley <agl@google.com>          2016-02-26 03:41:48 +0300
commit     f28caea521aab668abf83629dc8116a518f53459 (patch)
tree       48e1717a876b83f996f194da3188c18348df7c5f /crypto/poly1305
parent     8ccc3c383a3dc9f3fee1974c08f4f93b3e38404e (diff)
Check in pristine copies of upstream's poly1305 assembly.
Taken from 6b2ebe4332e22b4eb7dd6fadf418e3da7b926ca4. These don't do anything
right now but are checked in unmodified to make diffs easier to see.

Change-Id: I4f5bdb7b16f4ac27e7ef175f475540c481b8d593
Reviewed-on: https://boringssl-review.googlesource.com/7224
Reviewed-by: Adam Langley <agl@google.com>
Diffstat (limited to 'crypto/poly1305')
-rwxr-xr-x  crypto/poly1305/asm/poly1305-armv4.pl   1216
-rwxr-xr-x  crypto/poly1305/asm/poly1305-armv8.pl    925
-rwxr-xr-x  crypto/poly1305/asm/poly1305-x86.pl     1794
-rwxr-xr-x  crypto/poly1305/asm/poly1305-x86_64.pl  2246
4 files changed, 6181 insertions, 0 deletions
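
Before diving into the per-architecture assembly, it helps to fix the algorithm
all four modules implement. The following is an illustrative pure-Python
reference of the Poly1305 MAC in its RFC 7539 formulation (it is not part of
the patch, and the helper name poly1305_mac is ours); the clamp masks
(0x0fffffff / 0x0ffffffc) match those applied in each poly1305_init, and the
pad bit plays the role of the $padbit argument threaded through each
poly1305_blocks:

    P1305 = (1 << 130) - 5          # the prime 2^130 - 5

    def poly1305_mac(key, msg):
        # key: 32 bytes = 16-byte r (clamped below) || 16-byte nonce s
        r = int.from_bytes(key[:16], "little")
        r &= 0x0ffffffc0ffffffc0ffffffc0fffffff   # clamp, as poly1305_init does
        s = int.from_bytes(key[16:32], "little")
        h = 0
        for off in range(0, len(msg), 16):
            block = msg[off:off + 16]
            # the pad bit sits just above the block: 2^128 for full blocks
            n = int.from_bytes(block, "little") | (1 << (8 * len(block)))
            h = ((h + n) * r) % P1305
        return ((h + s) & ((1 << 128) - 1)).to_bytes(16, "little")

Every trick in the files below (base 2^26 limbs, s1 = r1 + (r1 >> 2), lazy
reduction) is a way of evaluating the h = (h + n) * r step modulo 2^130 - 5
without multi-precision division.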
diff --git a/crypto/poly1305/asm/poly1305-armv4.pl b/crypto/poly1305/asm/poly1305-armv4.pl
new file mode 100755
index 00000000..65b79cf1
--- /dev/null
+++ b/crypto/poly1305/asm/poly1305-armv4.pl
@@ -0,0 +1,1216 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# IALU(*)/gcc-4.4 NEON
+#
+# ARM11xx(ARMv6) 7.78/+100% -
+# Cortex-A5 6.30/+130% 2.96
+# Cortex-A8 6.25/+115% 2.36
+# Cortex-A9 5.10/+95% 2.55
+# Cortex-A15 3.79/+85% 1.25(**)
+# Snapdragon S4 5.70/+100% 1.48(**)
+#
+# (*) this is for -march=armv6, i.e. with a bunch of ldrb instructions loading data;
+# (**) these are trade-off results, they can be improved by ~8% but at
+# the cost of 15/12% regression on Cortex-A5/A7, it's even possible
+# to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;
+
+$flavour = shift;
+if ($flavour=~/^\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+ open STDOUT,">$output";
+}
+
+($ctx,$inp,$len,$padbit)=map("r$_",(0..3));
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+#if defined(__thumb2__)
+.syntax unified
+.thumb
+#else
+.code 32
+#endif
+
+.globl poly1305_emit
+.globl poly1305_blocks
+.globl poly1305_init
+.type poly1305_init,%function
+.align 5
+poly1305_init:
+.Lpoly1305_init:
+ stmdb sp!,{r4-r11}
+
+ eor r3,r3,r3
+ cmp $inp,#0
+ str r3,[$ctx,#0] @ zero hash value
+ str r3,[$ctx,#4]
+ str r3,[$ctx,#8]
+ str r3,[$ctx,#12]
+ str r3,[$ctx,#16]
+ str r3,[$ctx,#36] @ is_base2_26
+ add $ctx,$ctx,#20
+
+#ifdef __thumb2__
+ it eq
+#endif
+ moveq r0,#0
+ beq .Lno_key
+
+#if __ARM_MAX_ARCH__>=7
+ adr r11,.Lpoly1305_init
+ ldr r12,.LOPENSSL_armcap
+#endif
+ ldrb r4,[$inp,#0]
+ mov r10,#0x0fffffff
+ ldrb r5,[$inp,#1]
+ and r3,r10,#-4 @ 0x0ffffffc
+ ldrb r6,[$inp,#2]
+ ldrb r7,[$inp,#3]
+ orr r4,r4,r5,lsl#8
+ ldrb r5,[$inp,#4]
+ orr r4,r4,r6,lsl#16
+ ldrb r6,[$inp,#5]
+ orr r4,r4,r7,lsl#24
+ ldrb r7,[$inp,#6]
+ and r4,r4,r10
+
+#if __ARM_MAX_ARCH__>=7
+ ldr r12,[r11,r12] @ OPENSSL_armcap_P
+# ifdef __APPLE__
+ ldr r12,[r12]
+# endif
+#endif
+ ldrb r8,[$inp,#7]
+ orr r5,r5,r6,lsl#8
+ ldrb r6,[$inp,#8]
+ orr r5,r5,r7,lsl#16
+ ldrb r7,[$inp,#9]
+ orr r5,r5,r8,lsl#24
+ ldrb r8,[$inp,#10]
+ and r5,r5,r3
+
+#if __ARM_MAX_ARCH__>=7
+ tst r12,#1 @ check for NEON
+# ifdef __APPLE__
+ adr r9,poly1305_blocks_neon
+ adr r11,poly1305_blocks
+# ifdef __thumb2__
+ it ne
+# endif
+ movne r11,r9
+ adr r12,poly1305_emit
+ adr r10,poly1305_emit_neon
+# ifdef __thumb2__
+ it ne
+# endif
+ movne r12,r10
+# else
+# ifdef __thumb2__
+ itete eq
+# endif
+ addeq r12,r11,#(poly1305_emit-.Lpoly1305_init)
+ addne r12,r11,#(poly1305_emit_neon-.Lpoly1305_init)
+ addeq r11,r11,#(poly1305_blocks-.Lpoly1305_init)
+ addne r11,r11,#(poly1305_blocks_neon-.Lpoly1305_init)
+# endif
+# ifdef __thumb2__
+ orr r12,r12,#1 @ thumb-ify address
+ orr r11,r11,#1
+# endif
+#endif
+ ldrb r9,[$inp,#11]
+ orr r6,r6,r7,lsl#8
+ ldrb r7,[$inp,#12]
+ orr r6,r6,r8,lsl#16
+ ldrb r8,[$inp,#13]
+ orr r6,r6,r9,lsl#24
+ ldrb r9,[$inp,#14]
+ and r6,r6,r3
+
+ ldrb r10,[$inp,#15]
+ orr r7,r7,r8,lsl#8
+ str r4,[$ctx,#0]
+ orr r7,r7,r9,lsl#16
+ str r5,[$ctx,#4]
+ orr r7,r7,r10,lsl#24
+ str r6,[$ctx,#8]
+ and r7,r7,r3
+ str r7,[$ctx,#12]
+#if __ARM_MAX_ARCH__>=7
+ stmia r2,{r11,r12} @ fill function table
+ mov r0,#1
+#else
+ mov r0,#0
+#endif
+.Lno_key:
+ ldmia sp!,{r4-r11}
+#if __ARM_ARCH__>=5
+ ret @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_init,.-poly1305_init
+___
+{
+my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12));
+my ($s1,$s2,$s3)=($r1,$r2,$r3);
+
+$code.=<<___;
+.type poly1305_blocks,%function
+.align 5
+poly1305_blocks:
+ stmdb sp!,{r3-r11,lr}
+
+ ands $len,$len,#-16
+ beq .Lno_data
+
+ cmp $padbit,#0
+ add $len,$len,$inp @ end pointer
+ sub sp,sp,#32
+
+ ldmia $ctx,{$h0-$r3} @ load context
+
+ str $ctx,[sp,#12] @ offload stuff
+ mov lr,$inp
+ str $len,[sp,#16]
+ str $r1,[sp,#20]
+ str $r2,[sp,#24]
+ str $r3,[sp,#28]
+ b .Loop
+
+.Loop:
+#if __ARM_ARCH__<7
+ ldrb r0,[lr],#16 @ load input
+# ifdef __thumb2__
+ it hi
+# endif
+ addhi $h4,$h4,#1 @ 1<<128
+ ldrb r1,[lr,#-15]
+ ldrb r2,[lr,#-14]
+ ldrb r3,[lr,#-13]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-12]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-11]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-10]
+ adds $h0,$h0,r3 @ accumulate input
+
+ ldrb r3,[lr,#-9]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-8]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-7]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-6]
+ adcs $h1,$h1,r3
+
+ ldrb r3,[lr,#-5]
+ orr r1,r0,r1,lsl#8
+ ldrb r0,[lr,#-4]
+ orr r2,r1,r2,lsl#16
+ ldrb r1,[lr,#-3]
+ orr r3,r2,r3,lsl#24
+ ldrb r2,[lr,#-2]
+ adcs $h2,$h2,r3
+
+ ldrb r3,[lr,#-1]
+ orr r1,r0,r1,lsl#8
+ str lr,[sp,#8] @ offload input pointer
+ orr r2,r1,r2,lsl#16
+ add $s1,$r1,$r1,lsr#2
+ orr r3,r2,r3,lsl#24
+#else
+ ldr r0,[lr],#16 @ load input
+# ifdef __thumb2__
+ it hi
+# endif
+ addhi $h4,$h4,#1 @ padbit
+ ldr r1,[lr,#-12]
+ ldr r2,[lr,#-8]
+ ldr r3,[lr,#-4]
+# ifdef __ARMEB__
+ rev r0,r0
+ rev r1,r1
+ rev r2,r2
+ rev r3,r3
+# endif
+ adds $h0,$h0,r0 @ accumulate input
+ str lr,[sp,#8] @ offload input pointer
+ adcs $h1,$h1,r1
+ add $s1,$r1,$r1,lsr#2
+ adcs $h2,$h2,r2
+#endif
+ add $s2,$r2,$r2,lsr#2
+ adcs $h3,$h3,r3
+ add $s3,$r3,$r3,lsr#2
+
+ umull r2,r3,$h1,$r0
+ adc $h4,$h4,#0
+ umull r0,r1,$h0,$r0
+ umlal r2,r3,$h4,$s1
+ umlal r0,r1,$h3,$s1
+ ldr $r1,[sp,#20] @ reload $r1
+ umlal r2,r3,$h2,$s3
+ umlal r0,r1,$h1,$s3
+ umlal r2,r3,$h3,$s2
+ umlal r0,r1,$h2,$s2
+ umlal r2,r3,$h0,$r1
+ str r0,[sp,#0] @ future $h0
+ mul r0,$s2,$h4
+ ldr $r2,[sp,#24] @ reload $r2
+ adds r2,r2,r1 @ d1+=d0>>32
+ eor r1,r1,r1
+ adc lr,r3,#0 @ future $h2
+ str r2,[sp,#4] @ future $h1
+
+ mul r2,$s3,$h4
+ eor r3,r3,r3
+ umlal r0,r1,$h3,$s3
+ ldr $r3,[sp,#28] @ reload $r3
+ umlal r2,r3,$h3,$r0
+ umlal r0,r1,$h2,$r0
+ umlal r2,r3,$h2,$r1
+ umlal r0,r1,$h1,$r1
+ umlal r2,r3,$h1,$r2
+ umlal r0,r1,$h0,$r2
+ umlal r2,r3,$h0,$r3
+ ldr $h0,[sp,#0]
+ mul $h4,$r0,$h4
+ ldr $h1,[sp,#4]
+
+ adds $h2,lr,r0 @ d2+=d1>>32
+ ldr lr,[sp,#8] @ reload input pointer
+ adc r1,r1,#0
+ adds $h3,r2,r1 @ d3+=d2>>32
+ ldr r0,[sp,#16] @ reload end pointer
+ adc r3,r3,#0
+ add $h4,$h4,r3 @ h4+=d3>>32
+
+ and r1,$h4,#-4
+ and $h4,$h4,#3
+ add r1,r1,r1,lsr#2 @ *=5
+ adds $h0,$h0,r1
+ adcs $h1,$h1,#0
+ adcs $h2,$h2,#0
+ adc $h3,$h3,#0
+
+ cmp r0,lr @ done yet?
+ bhi .Loop
+
+ ldr $ctx,[sp,#12]
+ add sp,sp,#32
+ stmia $ctx,{$h0-$h4} @ store the result
+
+.Lno_data:
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r3-r11,pc}
+#else
+ ldmia sp!,{r3-r11,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_blocks,.-poly1305_blocks
+___
+}
+{
+my ($ctx,$mac,$nonce)=map("r$_",(0..2));
+my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11));
+my $g4=$h4;
+
+$code.=<<___;
+.type poly1305_emit,%function
+.align 5
+poly1305_emit:
+ stmdb sp!,{r4-r11}
+.Lpoly1305_emit_enter:
+
+ ldmia $ctx,{$h0-$h4}
+ adds $g0,$h0,#5 @ compare to modulus
+ adcs $g1,$h1,#0
+ adcs $g2,$h2,#0
+ adcs $g3,$h3,#0
+ adc $g4,$h4,#0
+ tst $g4,#4 @ did it carry/borrow?
+
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h0,$g0
+ ldr $g0,[$nonce,#0]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h1,$g1
+ ldr $g1,[$nonce,#4]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h2,$g2
+ ldr $g2,[$nonce,#8]
+#ifdef __thumb2__
+ it ne
+#endif
+ movne $h3,$g3
+ ldr $g3,[$nonce,#12]
+
+ adds $h0,$h0,$g0
+ adcs $h1,$h1,$g1
+ adcs $h2,$h2,$g2
+ adc $h3,$h3,$g3
+
+#if __ARM_ARCH__>=7
+# ifdef __ARMEB__
+ rev $h0,$h0
+ rev $h1,$h1
+ rev $h2,$h2
+ rev $h3,$h3
+# endif
+ str $h0,[$mac,#0]
+ str $h1,[$mac,#4]
+ str $h2,[$mac,#8]
+ str $h3,[$mac,#12]
+#else
+ strb $h0,[$mac,#0]
+ mov $h0,$h0,lsr#8
+ strb $h1,[$mac,#4]
+ mov $h1,$h1,lsr#8
+ strb $h2,[$mac,#8]
+ mov $h2,$h2,lsr#8
+ strb $h3,[$mac,#12]
+ mov $h3,$h3,lsr#8
+
+ strb $h0,[$mac,#1]
+ mov $h0,$h0,lsr#8
+ strb $h1,[$mac,#5]
+ mov $h1,$h1,lsr#8
+ strb $h2,[$mac,#9]
+ mov $h2,$h2,lsr#8
+ strb $h3,[$mac,#13]
+ mov $h3,$h3,lsr#8
+
+ strb $h0,[$mac,#2]
+ mov $h0,$h0,lsr#8
+ strb $h1,[$mac,#6]
+ mov $h1,$h1,lsr#8
+ strb $h2,[$mac,#10]
+ mov $h2,$h2,lsr#8
+ strb $h3,[$mac,#14]
+ mov $h3,$h3,lsr#8
+
+ strb $h0,[$mac,#3]
+ strb $h1,[$mac,#7]
+ strb $h2,[$mac,#11]
+ strb $h3,[$mac,#15]
+#endif
+ ldmia sp!,{r4-r11}
+#if __ARM_ARCH__>=5
+ ret @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size poly1305_emit,.-poly1305_emit
+___
+{
+my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9));
+my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
+my ($T0,$T1,$MASK) = map("q$_",(15,4,0));
+
+my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7));
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.fpu neon
+
+.type poly1305_init_neon,%function
+.align 5
+poly1305_init_neon:
+ ldr r4,[$ctx,#20] @ load key base 2^32
+ ldr r5,[$ctx,#24]
+ ldr r6,[$ctx,#28]
+ ldr r7,[$ctx,#32]
+
+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
+ mov r3,r4,lsr#26
+ mov r4,r5,lsr#20
+ orr r3,r3,r5,lsl#6
+ mov r5,r6,lsr#14
+ orr r4,r4,r6,lsl#12
+ mov r6,r7,lsr#8
+ orr r5,r5,r7,lsl#18
+ and r3,r3,#0x03ffffff
+ and r4,r4,#0x03ffffff
+ and r5,r5,#0x03ffffff
+
+ vdup.32 $R0,r2 @ r^1 in both lanes
+ add r2,r3,r3,lsl#2 @ *5
+ vdup.32 $R1,r3
+ add r3,r4,r4,lsl#2
+ vdup.32 $S1,r2
+ vdup.32 $R2,r4
+ add r4,r5,r5,lsl#2
+ vdup.32 $S2,r3
+ vdup.32 $R3,r5
+ add r5,r6,r6,lsl#2
+ vdup.32 $S3,r4
+ vdup.32 $R4,r6
+ vdup.32 $S4,r5
+
+ mov $zeros,#2 @ counter
+
+.Lsquare_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+
+ vmull.u32 $D0,$R0,${R0}[1]
+ vmull.u32 $D1,$R1,${R0}[1]
+ vmull.u32 $D2,$R2,${R0}[1]
+ vmull.u32 $D3,$R3,${R0}[1]
+ vmull.u32 $D4,$R4,${R0}[1]
+
+ vmlal.u32 $D0,$R4,${S1}[1]
+ vmlal.u32 $D1,$R0,${R1}[1]
+ vmlal.u32 $D2,$R1,${R1}[1]
+ vmlal.u32 $D3,$R2,${R1}[1]
+ vmlal.u32 $D4,$R3,${R1}[1]
+
+ vmlal.u32 $D0,$R3,${S2}[1]
+ vmlal.u32 $D1,$R4,${S2}[1]
+ vmlal.u32 $D3,$R1,${R2}[1]
+ vmlal.u32 $D2,$R0,${R2}[1]
+ vmlal.u32 $D4,$R2,${R2}[1]
+
+ vmlal.u32 $D0,$R2,${S3}[1]
+ vmlal.u32 $D3,$R0,${R3}[1]
+ vmlal.u32 $D1,$R3,${S3}[1]
+ vmlal.u32 $D2,$R4,${S3}[1]
+ vmlal.u32 $D4,$R1,${R3}[1]
+
+ vmlal.u32 $D3,$R4,${S4}[1]
+ vmlal.u32 $D0,$R1,${S4}[1]
+ vmlal.u32 $D1,$R2,${S4}[1]
+ vmlal.u32 $D2,$R3,${S4}[1]
+ vmlal.u32 $D4,$R0,${R4}[1]
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ @ and P. Schwabe
+
+ vshr.u64 $T0,$D3,#26
+ vmovn.i64 $D3#lo,$D3
+ vshr.u64 $T1,$D0,#26
+ vmovn.i64 $D0#lo,$D0
+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4
+ vbic.i32 $D3#lo,#0xfc000000 @ &=0x03ffffff
+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1
+ vbic.i32 $D0#lo,#0xfc000000
+
+ vshrn.u64 $T0#lo,$D4,#26
+ vmovn.i64 $D4#lo,$D4
+ vshr.u64 $T1,$D1,#26
+ vmovn.i64 $D1#lo,$D1
+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2
+ vbic.i32 $D4#lo,#0xfc000000
+ vbic.i32 $D1#lo,#0xfc000000
+
+ vadd.i32 $D0#lo,$D0#lo,$T0#lo
+ vshl.u32 $T0#lo,$T0#lo,#2
+ vshrn.u64 $T1#lo,$D2,#26
+ vmovn.i64 $D2#lo,$D2
+ vadd.i32 $D0#lo,$D0#lo,$T0#lo @ h4 -> h0
+ vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3
+ vbic.i32 $D2#lo,#0xfc000000
+
+ vshr.u32 $T0#lo,$D0#lo,#26
+ vbic.i32 $D0#lo,#0xfc000000
+ vshr.u32 $T1#lo,$D3#lo,#26
+ vbic.i32 $D3#lo,#0xfc000000
+ vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1
+ vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4
+
+ subs $zeros,$zeros,#1
+ beq .Lsquare_break_neon
+
+ add $tbl0,$ctx,#(48+0*9*4)
+ add $tbl1,$ctx,#(48+1*9*4)
+
+ vtrn.32 $R0,$D0#lo @ r^2:r^1
+ vtrn.32 $R2,$D2#lo
+ vtrn.32 $R3,$D3#lo
+ vtrn.32 $R1,$D1#lo
+ vtrn.32 $R4,$D4#lo
+
+ vshl.u32 $S2,$R2,#2 @ *5
+ vshl.u32 $S3,$R3,#2
+ vshl.u32 $S1,$R1,#2
+ vshl.u32 $S4,$R4,#2
+ vadd.i32 $S2,$S2,$R2
+ vadd.i32 $S1,$S1,$R1
+ vadd.i32 $S3,$S3,$R3
+ vadd.i32 $S4,$S4,$R4
+
+ vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
+ vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
+ vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vst1.32 {${S4}[0]},[$tbl0,:32]
+ vst1.32 {${S4}[1]},[$tbl1,:32]
+
+ b .Lsquare_neon
+
+.align 4
+.Lsquare_break_neon:
+ add $tbl0,$ctx,#(48+2*4*9)
+ add $tbl1,$ctx,#(48+3*4*9)
+
+ vmov $R0,$D0#lo @ r^4:r^3
+ vshl.u32 $S1,$D1#lo,#2 @ *5
+ vmov $R1,$D1#lo
+ vshl.u32 $S2,$D2#lo,#2
+ vmov $R2,$D2#lo
+ vshl.u32 $S3,$D3#lo,#2
+ vmov $R3,$D3#lo
+ vshl.u32 $S4,$D4#lo,#2
+ vmov $R4,$D4#lo
+ vadd.i32 $S1,$S1,$D1#lo
+ vadd.i32 $S2,$S2,$D2#lo
+ vadd.i32 $S3,$S3,$D3#lo
+ vadd.i32 $S4,$S4,$D4#lo
+
+ vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
+ vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
+ vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vst1.32 {${S4}[0]},[$tbl0]
+ vst1.32 {${S4}[1]},[$tbl1]
+
+ ret @ bx lr
+.size poly1305_init_neon,.-poly1305_init_neon
+
+.type poly1305_blocks_neon,%function
+.align 5
+poly1305_blocks_neon:
+ ldr ip,[$ctx,#36] @ is_base2_26
+ ands $len,$len,#-16
+ beq .Lno_data_neon
+
+ cmp $len,#64
+ bhs .Lenter_neon
+ tst ip,ip @ is_base2_26?
+ beq poly1305_blocks
+
+.Lenter_neon:
+ stmdb sp!,{r4-r7}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ tst ip,ip @ is_base2_26?
+ bne .Lbase2_26_neon
+
+ stmdb sp!,{r1-r3,lr}
+ bl poly1305_init_neon
+
+ ldr r4,[$ctx,#0] @ load hash value base 2^32
+ ldr r5,[$ctx,#4]
+ ldr r6,[$ctx,#8]
+ ldr r7,[$ctx,#12]
+ ldr ip,[$ctx,#16]
+
+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
+ mov r3,r4,lsr#26
+ veor $D0#lo,$D0#lo,$D0#lo
+ mov r4,r5,lsr#20
+ orr r3,r3,r5,lsl#6
+ veor $D1#lo,$D1#lo,$D1#lo
+ mov r5,r6,lsr#14
+ orr r4,r4,r6,lsl#12
+ veor $D2#lo,$D2#lo,$D2#lo
+ mov r6,r7,lsr#8
+ orr r5,r5,r7,lsl#18
+ veor $D3#lo,$D3#lo,$D3#lo
+ and r3,r3,#0x03ffffff
+ orr r6,r6,ip,lsl#24
+ veor $D4#lo,$D4#lo,$D4#lo
+ and r4,r4,#0x03ffffff
+ mov r1,#1
+ and r5,r5,#0x03ffffff
+ str r1,[$ctx,#36] @ is_base2_26
+
+ vmov.32 $D0#lo[0],r2
+ vmov.32 $D1#lo[0],r3
+ vmov.32 $D2#lo[0],r4
+ vmov.32 $D3#lo[0],r5
+ vmov.32 $D4#lo[0],r6
+ adr $zeros,.Lzeros
+
+ ldmia sp!,{r1-r3,lr}
+ b .Lbase2_32_neon
+
+.align 4
+.Lbase2_26_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ load hash value
+
+ veor $D0#lo,$D0#lo,$D0#lo
+ veor $D1#lo,$D1#lo,$D1#lo
+ veor $D2#lo,$D2#lo,$D2#lo
+ veor $D3#lo,$D3#lo,$D3#lo
+ veor $D4#lo,$D4#lo,$D4#lo
+ vld4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
+ adr $zeros,.Lzeros
+ vld1.32 {$D4#lo[0]},[$ctx]
+ sub $ctx,$ctx,#16 @ rewind
+
+.Lbase2_32_neon:
+ add $in2,$inp,#32
+ mov $padbit,$padbit,lsl#24
+ tst $len,#31
+ beq .Leven
+
+ vld4.32 {$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]!
+ vmov.32 $H4#lo[0],$padbit
+ sub $len,$len,#16
+ add $in2,$inp,#32
+
+# ifdef __ARMEB__
+ vrev32.8 $H0,$H0
+ vrev32.8 $H3,$H3
+ vrev32.8 $H1,$H1
+ vrev32.8 $H2,$H2
+# endif
+ vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26
+ vshl.u32 $H3#lo,$H3#lo,#18
+
+ vsri.u32 $H3#lo,$H2#lo,#14
+ vshl.u32 $H2#lo,$H2#lo,#12
+ vadd.i32 $H4#hi,$H4#lo,$D4#lo @ add hash value and move to #hi
+
+ vbic.i32 $H3#lo,#0xfc000000
+ vsri.u32 $H2#lo,$H1#lo,#20
+ vshl.u32 $H1#lo,$H1#lo,#6
+
+ vbic.i32 $H2#lo,#0xfc000000
+ vsri.u32 $H1#lo,$H0#lo,#26
+ vadd.i32 $H3#hi,$H3#lo,$D3#lo
+
+ vbic.i32 $H0#lo,#0xfc000000
+ vbic.i32 $H1#lo,#0xfc000000
+ vadd.i32 $H2#hi,$H2#lo,$D2#lo
+
+ vadd.i32 $H0#hi,$H0#lo,$D0#lo
+ vadd.i32 $H1#hi,$H1#lo,$D1#lo
+
+ mov $tbl1,$zeros
+ add $tbl0,$ctx,#48
+
+ cmp $len,$len
+ b .Long_tail
+
+.align 4
+.Leven:
+ subs $len,$len,#64
+# ifdef __thumb2__
+ it lo
+# endif
+ movlo $in2,$zeros
+
+ vmov.i32 $H4,#1<<24 @ padbit, yes, always
+ vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1]
+ add $inp,$inp,#64
+ vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0)
+ add $in2,$in2,#64
+# ifdef __thumb2__
+ itt hi
+# endif
+ addhi $tbl1,$ctx,#(48+1*9*4)
+ addhi $tbl0,$ctx,#(48+3*9*4)
+
+# ifdef __ARMEB__
+ vrev32.8 $H0,$H0
+ vrev32.8 $H3,$H3
+ vrev32.8 $H1,$H1
+ vrev32.8 $H2,$H2
+# endif
+ vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
+ vshl.u32 $H3,$H3,#18
+
+ vsri.u32 $H3,$H2,#14
+ vshl.u32 $H2,$H2,#12
+
+ vbic.i32 $H3,#0xfc000000
+ vsri.u32 $H2,$H1,#20
+ vshl.u32 $H1,$H1,#6
+
+ vbic.i32 $H2,#0xfc000000
+ vsri.u32 $H1,$H0,#26
+
+ vbic.i32 $H0,#0xfc000000
+ vbic.i32 $H1,#0xfc000000
+
+ bls .Lskip_loop
+
+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^2
+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4
+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ b .Loop_neon
+
+.align 5
+.Loop_neon:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ @ \___________________/
+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ @ \___________________/ \____________________/
+ @
+ @ Note that we start with inp[2:3]*r^2. This is because it
+ @ doesn't depend on reduction in previous iteration.
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ inp[2:3]*r^2
+
+ vadd.i32 $H2#lo,$H2#lo,$D2#lo @ accumulate inp[0:1]
+ vmull.u32 $D2,$H2#hi,${R0}[1]
+ vadd.i32 $H0#lo,$H0#lo,$D0#lo
+ vmull.u32 $D0,$H0#hi,${R0}[1]
+ vadd.i32 $H3#lo,$H3#lo,$D3#lo
+ vmull.u32 $D3,$H3#hi,${R0}[1]
+ vmlal.u32 $D2,$H1#hi,${R1}[1]
+ vadd.i32 $H1#lo,$H1#lo,$D1#lo
+ vmull.u32 $D1,$H1#hi,${R0}[1]
+
+ vadd.i32 $H4#lo,$H4#lo,$D4#lo
+ vmull.u32 $D4,$H4#hi,${R0}[1]
+ subs $len,$len,#64
+ vmlal.u32 $D0,$H4#hi,${S1}[1]
+# ifdef __thumb2__
+ it lo
+# endif
+ movlo $in2,$zeros
+ vmlal.u32 $D3,$H2#hi,${R1}[1]
+ vld1.32 ${S4}[1],[$tbl1,:32]
+ vmlal.u32 $D1,$H0#hi,${R1}[1]
+ vmlal.u32 $D4,$H3#hi,${R1}[1]
+
+ vmlal.u32 $D0,$H3#hi,${S2}[1]
+ vmlal.u32 $D3,$H1#hi,${R2}[1]
+ vmlal.u32 $D4,$H2#hi,${R2}[1]
+ vmlal.u32 $D1,$H4#hi,${S2}[1]
+ vmlal.u32 $D2,$H0#hi,${R2}[1]
+
+ vmlal.u32 $D3,$H0#hi,${R3}[1]
+ vmlal.u32 $D0,$H2#hi,${S3}[1]
+ vmlal.u32 $D4,$H1#hi,${R3}[1]
+ vmlal.u32 $D1,$H3#hi,${S3}[1]
+ vmlal.u32 $D2,$H4#hi,${S3}[1]
+
+ vmlal.u32 $D3,$H4#hi,${S4}[1]
+ vmlal.u32 $D0,$H1#hi,${S4}[1]
+ vmlal.u32 $D4,$H0#hi,${R4}[1]
+ vmlal.u32 $D1,$H2#hi,${S4}[1]
+ vmlal.u32 $D2,$H3#hi,${S4}[1]
+
+ vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0)
+ add $in2,$in2,#64
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ (hash+inp[0:1])*r^4 and accumulate
+
+ vmlal.u32 $D3,$H3#lo,${R0}[0]
+ vmlal.u32 $D0,$H0#lo,${R0}[0]
+ vmlal.u32 $D4,$H4#lo,${R0}[0]
+ vmlal.u32 $D1,$H1#lo,${R0}[0]
+ vmlal.u32 $D2,$H2#lo,${R0}[0]
+ vld1.32 ${S4}[0],[$tbl0,:32]
+
+ vmlal.u32 $D3,$H2#lo,${R1}[0]
+ vmlal.u32 $D0,$H4#lo,${S1}[0]
+ vmlal.u32 $D4,$H3#lo,${R1}[0]
+ vmlal.u32 $D1,$H0#lo,${R1}[0]
+ vmlal.u32 $D2,$H1#lo,${R1}[0]
+
+ vmlal.u32 $D3,$H1#lo,${R2}[0]
+ vmlal.u32 $D0,$H3#lo,${S2}[0]
+ vmlal.u32 $D4,$H2#lo,${R2}[0]
+ vmlal.u32 $D1,$H4#lo,${S2}[0]
+ vmlal.u32 $D2,$H0#lo,${R2}[0]
+
+ vmlal.u32 $D3,$H0#lo,${R3}[0]
+ vmlal.u32 $D0,$H2#lo,${S3}[0]
+ vmlal.u32 $D4,$H1#lo,${R3}[0]
+ vmlal.u32 $D1,$H3#lo,${S3}[0]
+ vmlal.u32 $D3,$H4#lo,${S4}[0]
+
+ vmlal.u32 $D2,$H4#lo,${S3}[0]
+ vmlal.u32 $D0,$H1#lo,${S4}[0]
+ vmlal.u32 $D4,$H0#lo,${R4}[0]
+ vmov.i32 $H4,#1<<24 @ padbit, yes, always
+ vmlal.u32 $D1,$H2#lo,${S4}[0]
+ vmlal.u32 $D2,$H3#lo,${S4}[0]
+
+ vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1]
+ add $inp,$inp,#64
+# ifdef __ARMEB__
+ vrev32.8 $H0,$H0
+ vrev32.8 $H1,$H1
+ vrev32.8 $H2,$H2
+ vrev32.8 $H3,$H3
+# endif
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction interleaved with base 2^32 -> base 2^26
+
+ vshr.u64 $T0,$D3,#26
+ vmovn.i64 $D3#lo,$D3
+ vshr.u64 $T1,$D0,#26
+ vmovn.i64 $D0#lo,$D0
+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4
+ vbic.i32 $D3#lo,#0xfc000000
+ vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26
+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1
+ vshl.u32 $H3,$H3,#18
+ vbic.i32 $D0#lo,#0xfc000000
+
+ vshrn.u64 $T0#lo,$D4,#26
+ vmovn.i64 $D4#lo,$D4
+ vshr.u64 $T1,$D1,#26
+ vmovn.i64 $D1#lo,$D1
+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2
+ vsri.u32 $H3,$H2,#14
+ vbic.i32 $D4#lo,#0xfc000000
+ vshl.u32 $H2,$H2,#12
+ vbic.i32 $D1#lo,#0xfc000000
+
+ vadd.i32 $D0#lo,$D0#lo,$T0#lo
+ vshl.u32 $T0#lo,$T0#lo,#2
+ vbic.i32 $H3,#0xfc000000
+ vshrn.u64 $T1#lo,$D2,#26
+ vmovn.i64 $D2#lo,$D2
+ vadd.i32 $D0#lo,$D0#lo,$T0#lo @ h4 -> h0
+ vsri.u32 $H2,$H1,#20
+ vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3
+ vshl.u32 $H1,$H1,#6
+ vbic.i32 $D2#lo,#0xfc000000
+ vbic.i32 $H2,#0xfc000000
+
+ vshr.u32 $T0#lo,$D0#lo,#26
+ vbic.i32 $D0#lo,#0xfc000000
+ vsri.u32 $H1,$H0,#26
+ vbic.i32 $H0,#0xfc000000
+ vshr.u32 $T1#lo,$D3#lo,#26
+ vbic.i32 $D3#lo,#0xfc000000
+ vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1
+ vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4
+ vbic.i32 $H1,#0xfc000000
+
+ bhi .Loop_neon
+
+.Lskip_loop:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ add $tbl1,$ctx,#(48+0*9*4)
+ add $tbl0,$ctx,#(48+1*9*4)
+ adds $len,$len,#32
+# ifdef __thumb2__
+ it ne
+# endif
+ movne $len,#0
+ bne .Long_tail
+
+ vadd.i32 $H2#hi,$H2#lo,$D2#lo @ add hash value and move to #hi
+ vadd.i32 $H0#hi,$H0#lo,$D0#lo
+ vadd.i32 $H3#hi,$H3#lo,$D3#lo
+ vadd.i32 $H1#hi,$H1#lo,$D1#lo
+ vadd.i32 $H4#hi,$H4#lo,$D4#lo
+
+.Long_tail:
+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^1
+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^2
+
+ vadd.i32 $H2#lo,$H2#lo,$D2#lo @ can be redundant
+ vmull.u32 $D2,$H2#hi,$R0
+ vadd.i32 $H0#lo,$H0#lo,$D0#lo
+ vmull.u32 $D0,$H0#hi,$R0
+ vadd.i32 $H3#lo,$H3#lo,$D3#lo
+ vmull.u32 $D3,$H3#hi,$R0
+ vadd.i32 $H1#lo,$H1#lo,$D1#lo
+ vmull.u32 $D1,$H1#hi,$R0
+ vadd.i32 $H4#lo,$H4#lo,$D4#lo
+ vmull.u32 $D4,$H4#hi,$R0
+
+ vmlal.u32 $D0,$H4#hi,$S1
+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vmlal.u32 $D3,$H2#hi,$R1
+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vmlal.u32 $D1,$H0#hi,$R1
+ vmlal.u32 $D4,$H3#hi,$R1
+ vmlal.u32 $D2,$H1#hi,$R1
+
+ vmlal.u32 $D3,$H1#hi,$R2
+ vld1.32 ${S4}[1],[$tbl1,:32]
+ vmlal.u32 $D0,$H3#hi,$S2
+ vld1.32 ${S4}[0],[$tbl0,:32]
+ vmlal.u32 $D4,$H2#hi,$R2
+ vmlal.u32 $D1,$H4#hi,$S2
+ vmlal.u32 $D2,$H0#hi,$R2
+
+ vmlal.u32 $D3,$H0#hi,$R3
+# ifdef __thumb2__
+ it ne
+# endif
+ addne $tbl1,$ctx,#(48+2*9*4)
+ vmlal.u32 $D0,$H2#hi,$S3
+# ifdef __thumb2__
+ it ne
+# endif
+ addne $tbl0,$ctx,#(48+3*9*4)
+ vmlal.u32 $D4,$H1#hi,$R3
+ vmlal.u32 $D1,$H3#hi,$S3
+ vmlal.u32 $D2,$H4#hi,$S3
+
+ vmlal.u32 $D3,$H4#hi,$S4
+ vorn $MASK,$MASK,$MASK @ all-ones, can be redundant
+ vmlal.u32 $D0,$H1#hi,$S4
+ vshr.u64 $MASK,$MASK,#38
+ vmlal.u32 $D4,$H0#hi,$R4
+ vmlal.u32 $D1,$H2#hi,$S4
+ vmlal.u32 $D2,$H3#hi,$S4
+
+ beq .Lshort_tail
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ (hash+inp[0:1])*r^4:r^3 and accumulate
+
+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^3
+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4
+
+ vmlal.u32 $D2,$H2#lo,$R0
+ vmlal.u32 $D0,$H0#lo,$R0
+ vmlal.u32 $D3,$H3#lo,$R0
+ vmlal.u32 $D1,$H1#lo,$R0
+ vmlal.u32 $D4,$H4#lo,$R0
+
+ vmlal.u32 $D0,$H4#lo,$S1
+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
+ vmlal.u32 $D3,$H2#lo,$R1
+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
+ vmlal.u32 $D1,$H0#lo,$R1
+ vmlal.u32 $D4,$H3#lo,$R1
+ vmlal.u32 $D2,$H1#lo,$R1
+
+ vmlal.u32 $D3,$H1#lo,$R2
+ vld1.32 ${S4}[1],[$tbl1,:32]
+ vmlal.u32 $D0,$H3#lo,$S2
+ vld1.32 ${S4}[0],[$tbl0,:32]
+ vmlal.u32 $D4,$H2#lo,$R2
+ vmlal.u32 $D1,$H4#lo,$S2
+ vmlal.u32 $D2,$H0#lo,$R2
+
+ vmlal.u32 $D3,$H0#lo,$R3
+ vmlal.u32 $D0,$H2#lo,$S3
+ vmlal.u32 $D4,$H1#lo,$R3
+ vmlal.u32 $D1,$H3#lo,$S3
+ vmlal.u32 $D2,$H4#lo,$S3
+
+ vmlal.u32 $D3,$H4#lo,$S4
+ vorn $MASK,$MASK,$MASK @ all-ones
+ vmlal.u32 $D0,$H1#lo,$S4
+ vshr.u64 $MASK,$MASK,#38
+ vmlal.u32 $D4,$H0#lo,$R4
+ vmlal.u32 $D1,$H2#lo,$S4
+ vmlal.u32 $D2,$H3#lo,$S4
+
+.Lshort_tail:
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ lazy reduction, but without narrowing
+
+ vshr.u64 $T0,$D3,#26
+ vand.i64 $D3,$D3,$MASK
+ vshr.u64 $T1,$D0,#26
+ vand.i64 $D0,$D0,$MASK
+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4
+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1
+
+ vshr.u64 $T0,$D4,#26
+ vand.i64 $D4,$D4,$MASK
+ vshr.u64 $T1,$D1,#26
+ vand.i64 $D1,$D1,$MASK
+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2
+
+ vadd.i64 $D0,$D0,$T0
+ vshl.u64 $T0,$T0,#2
+ vshr.u64 $T1,$D2,#26
+ vand.i64 $D2,$D2,$MASK
+ vadd.i64 $D0,$D0,$T0 @ h4 -> h0
+ vadd.i64 $D3,$D3,$T1 @ h2 -> h3
+
+ vshr.u64 $T0,$D0,#26
+ vand.i64 $D0,$D0,$MASK
+ vshr.u64 $T1,$D3,#26
+ vand.i64 $D3,$D3,$MASK
+ vadd.i64 $D1,$D1,$T0 @ h0 -> h1
+ vadd.i64 $D4,$D4,$T1 @ h3 -> h4
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ horizontal addition
+
+ vadd.i64 $D2#lo,$D2#lo,$D2#hi
+ vadd.i64 $D0#lo,$D0#lo,$D0#hi
+ vadd.i64 $D3#lo,$D3#lo,$D3#hi
+ vadd.i64 $D1#lo,$D1#lo,$D1#hi
+ vadd.i64 $D4#lo,$D4#lo,$D4#hi
+
+ cmp $len,#0
+ bne .Leven
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ store hash value
+
+ vst4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
+ vst1.32 {$D4#lo[0]},[$ctx]
+
+ vldmia sp!,{d8-d15} @ epilogue
+ ldmia sp!,{r4-r7}
+.Lno_data_neon:
+ ret @ bx lr
+.size poly1305_blocks_neon,.-poly1305_blocks_neon
+
+.type poly1305_emit_neon,%function
+.align 5
+poly1305_emit_neon:
+ ldr ip,[$ctx,#36] @ is_base2_26
+
+ stmdb sp!,{r4-r11}
+
+ tst ip,ip
+ beq .Lpoly1305_emit_enter
+
+ ldmia $ctx,{$h0-$h4}
+ eor $g0,$g0,$g0
+
+ adds $h0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32
+ mov $h1,$h1,lsr#6
+ adcs $h1,$h1,$h2,lsl#20
+ mov $h2,$h2,lsr#12
+ adcs $h2,$h2,$h3,lsl#14
+ mov $h3,$h3,lsr#18
+ adcs $h3,$h3,$h4,lsl#8
+ adc $h4,$g0,$h4,lsr#24 @ can be partially reduced ...
+
+ and $g0,$h4,#-4 @ ... so reduce
+ and $h4,$h4,#3
+ add $g0,$g0,$g0,lsr#2 @ *= 5
+ adds $h0,$h0,$g0
+ adcs $h1,$h1,#0
+ adcs $h2,$h2,#0
+ adc $h3,$h3,#0
+
+ adds $g0,$h0,#5 @ compare to modulus
+ adcs $g1,$h1,#0
+ adcs $g2,$h2,#0
+ adcs $g3,$h3,#0
+ adc $g4,$h4,#0
+ tst $g4,#4 @ did it carry/borrow?
+
+# ifdef __thumb2__
+ it ne
+# endif
+ movne $h0,$g0
+ ldr $g0,[$nonce,#0]
+# ifdef __thumb2__
+ it ne
+# endif
+ movne $h1,$g1
+ ldr $g1,[$nonce,#4]
+# ifdef __thumb2__
+ it ne
+# endif
+ movne $h2,$g2
+ ldr $g2,[$nonce,#8]
+# ifdef __thumb2__
+ it ne
+# endif
+ movne $h3,$g3
+ ldr $g3,[$nonce,#12]
+
+ adds $h0,$h0,$g0 @ accumulate nonce
+ adcs $h1,$h1,$g1
+ adcs $h2,$h2,$g2
+ adc $h3,$h3,$g3
+
+# ifdef __ARMEB__
+ rev $h0,$h0
+ rev $h1,$h1
+ rev $h2,$h2
+ rev $h3,$h3
+# endif
+ str $h0,[$mac,#0] @ store the result
+ str $h1,[$mac,#4]
+ str $h2,[$mac,#8]
+ str $h3,[$mac,#12]
+
+ ldmia sp!,{r4-r11}
+ ret @ bx lr
+.size poly1305_emit_neon,.-poly1305_emit_neon
+
+.align 5
+.Lzeros:
+.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-.Lpoly1305_init
+#endif
+___
+} }
+$code.=<<___;
+.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+#if __ARM_MAX_ARCH__>=7
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
+ s/\bret\b/bx lr/go or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+close STDOUT; # enforce flush
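
Before moving on to the ARMv8 file, it is worth pinning down the radix trick
both NEON paths above depend on: the 130-bit accumulator is carried as five
26-bit limbs so that the 32x32->64 vmlal multiply-accumulates have headroom to
sum several products before a carry pass (the "lazy reduction" noted in the
comments) is required. The lsr/orr shuffles in poly1305_init_neon amount to
the conversion below, shown as an illustrative Python sketch (not part of the
patch; the helper names are ours, and the four words are assumed to be the
little-endian 32-bit words of the value):

    MASK26 = (1 << 26) - 1

    def to_base2_26(w0, w1, w2, w3):
        # the lsr#26/lsl#6, lsr#20/lsl#12, ... dance in poly1305_init_neon
        v = w0 | (w1 << 32) | (w2 << 64) | (w3 << 96)
        return [(v >> (26 * i)) & MASK26 for i in range(5)]

    def from_base2_26(limbs):
        # inverse direction, as in poly1305_emit_neon; the limbs may carry a
        # few extra bits, so the result "can be partially reduced" and the
        # caller folds anything at or above 2^130 back in, multiplied by 5
        return sum(l << (26 * i) for i, l in enumerate(limbs))

Multiplying two such limb vectors yields exactly the d0..d4 columns listed in
the .Lsquare_neon comment block, with every coefficient that wraps past 2^130
pre-multiplied by 5 (the s1..s4 tables).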
diff --git a/crypto/poly1305/asm/poly1305-armv8.pl b/crypto/poly1305/asm/poly1305-armv8.pl
new file mode 100755
index 00000000..79185d2b
--- /dev/null
+++ b/crypto/poly1305/asm/poly1305-armv8.pl
@@ -0,0 +1,925 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements Poly1305 hash for ARMv8.
+#
+# June 2015
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone.
+#
+# IALU/gcc-4.9 NEON
+#
+# Apple A7 1.86/+5% 0.72
+# Cortex-A53 2.63/+58% 1.47
+# Cortex-A57 2.70/+7% 1.14
+# Denver 1.39/+50% 1.18(*)
+# X-Gene 2.00/+68% 2.19
+#
+# (*) the estimate based on resource availability is less than 1.0,
+# i.e. the measured result is worse than expected, presumably because
+# the binary translator is not almighty;
+
+$flavour=shift;
+$output=shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
+my ($mac,$nonce)=($inp,$len);
+
+my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+
+// forward "declarations" are required for Apple
+.extern OPENSSL_armcap_P
+.globl poly1305_blocks
+.globl poly1305_emit
+
+.globl poly1305_init
+.type poly1305_init,%function
+.align 5
+poly1305_init:
+ cmp $inp,xzr
+ stp xzr,xzr,[$ctx] // zero hash value
+ stp xzr,xzr,[$ctx,#16] // [along with is_base2_26]
+
+ csel x0,xzr,x0,eq
+ b.eq .Lno_key
+
+#ifdef __ILP32__
+ ldrsw $t1,.LOPENSSL_armcap_P
+#else
+ ldr $t1,.LOPENSSL_armcap_P
+#endif
+ adr $t0,.LOPENSSL_armcap_P
+
+ ldp $r0,$r1,[$inp] // load key
+ mov $s1,#0xfffffffc0fffffff
+ movk $s1,#0x0fff,lsl#48
+ ldr w17,[$t0,$t1]
+#ifdef __ARMEB__
+ rev $r0,$r0 // flip bytes
+ rev $r1,$r1
+#endif
+ and $r0,$r0,$s1 // &=0ffffffc0fffffff
+ and $s1,$s1,#-4
+ and $r1,$r1,$s1 // &=0ffffffc0ffffffc
+ stp $r0,$r1,[$ctx,#32] // save key value
+
+ tst w17,#ARMV7_NEON
+
+ adr $d0,poly1305_blocks
+ adr $r0,poly1305_blocks_neon
+ adr $d1,poly1305_emit
+ adr $r1,poly1305_emit_neon
+
+ csel $d0,$d0,$r0,eq
+ csel $d1,$d1,$r1,eq
+
+ stp $d0,$d1,[$len]
+
+ mov x0,#1
+.Lno_key:
+ ret
+.size poly1305_init,.-poly1305_init
+
+.type poly1305_blocks,%function
+.align 5
+poly1305_blocks:
+ ands $len,$len,#-16
+ b.eq .Lno_data
+
+ ldp $h0,$h1,[$ctx] // load hash value
+ ldp $r0,$r1,[$ctx,#32] // load key value
+ ldr $h2,[$ctx,#16]
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+ b .Loop
+
+.align 5
+.Loop:
+ ldp $t0,$t1,[$inp],#16 // load input
+ sub $len,$len,#16
+#ifdef __ARMEB__
+ rev $t0,$t0
+ rev $t1,$t1
+#endif
+ adds $h0,$h0,$t0 // accumulate input
+ adcs $h1,$h1,$t1
+
+ mul $d0,$h0,$r0 // h0*r0
+ adc $h2,$h2,$padbit
+ umulh $d1,$h0,$r0
+
+ mul $t0,$h1,$s1 // h1*5*r1
+ umulh $t1,$h1,$s1
+
+ adds $d0,$d0,$t0
+ mul $t0,$h0,$r1 // h0*r1
+ adc $d1,$d1,$t1
+ umulh $d2,$h0,$r1
+
+ adds $d1,$d1,$t0
+ mul $t0,$h1,$r0 // h1*r0
+ adc $d2,$d2,xzr
+ umulh $t1,$h1,$r0
+
+ adds $d1,$d1,$t0
+ mul $t0,$h2,$s1 // h2*5*r1
+ adc $d2,$d2,$t1
+ mul $t1,$h2,$r0 // h2*r0
+
+ adds $d1,$d1,$t0
+ adc $d2,$d2,$t1
+
+ and $t0,$d2,#-4 // final reduction
+ and $h2,$d2,#3
+ add $t0,$t0,$d2,lsr#2
+ adds $h0,$d0,$t0
+ adc $h1,$d1,xzr
+
+ cbnz $len,.Loop
+
+ stp $h0,$h1,[$ctx] // store hash value
+ str $h2,[$ctx,#16]
+
+.Lno_data:
+ ret
+.size poly1305_blocks,.-poly1305_blocks
+
+.type poly1305_emit,%function
+.align 5
+poly1305_emit:
+ ldp $h0,$h1,[$ctx] // load hash base 2^64
+ ldr $h2,[$ctx,#16]
+ ldp $t0,$t1,[$nonce] // load nonce
+
+ adds $d0,$h0,#5 // compare to modulus
+ adcs $d1,$h1,xzr
+ adc $d2,$h2,xzr
+
+ tst $d2,#-4 // see if it's carried/borrowed
+
+ csel $h0,$h0,$d0,eq
+ csel $h1,$h1,$d1,eq
+
+#ifdef __ARMEB__
+ ror $t0,$t0,#32 // flip nonce words
+ ror $t1,$t1,#32
+#endif
+ adds $h0,$h0,$t0 // accumulate nonce
+ adc $h1,$h1,$t1
+#ifdef __ARMEB__
+ rev $h0,$h0 // flip output bytes
+ rev $h1,$h1
+#endif
+ stp $h0,$h1,[$mac] // write result
+
+ ret
+.size poly1305_emit,.-poly1305_emit
+___
+my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
+my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
+my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
+my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
+my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
+my ($T0,$T1,$MASK) = map("v$_",(29..31));
+
+my ($in2,$zeros)=("x16","x17");
+my $is_base2_26 = $zeros; # borrow
+
+$code.=<<___;
+.type poly1305_mult,%function
+.align 5
+poly1305_mult:
+ mul $d0,$h0,$r0 // h0*r0
+ umulh $d1,$h0,$r0
+
+ mul $t0,$h1,$s1 // h1*5*r1
+ umulh $t1,$h1,$s1
+
+ adds $d0,$d0,$t0
+ mul $t0,$h0,$r1 // h0*r1
+ adc $d1,$d1,$t1
+ umulh $d2,$h0,$r1
+
+ adds $d1,$d1,$t0
+ mul $t0,$h1,$r0 // h1*r0
+ adc $d2,$d2,xzr
+ umulh $t1,$h1,$r0
+
+ adds $d1,$d1,$t0
+ mul $t0,$h2,$s1 // h2*5*r1
+ adc $d2,$d2,$t1
+ mul $t1,$h2,$r0 // h2*r0
+
+ adds $d1,$d1,$t0
+ adc $d2,$d2,$t1
+
+ and $t0,$d2,#-4 // final reduction
+ and $h2,$d2,#3
+ add $t0,$t0,$d2,lsr#2
+ adds $h0,$d0,$t0
+ adc $h1,$d1,xzr
+
+ ret
+.size poly1305_mult,.-poly1305_mult
+
+.type poly1305_splat,%function
+.align 5
+poly1305_splat:
+ and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x13,$h0,#26,#26
+ extr x14,$h1,$h0,#52
+ and x14,x14,#0x03ffffff
+ ubfx x15,$h1,#14,#26
+ extr x16,$h2,$h1,#40
+
+ str w12,[$ctx,#16*0] // r0
+ add w12,w13,w13,lsl#2 // r1*5
+ str w13,[$ctx,#16*1] // r1
+ add w13,w14,w14,lsl#2 // r2*5
+ str w12,[$ctx,#16*2] // s1
+ str w14,[$ctx,#16*3] // r2
+ add w14,w15,w15,lsl#2 // r3*5
+ str w13,[$ctx,#16*4] // s2
+ str w15,[$ctx,#16*5] // r3
+ add w15,w16,w16,lsl#2 // r4*5
+ str w14,[$ctx,#16*6] // s3
+ str w16,[$ctx,#16*7] // r4
+ str w15,[$ctx,#16*8] // s4
+
+ ret
+.size poly1305_splat,.-poly1305_splat
+
+.type poly1305_blocks_neon,%function
+.align 5
+poly1305_blocks_neon:
+ ldr $is_base2_26,[$ctx,#24]
+ cmp $len,#128
+ b.hs .Lblocks_neon
+ cbz $is_base2_26,poly1305_blocks
+
+.Lblocks_neon:
+ stp x29,x30,[sp,#-80]!
+ add x29,sp,#0
+
+ ands $len,$len,#-16
+ b.eq .Lno_data_neon
+
+ cbz $is_base2_26,.Lbase2_64_neon
+
+ ldp w10,w11,[$ctx] // load hash value base 2^26
+ ldp w12,w13,[$ctx,#8]
+ ldr w14,[$ctx,#16]
+
+ tst $len,#31
+ b.eq .Leven_neon
+
+ ldp $r0,$r1,[$ctx,#32] // load key value
+
+ add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64
+ lsr $h1,x12,#12
+ adds $h0,$h0,x12,lsl#52
+ add $h1,$h1,x13,lsl#14
+ adc $h1,$h1,xzr
+ lsr $h2,x14,#24
+ adds $h1,$h1,x14,lsl#40
+ adc $d2,$h2,xzr // can be partially reduced...
+
+ ldp $d0,$d1,[$inp],#16 // load input
+ sub $len,$len,#16
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+
+ and $t0,$d2,#-4 // ... so reduce
+ and $h2,$d2,#3
+ add $t0,$t0,$d2,lsr#2
+ adds $h0,$h0,$t0
+ adc $h1,$h1,xzr
+
+#ifdef __ARMEB__
+ rev $d0,$d0
+ rev $d1,$d1
+#endif
+ adds $h0,$h0,$d0 // accumulate input
+ adcs $h1,$h1,$d1
+ adc $h2,$h2,$padbit
+
+ bl poly1305_mult
+ ldr x30,[sp,#8]
+
+ cbz $padbit,.Lstore_base2_64_neon
+
+ and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x11,$h0,#26,#26
+ extr x12,$h1,$h0,#52
+ and x12,x12,#0x03ffffff
+ ubfx x13,$h1,#14,#26
+ extr x14,$h2,$h1,#40
+
+ cbnz $len,.Leven_neon
+
+ stp w10,w11,[$ctx] // store hash value base 2^26
+ stp w12,w13,[$ctx,#8]
+ str w14,[$ctx,#16]
+ b .Lno_data_neon
+
+.align 4
+.Lstore_base2_64_neon:
+ stp $h0,$h1,[$ctx] // store hash value base 2^64
+ stp $h2,xzr,[$ctx,#16] // note that is_base2_26 is zeroed
+ b .Lno_data_neon
+
+.align 4
+.Lbase2_64_neon:
+ ldp $r0,$r1,[$ctx,#32] // load key value
+
+ ldp $h0,$h1,[$ctx] // load hash value base 2^64
+ ldr $h2,[$ctx,#16]
+
+ tst $len,#31
+ b.eq .Linit_neon
+
+ ldp $d0,$d1,[$inp],#16 // load input
+ sub $len,$len,#16
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+#ifdef __ARMEB__
+ rev $d0,$d0
+ rev $d1,$d1
+#endif
+ adds $h0,$h0,$d0 // accumulate input
+ adcs $h1,$h1,$d1
+ adc $h2,$h2,$padbit
+
+ bl poly1305_mult
+
+.Linit_neon:
+ and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26
+ ubfx x11,$h0,#26,#26
+ extr x12,$h1,$h0,#52
+ and x12,x12,#0x03ffffff
+ ubfx x13,$h1,#14,#26
+ extr x14,$h2,$h1,#40
+
+ stp d8,d9,[sp,#16] // meet ABI requirements
+ stp d10,d11,[sp,#32]
+ stp d12,d13,[sp,#48]
+ stp d14,d15,[sp,#64]
+
+ fmov ${H0},x10
+ fmov ${H1},x11
+ fmov ${H2},x12
+ fmov ${H3},x13
+ fmov ${H4},x14
+
+ ////////////////////////////////// initialize r^n table
+ mov $h0,$r0 // r^1
+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2)
+ mov $h1,$r1
+ mov $h2,xzr
+ add $ctx,$ctx,#48+12
+ bl poly1305_splat
+
+ bl poly1305_mult // r^2
+ sub $ctx,$ctx,#4
+ bl poly1305_splat
+
+ bl poly1305_mult // r^3
+ sub $ctx,$ctx,#4
+ bl poly1305_splat
+
+ bl poly1305_mult // r^4
+ sub $ctx,$ctx,#4
+ bl poly1305_splat
+ ldr x30,[sp,#8]
+
+ add $in2,$inp,#32
+ adr $zeros,.Lzeros
+ subs $len,$len,#64
+ csel $in2,$zeros,$in2,lo
+
+ mov x4,#1
+ str x4,[$ctx,#-24] // set is_base2_26
+ sub $ctx,$ctx,#48 // restore original $ctx
+ b .Ldo_neon
+
+.align 4
+.Leven_neon:
+ add $in2,$inp,#32
+ adr $zeros,.Lzeros
+ subs $len,$len,#64
+ csel $in2,$zeros,$in2,lo
+
+ stp d8,d9,[sp,#16] // meet ABI requirements
+ stp d10,d11,[sp,#32]
+ stp d12,d13,[sp,#48]
+ stp d14,d15,[sp,#64]
+
+ fmov ${H0},x10
+ fmov ${H1},x11
+ fmov ${H2},x12
+ fmov ${H3},x13
+ fmov ${H4},x14
+
+.Ldo_neon:
+ ldp x8,x12,[$in2],#16 // inp[2:3] (or zero)
+ ldp x9,x13,[$in2],#48
+
+ lsl $padbit,$padbit,#24
+ add x15,$ctx,#48
+
+#ifdef __ARMEB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ and x5,x9,#0x03ffffff
+ ubfx x6,x8,#26,#26
+ ubfx x7,x9,#26,#26
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ extr x8,x12,x8,#52
+ extr x9,x13,x9,#52
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ fmov $IN23_0,x4
+ and x8,x8,#0x03ffffff
+ and x9,x9,#0x03ffffff
+ ubfx x10,x12,#14,#26
+ ubfx x11,x13,#14,#26
+ add x12,$padbit,x12,lsr#40
+ add x13,$padbit,x13,lsr#40
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ fmov $IN23_1,x6
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ fmov $IN23_2,x8
+ fmov $IN23_3,x10
+ fmov $IN23_4,x12
+
+ ldp x8,x12,[$inp],#16 // inp[0:1]
+ ldp x9,x13,[$inp],#48
+
+ ld1 {$R0,$R1,$S1,$R2},[x15],#64
+ ld1 {$S2,$R3,$S3,$R4},[x15],#64
+ ld1 {$S4},[x15]
+
+#ifdef __ARMEB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ and x5,x9,#0x03ffffff
+ ubfx x6,x8,#26,#26
+ ubfx x7,x9,#26,#26
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ extr x8,x12,x8,#52
+ extr x9,x13,x9,#52
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ fmov $IN01_0,x4
+ and x8,x8,#0x03ffffff
+ and x9,x9,#0x03ffffff
+ ubfx x10,x12,#14,#26
+ ubfx x11,x13,#14,#26
+ add x12,$padbit,x12,lsr#40
+ add x13,$padbit,x13,lsr#40
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ fmov $IN01_1,x6
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ fmov $IN01_2,x8
+ fmov $IN01_3,x10
+ fmov $IN01_4,x12
+
+ b.ls .Lskip_loop
+
+.align 4
+.Loop_neon:
+ ////////////////////////////////////////////////////////////////
+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ // \___________________/
+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ // \___________________/ \____________________/
+ //
+ // Note that we start with inp[2:3]*r^2. This is because it
+ // doesn't depend on reduction in previous iteration.
+ ////////////////////////////////////////////////////////////////
+ // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
+ // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
+ // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
+ // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
+ // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
+
+ subs $len,$len,#64
+ umull $ACC4,$IN23_0,${R4}[2]
+ csel $in2,$zeros,$in2,lo
+ umull $ACC3,$IN23_0,${R3}[2]
+ umull $ACC2,$IN23_0,${R2}[2]
+ ldp x8,x12,[$in2],#16 // inp[2:3] (or zero)
+ umull $ACC1,$IN23_0,${R1}[2]
+ ldp x9,x13,[$in2],#48
+ umull $ACC0,$IN23_0,${R0}[2]
+#ifdef __ARMEB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+
+ umlal $ACC4,$IN23_1,${R3}[2]
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ umlal $ACC3,$IN23_1,${R2}[2]
+ and x5,x9,#0x03ffffff
+ umlal $ACC2,$IN23_1,${R1}[2]
+ ubfx x6,x8,#26,#26
+ umlal $ACC1,$IN23_1,${R0}[2]
+ ubfx x7,x9,#26,#26
+ umlal $ACC0,$IN23_1,${S4}[2]
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+
+ umlal $ACC4,$IN23_2,${R2}[2]
+ extr x8,x12,x8,#52
+ umlal $ACC3,$IN23_2,${R1}[2]
+ extr x9,x13,x9,#52
+ umlal $ACC2,$IN23_2,${R0}[2]
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ umlal $ACC1,$IN23_2,${S4}[2]
+ fmov $IN23_0,x4
+ umlal $ACC0,$IN23_2,${S3}[2]
+ and x8,x8,#0x03ffffff
+
+ umlal $ACC4,$IN23_3,${R1}[2]
+ and x9,x9,#0x03ffffff
+ umlal $ACC3,$IN23_3,${R0}[2]
+ ubfx x10,x12,#14,#26
+ umlal $ACC2,$IN23_3,${S4}[2]
+ ubfx x11,x13,#14,#26
+ umlal $ACC1,$IN23_3,${S3}[2]
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ umlal $ACC0,$IN23_3,${S2}[2]
+ fmov $IN23_1,x6
+
+ add $IN01_2,$IN01_2,$H2
+ add x12,$padbit,x12,lsr#40
+ umlal $ACC4,$IN23_4,${R0}[2]
+ add x13,$padbit,x13,lsr#40
+ umlal $ACC3,$IN23_4,${S4}[2]
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ umlal $ACC2,$IN23_4,${S3}[2]
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ umlal $ACC1,$IN23_4,${S2}[2]
+ fmov $IN23_2,x8
+ umlal $ACC0,$IN23_4,${S1}[2]
+ fmov $IN23_3,x10
+
+ ////////////////////////////////////////////////////////////////
+ // (hash+inp[0:1])*r^4 and accumulate
+
+ add $IN01_0,$IN01_0,$H0
+ fmov $IN23_4,x12
+ umlal $ACC3,$IN01_2,${R1}[0]
+ ldp x8,x12,[$inp],#16 // inp[0:1]
+ umlal $ACC0,$IN01_2,${S3}[0]
+ ldp x9,x13,[$inp],#48
+ umlal $ACC4,$IN01_2,${R2}[0]
+ umlal $ACC1,$IN01_2,${S4}[0]
+ umlal $ACC2,$IN01_2,${R0}[0]
+#ifdef __ARMEB__
+ rev x8,x8
+ rev x12,x12
+ rev x9,x9
+ rev x13,x13
+#endif
+
+ add $IN01_1,$IN01_1,$H1
+ umlal $ACC3,$IN01_0,${R3}[0]
+ umlal $ACC4,$IN01_0,${R4}[0]
+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
+ umlal $ACC2,$IN01_0,${R2}[0]
+ and x5,x9,#0x03ffffff
+ umlal $ACC0,$IN01_0,${R0}[0]
+ ubfx x6,x8,#26,#26
+ umlal $ACC1,$IN01_0,${R1}[0]
+ ubfx x7,x9,#26,#26
+
+ add $IN01_3,$IN01_3,$H3
+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
+ umlal $ACC3,$IN01_1,${R2}[0]
+ extr x8,x12,x8,#52
+ umlal $ACC4,$IN01_1,${R3}[0]
+ extr x9,x13,x9,#52
+ umlal $ACC0,$IN01_1,${S4}[0]
+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
+ umlal $ACC2,$IN01_1,${R1}[0]
+ fmov $IN01_0,x4
+ umlal $ACC1,$IN01_1,${R0}[0]
+ and x8,x8,#0x03ffffff
+
+ add $IN01_4,$IN01_4,$H4
+ and x9,x9,#0x03ffffff
+ umlal $ACC3,$IN01_3,${R0}[0]
+ ubfx x10,x12,#14,#26
+ umlal $ACC0,$IN01_3,${S2}[0]
+ ubfx x11,x13,#14,#26
+ umlal $ACC4,$IN01_3,${R1}[0]
+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
+ umlal $ACC1,$IN01_3,${S3}[0]
+ fmov $IN01_1,x6
+ umlal $ACC2,$IN01_3,${S4}[0]
+ add x12,$padbit,x12,lsr#40
+
+ umlal $ACC3,$IN01_4,${S4}[0]
+ add x13,$padbit,x13,lsr#40
+ umlal $ACC0,$IN01_4,${S1}[0]
+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
+ umlal $ACC4,$IN01_4,${R0}[0]
+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
+ umlal $ACC1,$IN01_4,${S2}[0]
+ fmov $IN01_2,x8
+ umlal $ACC2,$IN01_4,${S3}[0]
+ fmov $IN01_3,x10
+
+ /////////////////////////////////////////////////////////////////
+ // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ // and P. Schwabe
+
+ ushr $T0.2d,$ACC3,#26
+ fmov $IN01_4,x12
+ xtn $H3,$ACC3
+ ushr $T1.2d,$ACC0,#26
+ xtn $H0,$ACC0
+ add $ACC4,$ACC4,$T0.2d // h3 -> h4
+ bic $H3,#0xfc,lsl#24 // &=0x03ffffff
+ add $ACC1,$ACC1,$T1.2d // h0 -> h1
+ bic $H0,#0xfc,lsl#24
+
+ shrn $T0.2s,$ACC4,#26
+ xtn $H4,$ACC4
+ ushr $T1.2d,$ACC1,#26
+ xtn $H1,$ACC1
+ add $ACC2,$ACC2,$T1.2d // h1 -> h2
+ bic $H4,#0xfc,lsl#24
+ bic $H1,#0xfc,lsl#24
+
+ add $H0,$H0,$T0.2s
+ shl $T0.2s,$T0.2s,#2
+ shrn $T1.2s,$ACC2,#26
+ xtn $H2,$ACC2
+ add $H0,$H0,$T0.2s // h4 -> h0
+ add $H3,$H3,$T1.2s // h2 -> h3
+ bic $H2,#0xfc,lsl#24
+
+ ushr $T0.2s,$H0,#26
+ bic $H0,#0xfc,lsl#24
+ ushr $T1.2s,$H3,#26
+ bic $H3,#0xfc,lsl#24
+ add $H1,$H1,$T0.2s // h0 -> h1
+ add $H4,$H4,$T1.2s // h3 -> h4
+
+ b.hi .Loop_neon
+
+.Lskip_loop:
+ dup $IN23_2,${IN23_2}[0]
+ movi $MASK.2d,#-1
+ add $IN01_2,$IN01_2,$H2
+ ushr $MASK.2d,$MASK.2d,#38
+
+ ////////////////////////////////////////////////////////////////
+ // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ adds $len,$len,#32
+ b.ne .Long_tail
+
+ dup $IN23_2,${IN01_2}[0]
+ add $IN23_0,$IN01_0,$H0
+ add $IN23_3,$IN01_3,$H3
+ add $IN23_1,$IN01_1,$H1
+ add $IN23_4,$IN01_4,$H4
+
+.Long_tail:
+ dup $IN23_0,${IN23_0}[0]
+ umull2 $ACC0,$IN23_2,${S3}
+ umull2 $ACC3,$IN23_2,${R1}
+ umull2 $ACC4,$IN23_2,${R2}
+ umull2 $ACC2,$IN23_2,${R0}
+ umull2 $ACC1,$IN23_2,${S4}
+
+ dup $IN23_1,${IN23_1}[0]
+ umlal2 $ACC0,$IN23_0,${R0}
+ umlal2 $ACC2,$IN23_0,${R2}
+ umlal2 $ACC3,$IN23_0,${R3}
+ umlal2 $ACC4,$IN23_0,${R4}
+ umlal2 $ACC1,$IN23_0,${R1}
+
+ dup $IN23_3,${IN23_3}[0]
+ umlal2 $ACC0,$IN23_1,${S4}
+ umlal2 $ACC3,$IN23_1,${R2}
+ umlal2 $ACC2,$IN23_1,${R1}
+ umlal2 $ACC4,$IN23_1,${R3}
+ umlal2 $ACC1,$IN23_1,${R0}
+
+ dup $IN23_4,${IN23_4}[0]
+ umlal2 $ACC3,$IN23_3,${R0}
+ umlal2 $ACC4,$IN23_3,${R1}
+ umlal2 $ACC0,$IN23_3,${S2}
+ umlal2 $ACC1,$IN23_3,${S3}
+ umlal2 $ACC2,$IN23_3,${S4}
+
+ umlal2 $ACC3,$IN23_4,${S4}
+ umlal2 $ACC0,$IN23_4,${S1}
+ umlal2 $ACC4,$IN23_4,${R0}
+ umlal2 $ACC1,$IN23_4,${S2}
+ umlal2 $ACC2,$IN23_4,${S3}
+
+ b.eq .Lshort_tail
+
+ ////////////////////////////////////////////////////////////////
+ // (hash+inp[0:1])*r^4:r^3 and accumulate
+
+ add $IN01_0,$IN01_0,$H0
+ umlal $ACC3,$IN01_2,${R1}
+ umlal $ACC0,$IN01_2,${S3}
+ umlal $ACC4,$IN01_2,${R2}
+ umlal $ACC1,$IN01_2,${S4}
+ umlal $ACC2,$IN01_2,${R0}
+
+ add $IN01_1,$IN01_1,$H1
+ umlal $ACC3,$IN01_0,${R3}
+ umlal $ACC0,$IN01_0,${R0}
+ umlal $ACC4,$IN01_0,${R4}
+ umlal $ACC1,$IN01_0,${R1}
+ umlal $ACC2,$IN01_0,${R2}
+
+ add $IN01_3,$IN01_3,$H3
+ umlal $ACC3,$IN01_1,${R2}
+ umlal $ACC0,$IN01_1,${S4}
+ umlal $ACC4,$IN01_1,${R3}
+ umlal $ACC1,$IN01_1,${R0}
+ umlal $ACC2,$IN01_1,${R1}
+
+ add $IN01_4,$IN01_4,$H4
+ umlal $ACC3,$IN01_3,${R0}
+ umlal $ACC0,$IN01_3,${S2}
+ umlal $ACC4,$IN01_3,${R1}
+ umlal $ACC1,$IN01_3,${S3}
+ umlal $ACC2,$IN01_3,${S4}
+
+ umlal $ACC3,$IN01_4,${S4}
+ umlal $ACC0,$IN01_4,${S1}
+ umlal $ACC4,$IN01_4,${R0}
+ umlal $ACC1,$IN01_4,${S2}
+ umlal $ACC2,$IN01_4,${S3}
+
+.Lshort_tail:
+ ////////////////////////////////////////////////////////////////
+ // lazy reduction, but without narrowing
+
+ ushr $T0.2d,$ACC3,#26
+ and $ACC3,$ACC3,$MASK.2d
+ ushr $T1.2d,$ACC0,#26
+ and $ACC0,$ACC0,$MASK.2d
+
+ add $ACC4,$ACC4,$T0.2d // h3 -> h4
+ add $ACC1,$ACC1,$T1.2d // h0 -> h1
+
+ ushr $T0.2d,$ACC4,#26
+ and $ACC4,$ACC4,$MASK.2d
+ ushr $T1.2d,$ACC1,#26
+ and $ACC1,$ACC1,$MASK.2d
+ add $ACC2,$ACC2,$T1.2d // h1 -> h2
+
+ add $ACC0,$ACC0,$T0.2d
+ shl $T0.2d,$T0.2d,#2
+ ushr $T1.2d,$ACC2,#26
+ and $ACC2,$ACC2,$MASK.2d
+ add $ACC0,$ACC0,$T0.2d // h4 -> h0
+ add $ACC3,$ACC3,$T1.2d // h2 -> h3
+
+ ushr $T0.2d,$ACC0,#26
+ and $ACC0,$ACC0,$MASK.2d
+ ushr $T1.2d,$ACC3,#26
+ and $ACC3,$ACC3,$MASK.2d
+ add $ACC1,$ACC1,$T0.2d // h0 -> h1
+ add $ACC4,$ACC4,$T1.2d // h3 -> h4
+
+ ////////////////////////////////////////////////////////////////
+ // horizontal add
+
+ addp $ACC2,$ACC2,$ACC2
+ ldp d8,d9,[sp,#16] // meet ABI requirements
+ addp $ACC0,$ACC0,$ACC0
+ ldp d10,d11,[sp,#32]
+ addp $ACC1,$ACC1,$ACC1
+ ldp d12,d13,[sp,#48]
+ addp $ACC3,$ACC3,$ACC3
+ ldp d14,d15,[sp,#64]
+ addp $ACC4,$ACC4,$ACC4
+
+ ////////////////////////////////////////////////////////////////
+ // write the result, can be partially reduced
+
+ st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
+ st1 {$ACC4}[0],[$ctx]
+
+.Lno_data_neon:
+ ldr x29,[sp],#80
+ ret
+.size poly1305_blocks_neon,.-poly1305_blocks_neon
+
+.type poly1305_emit_neon,%function
+.align 5
+poly1305_emit_neon:
+ ldr $is_base2_26,[$ctx,#24]
+ cbz $is_base2_26,poly1305_emit
+
+ ldp w10,w11,[$ctx] // load hash value base 2^26
+ ldp w12,w13,[$ctx,#8]
+ ldr w14,[$ctx,#16]
+
+ add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64
+ lsr $h1,x12,#12
+ adds $h0,$h0,x12,lsl#52
+ add $h1,$h1,x13,lsl#14
+ adc $h1,$h1,xzr
+ lsr $h2,x14,#24
+ adds $h1,$h1,x14,lsl#40
+ adc $h2,$h2,xzr // can be partially reduced...
+
+ ldp $t0,$t1,[$nonce] // load nonce
+
+ and $d0,$h2,#-4 // ... so reduce
+ add $d0,$d0,$h2,lsr#2
+ and $h2,$h2,#3
+ adds $h0,$h0,$d0
+ adc $h1,$h1,xzr
+
+ adds $d0,$h0,#5 // compare to modulus
+ adcs $d1,$h1,xzr
+ adc $d2,$h2,xzr
+
+ tst $d2,#-4 // see if it's carried/borrowed
+
+ csel $h0,$h0,$d0,eq
+ csel $h1,$h1,$d1,eq
+
+#ifdef __ARMEB__
+ ror $t0,$t0,#32 // flip nonce words
+ ror $t1,$t1,#32
+#endif
+ adds $h0,$h0,$t0 // accumulate nonce
+ adc $h1,$h1,$t1
+#ifdef __ARMEB__
+ rev $h0,$h0 // flip output bytes
+ rev $h1,$h1
+#endif
+ stp $h0,$h1,[$mac] // write result
+
+ ret
+.size poly1305_emit_neon,.-poly1305_emit_neon
+
+.align 5
+.Lzeros:
+.long 0,0,0,0,0,0,0,0
+.LOPENSSL_armcap_P:
+#ifdef __ILP32__
+.long OPENSSL_armcap_P-.
+#else
+.quad OPENSSL_armcap_P-.
+#endif
+.asciz "Poly1305 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+foreach (split("\n",$code)) {
+ s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or
+ s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or
+ (m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or
+ (m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or
+ (m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or
+ (m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or
+ (m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));
+
+ s/\.[124]([sd])\[/.$1\[/;
+
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/crypto/poly1305/asm/poly1305-x86.pl b/crypto/poly1305/asm/poly1305-x86.pl
new file mode 100755
index 00000000..7c1aee5f
--- /dev/null
+++ b/crypto/poly1305/asm/poly1305-x86.pl
@@ -0,0 +1,1794 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements Poly1305 hash for x86.
+#
+# April 2015
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone,
+# measured with rdtsc at fixed clock frequency.
+#
+# IALU/gcc-3.4(*) SSE2(**) AVX2
+# Pentium 15.7/+80% -
+# PIII 6.21/+90% -
+# P4 19.8/+40% 3.24
+# Core 2 4.85/+90% 1.80
+# Westmere 4.58/+100% 1.43
+# Sandy Bridge 3.90/+100% 1.36
+# Haswell 3.88/+70% 1.18 0.72
+# Silvermont 11.0/+40% 4.80
+# VIA Nano 6.71/+90% 2.47
+# Sledgehammer 3.51/+180% 4.27
+# Bulldozer 4.53/+140% 1.31
+#
+# (*) gcc 4.8 for some reason generated worse code;
+# (**) besides SSE2 there are floating-point and AVX options; FP
+# is deemed unnecessary, because pre-SSE2 processors are too
+# old to care about, while it's not the fastest option on
+# SSE2-capable ones; AVX is omitted, because it doesn't give
+# a lot of improvement, 5-10% depending on processor;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"poly1305-x86.pl",$ARGV[$#ARGV] eq "386");
+
+$sse2=$avx=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+if ($sse2) {
+ &static_label("const_sse2");
+ &static_label("enter_blocks");
+ &static_label("enter_emit");
+ &external_label("OPENSSL_ia32cap_P");
+
+ if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+ }
+
+ if (!$avx && $ARGV[0] eq "win32n" &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+ }
+
+ if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+ }
+}
+
+########################################################################
+# The layout of the opaque area is as follows.
+#
+# unsigned __int32 h[5]; # current hash value base 2^32
+# unsigned __int32 pad; # is_base2_26 in vector context
+# unsigned __int32 r[4]; # key value base 2^32
+
+&align(64);
+&function_begin("poly1305_init");
+ &mov ("edi",&wparam(0)); # context
+ &mov ("esi",&wparam(1)); # key
+ &mov ("ebp",&wparam(2)); # function table
+
+ &xor ("eax","eax");
+ &mov (&DWP(4*0,"edi"),"eax"); # zero hash value
+ &mov (&DWP(4*1,"edi"),"eax");
+ &mov (&DWP(4*2,"edi"),"eax");
+ &mov (&DWP(4*3,"edi"),"eax");
+ &mov (&DWP(4*4,"edi"),"eax");
+ &mov (&DWP(4*5,"edi"),"eax"); # is_base2_26
+
+ &cmp ("esi",0);
+ &je (&label("nokey"));
+
+ if ($sse2) {
+ &call (&label("pic_point"));
+ &set_label("pic_point");
+ &blindpop("ebx");
+
+ &lea ("eax",&DWP("poly1305_blocks-".&label("pic_point"),"ebx"));
+ &lea ("edx",&DWP("poly1305_emit-".&label("pic_point"),"ebx"));
+
+ &picmeup("edi","OPENSSL_ia32cap_P","ebx",&label("pic_point"));
+ &mov ("ecx",&DWP(0,"edi"));
+ &and ("ecx",1<<26|1<<24);
+ &cmp ("ecx",1<<26|1<<24); # SSE2 and XMM?
+ &jne (&label("no_sse2"));
+
+ &lea ("eax",&DWP("_poly1305_blocks_sse2-".&label("pic_point"),"ebx"));
+ &lea ("edx",&DWP("_poly1305_emit_sse2-".&label("pic_point"),"ebx"));
+
+ if ($avx>1) {
+ &mov ("ecx",&DWP(8,"edi"));
+ &test ("ecx",1<<5); # AVX2?
+ &jz (&label("no_sse2"));
+
+ &lea ("eax",&DWP("_poly1305_blocks_avx2-".&label("pic_point"),"ebx"));
+ }
+ &set_label("no_sse2");
+ &mov ("edi",&wparam(0)); # reload context
+ &mov (&DWP(0,"ebp"),"eax"); # fill function table
+ &mov (&DWP(4,"ebp"),"edx");
+ }
+
+ &mov ("eax",&DWP(4*0,"esi")); # load input key
+ &mov ("ebx",&DWP(4*1,"esi"));
+ &mov ("ecx",&DWP(4*2,"esi"));
+ &mov ("edx",&DWP(4*3,"esi"));
+ &and ("eax",0x0fffffff);
+ &and ("ebx",0x0ffffffc);
+ &and ("ecx",0x0ffffffc);
+ &and ("edx",0x0ffffffc);
+ &mov (&DWP(4*6,"edi"),"eax");
+ &mov (&DWP(4*7,"edi"),"ebx");
+ &mov (&DWP(4*8,"edi"),"ecx");
+ &mov (&DWP(4*9,"edi"),"edx");
+
+ &mov ("eax",$sse2);
+&set_label("nokey");
+&function_end("poly1305_init");
+
+($h0,$h1,$h2,$h3,$h4,
+ $d0,$d1,$d2,$d3,
+ $r0,$r1,$r2,$r3,
+ $s1,$s2,$s3)=map(4*$_,(0..15));
+
+&function_begin("poly1305_blocks");
+ &mov ("edi",&wparam(0)); # ctx
+ &mov ("esi",&wparam(1)); # inp
+ &mov ("ecx",&wparam(2)); # len
+&set_label("enter_blocks");
+ &and ("ecx",-15);
+ &jz (&label("nodata"));
+
+ &stack_push(16);
+ &mov ("eax",&DWP(4*6,"edi")); # r0
+ &mov ("ebx",&DWP(4*7,"edi")); # r1
+ &lea ("ebp",&DWP(0,"esi","ecx")); # end of input
+ &mov ("ecx",&DWP(4*8,"edi")); # r2
+ &mov ("edx",&DWP(4*9,"edi")); # r3
+
+ &mov (&wparam(2),"ebp");
+ &mov ("ebp","esi");
+
+ &mov (&DWP($r0,"esp"),"eax"); # r0
+ &mov ("eax","ebx");
+ &shr ("eax",2);
+ &mov (&DWP($r1,"esp"),"ebx"); # r1
+ &add ("eax","ebx"); # s1
+ &mov ("ebx","ecx");
+ &shr ("ebx",2);
+ &mov (&DWP($r2,"esp"),"ecx"); # r2
+ &add ("ebx","ecx"); # s2
+ &mov ("ecx","edx");
+ &shr ("ecx",2);
+ &mov (&DWP($r3,"esp"),"edx"); # r3
+ &add ("ecx","edx"); # s3
+ &mov (&DWP($s1,"esp"),"eax"); # s1
+ &mov (&DWP($s2,"esp"),"ebx"); # s2
+ &mov (&DWP($s3,"esp"),"ecx"); # s3
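+
+	# s_i = r_i + (r_i>>2) is exactly 5*r_i/4, because clamping makes
+	# r1..r3 multiples of 4; the s values fold the 2^130 = 5 (mod p)
+	# wrap-around directly into the multiplication. Identity check in
+	# plain Perl (illustrative only, not emitted code):
+	#
+	#	die "unclamped r" if $r & 3;
+	#	print 5*$r/4 == $r + ($r>>2) ? "ok" : "bug";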
+
+ &mov ("eax",&DWP(4*0,"edi")); # load hash value
+ &mov ("ebx",&DWP(4*1,"edi"));
+ &mov ("ecx",&DWP(4*2,"edi"));
+ &mov ("esi",&DWP(4*3,"edi"));
+ &mov ("edi",&DWP(4*4,"edi"));
+ &jmp (&label("loop"));
+
+&set_label("loop",32);
+ &add ("eax",&DWP(4*0,"ebp")); # accumulate input
+ &adc ("ebx",&DWP(4*1,"ebp"));
+ &adc ("ecx",&DWP(4*2,"ebp"));
+ &adc ("esi",&DWP(4*3,"ebp"));
+ &lea ("ebp",&DWP(4*4,"ebp"));
+ &adc ("edi",&wparam(3)); # padbit
+
+ &mov (&DWP($h0,"esp"),"eax"); # put aside hash[+inp]
+ &mov (&DWP($h3,"esp"),"esi");
+
+ &mul (&DWP($r0,"esp")); # h0*r0
+ &mov (&DWP($h4,"esp"),"edi");
+ &mov ("edi","eax");
+ &mov ("eax","ebx"); # h1
+ &mov ("esi","edx");
+ &mul (&DWP($s3,"esp")); # h1*s3
+ &add ("edi","eax");
+ &mov ("eax","ecx"); # h2
+ &adc ("esi","edx");
+ &mul (&DWP($s2,"esp")); # h2*s2
+ &add ("edi","eax");
+ &mov ("eax",&DWP($h3,"esp"));
+ &adc ("esi","edx");
+ &mul (&DWP($s1,"esp")); # h3*s1
+ &add ("edi","eax");
+ &mov ("eax",&DWP($h0,"esp"));
+ &adc ("esi","edx");
+
+ &mul (&DWP($r1,"esp")); # h0*r1
+ &mov (&DWP($d0,"esp"),"edi");
+ &xor ("edi","edi");
+ &add ("esi","eax");
+ &mov ("eax","ebx"); # h1
+ &adc ("edi","edx");
+ &mul (&DWP($r0,"esp")); # h1*r0
+ &add ("esi","eax");
+ &mov ("eax","ecx"); # h2
+ &adc ("edi","edx");
+ &mul (&DWP($s3,"esp")); # h2*s3
+ &add ("esi","eax");
+ &mov ("eax",&DWP($h3,"esp"));
+ &adc ("edi","edx");
+ &mul (&DWP($s2,"esp")); # h3*s2
+ &add ("esi","eax");
+ &mov ("eax",&DWP($h4,"esp"));
+ &adc ("edi","edx");
+ &imul ("eax",&DWP($s1,"esp")); # h4*s1
+ &add ("esi","eax");
+ &mov ("eax",&DWP($h0,"esp"));
+ &adc ("edi",0);
+
+ &mul (&DWP($r2,"esp")); # h0*r2
+ &mov (&DWP($d1,"esp"),"esi");
+ &xor ("esi","esi");
+ &add ("edi","eax");
+ &mov ("eax","ebx"); # h1
+ &adc ("esi","edx");
+ &mul (&DWP($r1,"esp")); # h1*r1
+ &add ("edi","eax");
+ &mov ("eax","ecx"); # h2
+ &adc ("esi","edx");
+ &mul (&DWP($r0,"esp")); # h2*r0
+ &add ("edi","eax");
+ &mov ("eax",&DWP($h3,"esp"));
+ &adc ("esi","edx");
+ &mul (&DWP($s3,"esp")); # h3*s3
+ &add ("edi","eax");
+ &mov ("eax",&DWP($h4,"esp"));
+ &adc ("esi","edx");
+ &imul ("eax",&DWP($s2,"esp")); # h4*s2
+ &add ("edi","eax");
+ &mov ("eax",&DWP($h0,"esp"));
+ &adc ("esi",0);
+
+ &mul (&DWP($r3,"esp")); # h0*r3
+ &mov (&DWP($d2,"esp"),"edi");
+ &xor ("edi","edi");
+ &add ("esi","eax");
+ &mov ("eax","ebx"); # h1
+ &adc ("edi","edx");
+ &mul (&DWP($r2,"esp")); # h1*r2
+ &add ("esi","eax");
+ &mov ("eax","ecx"); # h2
+ &adc ("edi","edx");
+ &mul (&DWP($r1,"esp")); # h2*r1
+ &add ("esi","eax");
+ &mov ("eax",&DWP($h3,"esp"));
+ &adc ("edi","edx");
+ &mul (&DWP($r0,"esp")); # h3*r0
+ &add ("esi","eax");
+ &mov ("ecx",&DWP($h4,"esp"));
+ &adc ("edi","edx");
+
+ &mov ("edx","ecx");
+ &imul ("ecx",&DWP($s3,"esp")); # h4*s3
+ &add ("esi","ecx");
+ &mov ("eax",&DWP($d0,"esp"));
+ &adc ("edi",0);
+
+ &imul ("edx",&DWP($r0,"esp")); # h4*r0
+ &add ("edx","edi");
+
+ &mov ("ebx",&DWP($d1,"esp"));
+ &mov ("ecx",&DWP($d2,"esp"));
+
+ &mov ("edi","edx"); # last reduction step
+ &shr ("edx",2);
+ &and ("edi",3);
+ &lea ("edx",&DWP(0,"edx","edx",4)); # *5
+ &add ("eax","edx");
+ &adc ("ebx",0);
+ &adc ("ecx",0);
+ &adc ("esi",0);
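+
+	# This is the standard Poly1305 partial reduction: bits of h4 above
+	# position 2 represent multiples of 2^130 = 5 (mod p = 2^130-5), so
+	# they are multiplied by 5 (x + 4*x via the lea) and folded back
+	# into the low limb. Plain-integer sketch (illustrative only):
+	#
+	#	my $carry = $h4 >> 2;		# everything at or above 2^130
+	#	$h4 &= 3;			# keep the low 2 bits of h4
+	#	$h0 += 5*$carry;		# fold carry*5 into h0, with
+	#					# carries rippling into h1..h3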
+
+ &cmp ("ebp",&wparam(2)); # done yet?
+ &jne (&label("loop"));
+
+ &mov ("edx",&wparam(0)); # ctx
+ &stack_pop(16);
+ &mov (&DWP(4*0,"edx"),"eax"); # store hash value
+ &mov (&DWP(4*1,"edx"),"ebx");
+ &mov (&DWP(4*2,"edx"),"ecx");
+ &mov (&DWP(4*3,"edx"),"esi");
+ &mov (&DWP(4*4,"edx"),"edi");
+&set_label("nodata");
+&function_end("poly1305_blocks");
+
+&function_begin("poly1305_emit");
+ &mov ("ebp",&wparam(0)); # context
+&set_label("enter_emit");
+ &mov ("edi",&wparam(1)); # output
+ &mov ("eax",&DWP(4*0,"ebp")); # load hash value
+ &mov ("ebx",&DWP(4*1,"ebp"));
+ &mov ("ecx",&DWP(4*2,"ebp"));
+ &mov ("edx",&DWP(4*3,"ebp"));
+ &mov ("esi",&DWP(4*4,"ebp"));
+
+ &add ("eax",5); # compare to modulus
+ &adc ("ebx",0);
+ &adc ("ecx",0);
+ &adc ("edx",0);
+ &adc ("esi",0);
+ &shr ("esi",2); # did it carry/borrow?
+ &neg ("esi"); # do we choose hash-modulus?
+
+ &and ("eax","esi");
+ &and ("ebx","esi");
+ &and ("ecx","esi");
+ &and ("edx","esi");
+ &mov (&DWP(4*0,"edi"),"eax");
+ &mov (&DWP(4*1,"edi"),"ebx");
+ &mov (&DWP(4*2,"edi"),"ecx");
+ &mov (&DWP(4*3,"edi"),"edx");
+
+ &not ("esi"); # or original hash value?
+ &mov ("eax",&DWP(4*0,"ebp"));
+ &mov ("ebx",&DWP(4*1,"ebp"));
+ &mov ("ecx",&DWP(4*2,"ebp"));
+ &mov ("edx",&DWP(4*3,"ebp"));
+ &mov ("ebp",&wparam(2));
+ &and ("eax","esi");
+ &and ("ebx","esi");
+ &and ("ecx","esi");
+ &and ("edx","esi");
+ &or ("eax",&DWP(4*0,"edi"));
+ &or ("ebx",&DWP(4*1,"edi"));
+ &or ("ecx",&DWP(4*2,"edi"));
+ &or ("edx",&DWP(4*3,"edi"));
+
+ &add ("eax",&DWP(4*0,"ebp")); # accumulate key
+ &adc ("ebx",&DWP(4*1,"ebp"));
+ &adc ("ecx",&DWP(4*2,"ebp"));
+ &adc ("edx",&DWP(4*3,"ebp"));
+
+ &mov (&DWP(4*0,"edi"),"eax");
+ &mov (&DWP(4*1,"edi"),"ebx");
+ &mov (&DWP(4*2,"edi"),"ecx");
+ &mov (&DWP(4*3,"edi"),"edx");
+&function_end("poly1305_emit");
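+
+# poly1305_emit adds 5 to the hash, turns the carry out of bit 130 into an
+# all-ones/all-zeroes mask, and selects between h-p and h without branching.
+# A minimal sketch with arbitrary-precision integers (illustrative only,
+# assumes "use bigint"; $h and $nonce stand for the accumulator and the
+# key words added at the end):
+#
+#	my $ge_p = ($h + 5 >= 2**130) ? 1 : 0;	# derived branchlessly above
+#	my $res  = $ge_p ? ($h + 5) % 2**128	# h+5 mod 2^128 equals h-p here
+#			 : $h % 2**128;
+#	my $tag  = ($res + $nonce) % 2**128;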
+
+if ($sse2) {
+########################################################################
+# Layout of the opaque area is as follows.
+#
+# unsigned __int32 h[5]; # current hash value base 2^26
+# unsigned __int32 is_base2_26;
+# unsigned __int32 r[4]; # key value base 2^32
+# unsigned __int32 pad[2];
+# struct { unsigned __int32 r^4, r^3, r^2, r^1; } r[9];
+#
+# where r^n are the base 2^26 digits of the powers of the multiplier key.
+# There are 5 digits, but the last four are interleaved with their
+# multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3,
+# 5*r3, r4, 5*r4.
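+#
+# Five 26-bit limbs in 32-bit lanes leave 6 bits of headroom each, so
+# 26x26->52-bit limb products plus additions fit in 64 bits and carry
+# propagation can be deferred (the "lazy reduction" below). Radix
+# conversion sketch in plain Perl (illustrative only, needs bigint):
+#
+#	my @h26 = map { ($h >> 26*$_) & 0x3ffffff } (0..4);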
+
+my ($D0,$D1,$D2,$D3,$D4,$T0,$T1,$T2)=map("xmm$_",(0..7));
+my $MASK=$T2; # borrow and keep in mind
+
+&align (32);
+&function_begin_B("_poly1305_init_sse2");
+ &movdqu ($D4,&QWP(4*6,"edi")); # key base 2^32
+ &lea ("edi",&DWP(16*3,"edi")); # size optimization
+ &mov ("ebp","esp");
+ &sub ("esp",16*(9+5));
+ &and ("esp",-16);
+
+ #&pand ($D4,&QWP(96,"ebx")); # magic mask
+ &movq ($MASK,&QWP(64,"ebx"));
+
+ &movdqa ($D0,$D4);
+ &movdqa ($D1,$D4);
+ &movdqa ($D2,$D4);
+
+ &pand ($D0,$MASK); # -> base 2^26
+ &psrlq ($D1,26);
+ &psrldq ($D2,6);
+ &pand ($D1,$MASK);
+ &movdqa ($D3,$D2);
+	&psrlq ($D2,4);
+ &psrlq ($D3,30);
+ &pand ($D2,$MASK);
+ &pand ($D3,$MASK);
+ &psrldq ($D4,13);
+
+ &lea ("edx",&DWP(16*9,"esp")); # size optimization
+ &mov ("ecx",2);
+&set_label("square");
+ &movdqa (&QWP(16*0,"esp"),$D0);
+ &movdqa (&QWP(16*1,"esp"),$D1);
+ &movdqa (&QWP(16*2,"esp"),$D2);
+ &movdqa (&QWP(16*3,"esp"),$D3);
+ &movdqa (&QWP(16*4,"esp"),$D4);
+
+ &movdqa ($T1,$D1);
+ &movdqa ($T0,$D2);
+ &pslld ($T1,2);
+ &pslld ($T0,2);
+ &paddd ($T1,$D1); # *5
+ &paddd ($T0,$D2); # *5
+ &movdqa (&QWP(16*5,"esp"),$T1);
+ &movdqa (&QWP(16*6,"esp"),$T0);
+ &movdqa ($T1,$D3);
+ &movdqa ($T0,$D4);
+ &pslld ($T1,2);
+ &pslld ($T0,2);
+ &paddd ($T1,$D3); # *5
+ &paddd ($T0,$D4); # *5
+ &movdqa (&QWP(16*7,"esp"),$T1);
+ &movdqa (&QWP(16*8,"esp"),$T0);
+
+ &pshufd ($T1,$D0,0b01000100);
+ &movdqa ($T0,$D1);
+ &pshufd ($D1,$D1,0b01000100);
+ &pshufd ($D2,$D2,0b01000100);
+ &pshufd ($D3,$D3,0b01000100);
+ &pshufd ($D4,$D4,0b01000100);
+ &movdqa (&QWP(16*0,"edx"),$T1);
+ &movdqa (&QWP(16*1,"edx"),$D1);
+ &movdqa (&QWP(16*2,"edx"),$D2);
+ &movdqa (&QWP(16*3,"edx"),$D3);
+ &movdqa (&QWP(16*4,"edx"),$D4);
+
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
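+	#
+	# These are the 5x5 schoolbook products with the cross terms that
+	# wrap past 2^130 pre-multiplied by 5 (the s_i values), keeping
+	# each d_i under 64 bits. Direct sketch (illustrative only):
+	#
+	#	for my $i (0..4) {
+	#	    for my $j (0..4) {
+	#		$d[$i] += $h[$j] *
+	#			  ($j <= $i ? $r[$i-$j] : 5*$r[5+$i-$j]);
+	#	    }
+	#	}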
+
+ &pmuludq ($D4,$D0); # h4*r0
+ &pmuludq ($D3,$D0); # h3*r0
+ &pmuludq ($D2,$D0); # h2*r0
+ &pmuludq ($D1,$D0); # h1*r0
+ &pmuludq ($D0,$T1); # h0*r0
+
+sub pmuladd {
+my $load = shift;
+my $base = shift; $base = "esp" if (!defined($base));
+
+ ################################################################
+	# $T0-$T2 are "rotated" in order to move each paddq past the
+	# next multiplication. While this makes the code harder to read
+	# and doesn't have a significant effect on most processors, it
+	# makes a lot of difference on Atom, up to 30% improvement.
+
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&QWP(16*3,$base)); # r1*h3
+ &movdqa ($T2,$T1);
+ &pmuludq ($T1,&QWP(16*2,$base)); # r1*h2
+ &paddq ($D4,$T0);
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&QWP(16*1,$base)); # r1*h1
+ &paddq ($D3,$T1);
+ &$load ($T1,5); # s1
+ &pmuludq ($T0,&QWP(16*0,$base)); # r1*h0
+ &paddq ($D2,$T2);
+ &pmuludq ($T1,&QWP(16*4,$base)); # s1*h4
+ &$load ($T2,2); # r2^n
+ &paddq ($D1,$T0);
+
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&QWP(16*2,$base)); # r2*h2
+ &paddq ($D0,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&QWP(16*1,$base)); # r2*h1
+ &paddq ($D4,$T2);
+ &$load ($T2,6); # s2^n
+ &pmuludq ($T1,&QWP(16*0,$base)); # r2*h0
+ &paddq ($D3,$T0);
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&QWP(16*4,$base)); # s2*h4
+ &paddq ($D2,$T1);
+ &pmuludq ($T0,&QWP(16*3,$base)); # s2*h3
+ &$load ($T1,3); # r3^n
+ &paddq ($D1,$T2);
+
+ &movdqa ($T2,$T1);
+ &pmuludq ($T1,&QWP(16*1,$base)); # r3*h1
+ &paddq ($D0,$T0);
+ &$load ($T0,7); # s3^n
+ &pmuludq ($T2,&QWP(16*0,$base)); # r3*h0
+ &paddq ($D4,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&QWP(16*4,$base)); # s3*h4
+ &paddq ($D3,$T2);
+ &movdqa ($T2,$T1);
+ &pmuludq ($T1,&QWP(16*3,$base)); # s3*h3
+ &paddq ($D2,$T0);
+ &pmuludq ($T2,&QWP(16*2,$base)); # s3*h2
+ &$load ($T0,4); # r4^n
+ &paddq ($D1,$T1);
+
+ &$load ($T1,8); # s4^n
+ &pmuludq ($T0,&QWP(16*0,$base)); # r4*h0
+ &paddq ($D0,$T2);
+ &movdqa ($T2,$T1);
+ &pmuludq ($T1,&QWP(16*4,$base)); # s4*h4
+ &paddq ($D4,$T0);
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&QWP(16*1,$base)); # s4*h1
+ &paddq ($D3,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&QWP(16*2,$base)); # s4*h2
+ &paddq ($D0,$T2);
+ &pmuludq ($T1,&QWP(16*3,$base)); # s4*h3
+ &movdqa ($MASK,&QWP(64,"ebx"));
+ &paddq ($D1,$T0);
+ &paddq ($D2,$T1);
+}
+ &pmuladd (sub { my ($reg,$i)=@_;
+ &movdqa ($reg,&QWP(16*$i,"esp"));
+ },"edx");
+
+sub lazy_reduction {
+ ################################################################
+ # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ # and P. Schwabe
+
+ &movdqa ($T0,$D3);
+ &pand ($D3,$MASK);
+ &psrlq ($T0,26);
+ &paddq ($T0,$D4); # h3 -> h4
+ &movdqa ($T1,$D0);
+ &pand ($D0,$MASK);
+ &psrlq ($T1,26);
+ &movdqa ($D4,$T0);
+ &paddq ($T1,$D1); # h0 -> h1
+ &psrlq ($T0,26);
+ &pand ($D4,$MASK);
+ &movdqa ($D1,$T1);
+ &psrlq ($T1,26);
+ &paddd ($D0,$T0); # favour paddd when
+ # possible, because
+ # paddq is "broken"
+ # on Atom
+ &pand ($D1,$MASK);
+ &paddq ($T1,$D2); # h1 -> h2
+ &psllq ($T0,2);
+ &movdqa ($D2,$T1);
+ &psrlq ($T1,26);
+ &paddd ($T0,$D0); # h4 -> h0
+ &pand ($D2,$MASK);
+ &paddd ($T1,$D3); # h2 -> h3
+ &movdqa ($D0,$T0);
+ &psrlq ($T0,26);
+ &movdqa ($D3,$T1);
+ &psrlq ($T1,26);
+ &pand ($D0,$MASK);
+ &paddd ($D1,$T0); # h0 -> h1
+ &pand ($D3,$MASK);
+ &paddd ($D4,$T1); # h3 -> h4
+}
+ &lazy_reduction ();
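+
+	# Each limb's overflow (bits 26 and up) is carried into the next
+	# limb, with the h4 -> h0 carry multiplied by 5; two carry chains
+	# are interleaved to hide latency. One link of the chain as a
+	# plain-integer sketch (illustrative only):
+	#
+	#	my $c = $h[$i] >> 26;
+	#	$h[$i] &= 0x3ffffff;
+	#	if ($i == 4) { $h[0] += 5*$c; } else { $h[$i+1] += $c; }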
+
+ &dec ("ecx");
+ &jz (&label("square_break"));
+
+ &punpcklqdq ($D0,&QWP(16*0,"esp")); # 0:r^1:0:r^2
+ &punpcklqdq ($D1,&QWP(16*1,"esp"));
+ &punpcklqdq ($D2,&QWP(16*2,"esp"));
+ &punpcklqdq ($D3,&QWP(16*3,"esp"));
+ &punpcklqdq ($D4,&QWP(16*4,"esp"));
+ &jmp (&label("square"));
+
+&set_label("square_break");
+ &psllq ($D0,32); # -> r^3:0:r^4:0
+ &psllq ($D1,32);
+ &psllq ($D2,32);
+ &psllq ($D3,32);
+ &psllq ($D4,32);
+ &por ($D0,&QWP(16*0,"esp")); # r^3:r^1:r^4:r^2
+ &por ($D1,&QWP(16*1,"esp"));
+ &por ($D2,&QWP(16*2,"esp"));
+ &por ($D3,&QWP(16*3,"esp"));
+ &por ($D4,&QWP(16*4,"esp"));
+
+ &pshufd ($D0,$D0,0b10001101); # -> r^1:r^2:r^3:r^4
+ &pshufd ($D1,$D1,0b10001101);
+ &pshufd ($D2,$D2,0b10001101);
+ &pshufd ($D3,$D3,0b10001101);
+ &pshufd ($D4,$D4,0b10001101);
+
+ &movdqu (&QWP(16*0,"edi"),$D0); # save the table
+ &movdqu (&QWP(16*1,"edi"),$D1);
+ &movdqu (&QWP(16*2,"edi"),$D2);
+ &movdqu (&QWP(16*3,"edi"),$D3);
+ &movdqu (&QWP(16*4,"edi"),$D4);
+
+ &movdqa ($T1,$D1);
+ &movdqa ($T0,$D2);
+ &pslld ($T1,2);
+ &pslld ($T0,2);
+ &paddd ($T1,$D1); # *5
+ &paddd ($T0,$D2); # *5
+ &movdqu (&QWP(16*5,"edi"),$T1);
+ &movdqu (&QWP(16*6,"edi"),$T0);
+ &movdqa ($T1,$D3);
+ &movdqa ($T0,$D4);
+ &pslld ($T1,2);
+ &pslld ($T0,2);
+ &paddd ($T1,$D3); # *5
+ &paddd ($T0,$D4); # *5
+ &movdqu (&QWP(16*7,"edi"),$T1);
+ &movdqu (&QWP(16*8,"edi"),$T0);
+
+ &mov ("esp","ebp");
+ &lea ("edi",&DWP(-16*3,"edi")); # size de-optimization
+ &ret ();
+&function_end_B("_poly1305_init_sse2");
+
+&align (32);
+&function_begin("_poly1305_blocks_sse2");
+ &mov ("edi",&wparam(0)); # ctx
+ &mov ("esi",&wparam(1)); # inp
+ &mov ("ecx",&wparam(2)); # len
+
+ &mov ("eax",&DWP(4*5,"edi")); # is_base2_26
+ &and ("ecx",-16);
+ &jz (&label("nodata"));
+ &cmp ("ecx",64);
+ &jae (&label("enter_sse2"));
+ &test ("eax","eax"); # is_base2_26?
+ &jz (&label("enter_blocks"));
+
+&set_label("enter_sse2",16);
+ &call (&label("pic_point"));
+&set_label("pic_point");
+ &blindpop("ebx");
+ &lea ("ebx",&DWP(&label("const_sse2")."-".&label("pic_point"),"ebx"));
+
+ &test ("eax","eax"); # is_base2_26?
+ &jnz (&label("base2_26"));
+
+ &call ("_poly1305_init_sse2");
+
+ ################################################# base 2^32 -> base 2^26
+ &mov ("eax",&DWP(0,"edi"));
+ &mov ("ecx",&DWP(3,"edi"));
+ &mov ("edx",&DWP(6,"edi"));
+ &mov ("esi",&DWP(9,"edi"));
+ &mov ("ebp",&DWP(13,"edi"));
+ &mov (&DWP(4*5,"edi"),1); # is_base2_26
+
+ &shr ("ecx",2);
+ &and ("eax",0x3ffffff);
+ &shr ("edx",4);
+ &and ("ecx",0x3ffffff);
+ &shr ("esi",6);
+ &and ("edx",0x3ffffff);
+
+ &movd ($D0,"eax");
+ &movd ($D1,"ecx");
+ &movd ($D2,"edx");
+ &movd ($D3,"esi");
+ &movd ($D4,"ebp");
+
+ &mov ("esi",&wparam(1)); # [reload] inp
+ &mov ("ecx",&wparam(2)); # [reload] len
+ &jmp (&label("base2_32"));
+
+&set_label("base2_26",16);
+ &movd ($D0,&DWP(4*0,"edi")); # load hash value
+ &movd ($D1,&DWP(4*1,"edi"));
+ &movd ($D2,&DWP(4*2,"edi"));
+ &movd ($D3,&DWP(4*3,"edi"));
+ &movd ($D4,&DWP(4*4,"edi"));
+ &movdqa ($MASK,&QWP(64,"ebx"));
+
+&set_label("base2_32");
+ &mov ("eax",&wparam(3)); # padbit
+ &mov ("ebp","esp");
+
+ &sub ("esp",16*(5+5+5+9+9));
+ &and ("esp",-16);
+
+ &lea ("edi",&DWP(16*3,"edi")); # size optimization
+ &shl ("eax",24); # padbit
+
+ &test ("ecx",31);
+ &jz (&label("even"));
+
+ ################################################################
+	# process a single block, with SSE2, because it's still faster
+	# even though half of the result is discarded
+
+ &movdqu ($T1,&QWP(0,"esi")); # input
+ &lea ("esi",&DWP(16,"esi"));
+
+ &movdqa ($T0,$T1); # -> base 2^26 ...
+ &pand ($T1,$MASK);
+	&paddd ($D0,$T1); # ... and accumulate
+
+ &movdqa ($T1,$T0);
+ &psrlq ($T0,26);
+ &psrldq ($T1,6);
+ &pand ($T0,$MASK);
+ &paddd ($D1,$T0);
+
+ &movdqa ($T0,$T1);
+ &psrlq ($T1,4);
+ &pand ($T1,$MASK);
+ &paddd ($D2,$T1);
+
+ &movdqa ($T1,$T0);
+ &psrlq ($T0,30);
+ &pand ($T0,$MASK);
+ &psrldq ($T1,7);
+ &paddd ($D3,$T0);
+
+ &movd ($T0,"eax"); # padbit
+ &paddd ($D4,$T1);
+ &movd ($T1,&DWP(16*0+12,"edi")); # r0
+ &paddd ($D4,$T0);
+
+ &movdqa (&QWP(16*0,"esp"),$D0);
+ &movdqa (&QWP(16*1,"esp"),$D1);
+ &movdqa (&QWP(16*2,"esp"),$D2);
+ &movdqa (&QWP(16*3,"esp"),$D3);
+ &movdqa (&QWP(16*4,"esp"),$D4);
+
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ &pmuludq ($D0,$T1); # h4*r0
+ &pmuludq ($D1,$T1); # h3*r0
+ &pmuludq ($D2,$T1); # h2*r0
+ &movd ($T0,&DWP(16*1+12,"edi")); # r1
+ &pmuludq ($D3,$T1); # h1*r0
+ &pmuludq ($D4,$T1); # h0*r0
+
+ &pmuladd (sub { my ($reg,$i)=@_;
+ &movd ($reg,&DWP(16*$i+12,"edi"));
+ });
+
+ &lazy_reduction ();
+
+ &sub ("ecx",16);
+ &jz (&label("done"));
+
+&set_label("even");
+ &lea ("edx",&DWP(16*(5+5+5+9),"esp"));# size optimization
+ &lea ("eax",&DWP(-16*2,"esi"));
+ &sub ("ecx",64);
+
+ ################################################################
+ # expand and copy pre-calculated table to stack
+
+ &movdqu ($T0,&QWP(16*0,"edi")); # r^1:r^2:r^3:r^4
+ &pshufd ($T1,$T0,0b01000100); # duplicate r^3:r^4
+ &cmovb ("esi","eax");
+ &pshufd ($T0,$T0,0b11101110); # duplicate r^1:r^2
+ &movdqa (&QWP(16*0,"edx"),$T1);
+ &lea ("eax",&DWP(16*10,"esp"));
+ &movdqu ($T1,&QWP(16*1,"edi"));
+ &movdqa (&QWP(16*(0-9),"edx"),$T0);
+ &pshufd ($T0,$T1,0b01000100);
+ &pshufd ($T1,$T1,0b11101110);
+ &movdqa (&QWP(16*1,"edx"),$T0);
+ &movdqu ($T0,&QWP(16*2,"edi"));
+ &movdqa (&QWP(16*(1-9),"edx"),$T1);
+ &pshufd ($T1,$T0,0b01000100);
+ &pshufd ($T0,$T0,0b11101110);
+ &movdqa (&QWP(16*2,"edx"),$T1);
+ &movdqu ($T1,&QWP(16*3,"edi"));
+ &movdqa (&QWP(16*(2-9),"edx"),$T0);
+ &pshufd ($T0,$T1,0b01000100);
+ &pshufd ($T1,$T1,0b11101110);
+ &movdqa (&QWP(16*3,"edx"),$T0);
+ &movdqu ($T0,&QWP(16*4,"edi"));
+ &movdqa (&QWP(16*(3-9),"edx"),$T1);
+ &pshufd ($T1,$T0,0b01000100);
+ &pshufd ($T0,$T0,0b11101110);
+ &movdqa (&QWP(16*4,"edx"),$T1);
+ &movdqu ($T1,&QWP(16*5,"edi"));
+ &movdqa (&QWP(16*(4-9),"edx"),$T0);
+ &pshufd ($T0,$T1,0b01000100);
+ &pshufd ($T1,$T1,0b11101110);
+ &movdqa (&QWP(16*5,"edx"),$T0);
+ &movdqu ($T0,&QWP(16*6,"edi"));
+ &movdqa (&QWP(16*(5-9),"edx"),$T1);
+ &pshufd ($T1,$T0,0b01000100);
+ &pshufd ($T0,$T0,0b11101110);
+ &movdqa (&QWP(16*6,"edx"),$T1);
+ &movdqu ($T1,&QWP(16*7,"edi"));
+ &movdqa (&QWP(16*(6-9),"edx"),$T0);
+ &pshufd ($T0,$T1,0b01000100);
+ &pshufd ($T1,$T1,0b11101110);
+ &movdqa (&QWP(16*7,"edx"),$T0);
+ &movdqu ($T0,&QWP(16*8,"edi"));
+ &movdqa (&QWP(16*(7-9),"edx"),$T1);
+ &pshufd ($T1,$T0,0b01000100);
+ &pshufd ($T0,$T0,0b11101110);
+ &movdqa (&QWP(16*8,"edx"),$T1);
+ &movdqa (&QWP(16*(8-9),"edx"),$T0);
+
+sub load_input {
+my ($inpbase,$offbase)=@_;
+
+ &movdqu ($T0,&QWP($inpbase+0,"esi")); # load input
+ &movdqu ($T1,&QWP($inpbase+16,"esi"));
+ &lea ("esi",&DWP(16*2,"esi"));
+
+ &movdqa (&QWP($offbase+16*2,"esp"),$D2);
+ &movdqa (&QWP($offbase+16*3,"esp"),$D3);
+ &movdqa (&QWP($offbase+16*4,"esp"),$D4);
+
+ &movdqa ($D2,$T0); # splat input
+ &movdqa ($D3,$T1);
+ &psrldq ($D2,6);
+ &psrldq ($D3,6);
+ &movdqa ($D4,$T0);
+ &punpcklqdq ($D2,$D3); # 2:3
+ &punpckhqdq ($D4,$T1); # 4
+ &punpcklqdq ($T0,$T1); # 0:1
+
+ &movdqa ($D3,$D2);
+ &psrlq ($D2,4);
+ &psrlq ($D3,30);
+ &movdqa ($T1,$T0);
+ &psrlq ($D4,40); # 4
+ &psrlq ($T1,26);
+ &pand ($T0,$MASK); # 0
+ &pand ($T1,$MASK); # 1
+ &pand ($D2,$MASK); # 2
+ &pand ($D3,$MASK); # 3
+ &por ($D4,&QWP(0,"ebx")); # padbit, yes, always
+
+ &movdqa (&QWP($offbase+16*0,"esp"),$D0) if ($offbase);
+ &movdqa (&QWP($offbase+16*1,"esp"),$D1) if ($offbase);
+}
+ &load_input (16*2,16*5);
+
+ &jbe (&label("skip_loop"));
+ &jmp (&label("loop"));
+
+&set_label("loop",32);
+ ################################################################
+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ # \___________________/
+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ # \___________________/ \____________________/
+ ################################################################
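+	# In other words, even- and odd-indexed blocks are accumulated in
+	# separate vector lanes, each multiplied by r^2 per iteration; the
+	# tail then multiplies the lanes by r^2:r^1 so that every block
+	# ends up at its proper power of r. Scalar sketch of one iteration
+	# (illustrative only, needs bigint):
+	#
+	#	$acc[0] = ($acc[0] + $m[2*$i])   * $r2 % $p;	# even blocks
+	#	$acc[1] = ($acc[1] + $m[2*$i+1]) * $r2 % $p;	# odd blocks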
+
+ &movdqa ($T2,&QWP(16*(0-9),"edx")); # r0^2
+ &movdqa (&QWP(16*1,"eax"),$T1);
+ &movdqa (&QWP(16*2,"eax"),$D2);
+ &movdqa (&QWP(16*3,"eax"),$D3);
+ &movdqa (&QWP(16*4,"eax"),$D4);
+
+ ################################################################
+ # d4 = h4*r0 + h0*r4 + h1*r3 + h2*r2 + h3*r1
+ # d3 = h3*r0 + h0*r3 + h1*r2 + h2*r1 + h4*5*r4
+ # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
+ # d1 = h1*r0 + h0*r1 + h2*5*r4 + h3*5*r3 + h4*5*r2
+ # d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
+
+ &movdqa ($D1,$T0);
+ &pmuludq ($T0,$T2); # h0*r0
+ &movdqa ($D0,$T1);
+ &pmuludq ($T1,$T2); # h1*r0
+ &pmuludq ($D2,$T2); # h2*r0
+ &pmuludq ($D3,$T2); # h3*r0
+ &pmuludq ($D4,$T2); # h4*r0
+
+sub pmuladd_alt {
+my $addr = shift;
+
+ &pmuludq ($D0,&$addr(8)); # h1*s4
+ &movdqa ($T2,$D1);
+ &pmuludq ($D1,&$addr(1)); # h0*r1
+ &paddq ($D0,$T0);
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&$addr(2)); # h0*r2
+ &paddq ($D1,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&$addr(3)); # h0*r3
+ &paddq ($D2,$T2);
+ &movdqa ($T2,&QWP(16*1,"eax")); # pull h1
+ &pmuludq ($T1,&$addr(4)); # h0*r4
+ &paddq ($D3,$T0);
+
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&$addr(1)); # h1*r1
+ &paddq ($D4,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&$addr(2)); # h1*r2
+ &paddq ($D2,$T2);
+ &movdqa ($T2,&QWP(16*2,"eax")); # pull h2
+ &pmuludq ($T1,&$addr(3)); # h1*r3
+ &paddq ($D3,$T0);
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&$addr(7)); # h2*s3
+ &paddq ($D4,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&$addr(8)); # h2*s4
+ &paddq ($D0,$T2);
+
+ &movdqa ($T2,$T1);
+ &pmuludq ($T1,&$addr(1)); # h2*r1
+ &paddq ($D1,$T0);
+ &movdqa ($T0,&QWP(16*3,"eax")); # pull h3
+ &pmuludq ($T2,&$addr(2)); # h2*r2
+ &paddq ($D3,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&$addr(6)); # h3*s2
+ &paddq ($D4,$T2);
+ &movdqa ($T2,$T1);
+ &pmuludq ($T1,&$addr(7)); # h3*s3
+ &paddq ($D0,$T0);
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&$addr(8)); # h3*s4
+ &paddq ($D1,$T1);
+
+ &movdqa ($T1,&QWP(16*4,"eax")); # pull h4
+ &pmuludq ($T0,&$addr(1)); # h3*r1
+ &paddq ($D2,$T2);
+ &movdqa ($T2,$T1);
+ &pmuludq ($T1,&$addr(8)); # h4*s4
+ &paddq ($D4,$T0);
+ &movdqa ($T0,$T2);
+ &pmuludq ($T2,&$addr(5)); # h4*s1
+ &paddq ($D3,$T1);
+ &movdqa ($T1,$T0);
+ &pmuludq ($T0,&$addr(6)); # h4*s2
+ &paddq ($D0,$T2);
+ &movdqa ($MASK,&QWP(64,"ebx"));
+ &pmuludq ($T1,&$addr(7)); # h4*s3
+ &paddq ($D1,$T0);
+ &paddq ($D2,$T1);
+}
+ &pmuladd_alt (sub { my $i=shift; &QWP(16*($i-9),"edx"); });
+
+ &load_input (-16*2,0);
+ &lea ("eax",&DWP(-16*2,"esi"));
+ &sub ("ecx",64);
+
+ &paddd ($T0,&QWP(16*(5+0),"esp")); # add hash value
+ &paddd ($T1,&QWP(16*(5+1),"esp"));
+ &paddd ($D2,&QWP(16*(5+2),"esp"));
+ &paddd ($D3,&QWP(16*(5+3),"esp"));
+ &paddd ($D4,&QWP(16*(5+4),"esp"));
+
+ &cmovb ("esi","eax");
+ &lea ("eax",&DWP(16*10,"esp"));
+
+ &movdqa ($T2,&QWP(16*0,"edx")); # r0^4
+ &movdqa (&QWP(16*1,"esp"),$D1);
+ &movdqa (&QWP(16*1,"eax"),$T1);
+ &movdqa (&QWP(16*2,"eax"),$D2);
+ &movdqa (&QWP(16*3,"eax"),$D3);
+ &movdqa (&QWP(16*4,"eax"),$D4);
+
+ ################################################################
+ # d4 += h4*r0 + h0*r4 + h1*r3 + h2*r2 + h3*r1
+ # d3 += h3*r0 + h0*r3 + h1*r2 + h2*r1 + h4*5*r4
+ # d2 += h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
+ # d1 += h1*r0 + h0*r1 + h2*5*r4 + h3*5*r3 + h4*5*r2
+ # d0 += h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
+
+ &movdqa ($D1,$T0);
+ &pmuludq ($T0,$T2); # h0*r0
+ &paddq ($T0,$D0);
+ &movdqa ($D0,$T1);
+ &pmuludq ($T1,$T2); # h1*r0
+ &pmuludq ($D2,$T2); # h2*r0
+ &pmuludq ($D3,$T2); # h3*r0
+ &pmuludq ($D4,$T2); # h4*r0
+
+ &paddq ($T1,&QWP(16*1,"esp"));
+ &paddq ($D2,&QWP(16*2,"esp"));
+ &paddq ($D3,&QWP(16*3,"esp"));
+ &paddq ($D4,&QWP(16*4,"esp"));
+
+ &pmuladd_alt (sub { my $i=shift; &QWP(16*$i,"edx"); });
+
+ &lazy_reduction ();
+
+ &load_input (16*2,16*5);
+
+ &ja (&label("loop"));
+
+&set_label("skip_loop");
+ ################################################################
+ # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ &pshufd ($T2,&QWP(16*(0-9),"edx"),0x10);# r0^n
+ &add ("ecx",32);
+ &jnz (&label("long_tail"));
+
+ &paddd ($T0,$D0); # add hash value
+ &paddd ($T1,$D1);
+ &paddd ($D2,&QWP(16*7,"esp"));
+ &paddd ($D3,&QWP(16*8,"esp"));
+ &paddd ($D4,&QWP(16*9,"esp"));
+
+&set_label("long_tail");
+
+ &movdqa (&QWP(16*0,"eax"),$T0);
+ &movdqa (&QWP(16*1,"eax"),$T1);
+ &movdqa (&QWP(16*2,"eax"),$D2);
+ &movdqa (&QWP(16*3,"eax"),$D3);
+ &movdqa (&QWP(16*4,"eax"),$D4);
+
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ &pmuludq ($T0,$T2); # h0*r0
+ &pmuludq ($T1,$T2); # h1*r0
+ &pmuludq ($D2,$T2); # h2*r0
+ &movdqa ($D0,$T0);
+ &pshufd ($T0,&QWP(16*(1-9),"edx"),0x10);# r1^n
+ &pmuludq ($D3,$T2); # h3*r0
+ &movdqa ($D1,$T1);
+ &pmuludq ($D4,$T2); # h4*r0
+
+ &pmuladd (sub { my ($reg,$i)=@_;
+ &pshufd ($reg,&QWP(16*($i-9),"edx"),0x10);
+ },"eax");
+
+ &jz (&label("short_tail"));
+
+ &load_input (-16*2,0);
+
+ &pshufd ($T2,&QWP(16*0,"edx"),0x10); # r0^n
+ &paddd ($T0,&QWP(16*5,"esp")); # add hash value
+ &paddd ($T1,&QWP(16*6,"esp"));
+ &paddd ($D2,&QWP(16*7,"esp"));
+ &paddd ($D3,&QWP(16*8,"esp"));
+ &paddd ($D4,&QWP(16*9,"esp"));
+
+ ################################################################
+ # multiply inp[0:1] by r^4:r^3 and accumulate
+
+ &movdqa (&QWP(16*0,"esp"),$T0);
+ &pmuludq ($T0,$T2); # h0*r0
+ &movdqa (&QWP(16*1,"esp"),$T1);
+ &pmuludq ($T1,$T2); # h1*r0
+ &paddq ($D0,$T0);
+ &movdqa ($T0,$D2);
+ &pmuludq ($D2,$T2); # h2*r0
+ &paddq ($D1,$T1);
+ &movdqa ($T1,$D3);
+ &pmuludq ($D3,$T2); # h3*r0
+ &paddq ($D2,&QWP(16*2,"esp"));
+ &movdqa (&QWP(16*2,"esp"),$T0);
+ &pshufd ($T0,&QWP(16*1,"edx"),0x10); # r1^n
+ &paddq ($D3,&QWP(16*3,"esp"));
+ &movdqa (&QWP(16*3,"esp"),$T1);
+ &movdqa ($T1,$D4);
+ &pmuludq ($D4,$T2); # h4*r0
+ &paddq ($D4,&QWP(16*4,"esp"));
+ &movdqa (&QWP(16*4,"esp"),$T1);
+
+ &pmuladd (sub { my ($reg,$i)=@_;
+ &pshufd ($reg,&QWP(16*$i,"edx"),0x10);
+ });
+
+&set_label("short_tail");
+
+ &lazy_reduction ();
+
+ ################################################################
+ # horizontal addition
+
+ &pshufd ($T1,$D0,0b01001110);
+ &pshufd ($T0,$D1,0b01001110);
+ &paddd ($D0,$T1);
+ &pshufd ($T1,$D2,0b01001110);
+ &paddd ($D1,$T0);
+ &pshufd ($T0,$D3,0b01001110);
+ &paddd ($D2,$T1);
+ &pshufd ($T1,$D4,0b01001110);
+ &paddd ($D3,$T0);
+ &paddd ($D4,$T1);
+
+&set_label("done");
+ &movd (&DWP(-16*3+4*0,"edi"),$D0); # store hash value
+ &movd (&DWP(-16*3+4*1,"edi"),$D1);
+ &movd (&DWP(-16*3+4*2,"edi"),$D2);
+ &movd (&DWP(-16*3+4*3,"edi"),$D3);
+ &movd (&DWP(-16*3+4*4,"edi"),$D4);
+&set_label("nodata");
+ &mov ("esp","ebp");
+&function_end("_poly1305_blocks_sse2");
+
+&align (32);
+&function_begin("_poly1305_emit_sse2");
+ &mov ("ebp",&wparam(0)); # context
+
+ &cmp (&DWP(4*5,"ebp"),0); # is_base2_26?
+ &je (&label("enter_emit"));
+
+ &mov ("eax",&DWP(4*0,"ebp")); # load hash value
+ &mov ("edi",&DWP(4*1,"ebp"));
+ &mov ("ecx",&DWP(4*2,"ebp"));
+ &mov ("edx",&DWP(4*3,"ebp"));
+ &mov ("esi",&DWP(4*4,"ebp"));
+
+ &mov ("ebx","edi"); # base 2^26 -> base 2^32
+ &shl ("edi",26);
+ &shr ("ebx",6);
+ &add ("eax","edi");
+ &mov ("edi","ecx");
+ &adc ("ebx",0);
+
+ &shl ("edi",20);
+ &shr ("ecx",12);
+ &add ("ebx","edi");
+ &mov ("edi","edx");
+ &adc ("ecx",0);
+
+ &shl ("edi",14);
+ &shr ("edx",18);
+ &add ("ecx","edi");
+ &mov ("edi","esi");
+ &adc ("edx",0);
+
+ &shl ("edi",8);
+ &shr ("esi",24);
+ &add ("edx","edi");
+ &adc ("esi",0); # can be partially reduced
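+
+	# The shift/or ladder above reassembles five 26-bit limbs into four
+	# 32-bit words: word k combines the top bits of limb k with the low
+	# bits of limb k+1. Equivalent sketch (illustrative only, needs
+	# bigint for the 130-bit intermediate):
+	#
+	#	my $h = 0; $h += $h26[$_] << 26*$_ for (0..4);
+	#	my @h32 = map { ($h >> 32*$_) & 0xffffffff } (0..3);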
+
+ &mov ("edi","esi"); # final reduction
+ &and ("esi",3);
+ &shr ("edi",2);
+ &lea ("ebp",&DWP(0,"edi","edi",4)); # *5
+ &mov ("edi",&wparam(1)); # output
+	&add ("eax","ebp");
+	&mov ("ebp",&wparam(2)); # key
+	&adc ("ebx",0);
+	&adc ("ecx",0);
+	&adc ("edx",0);
+
+ &movd ($D0,"eax"); # offload original hash value
+ &add ("eax",5); # compare to modulus
+ &movd ($D1,"ebx");
+ &adc ("ebx",0);
+ &movd ($D2,"ecx");
+ &adc ("ecx",0);
+ &movd ($D3,"edx");
+ &adc ("edx",0);
+ &adc ("esi",0);
+ &shr ("esi",2); # did it carry/borrow?
+
+ &neg ("esi"); # do we choose (hash-modulus) ...
+ &and ("eax","esi");
+ &and ("ebx","esi");
+ &and ("ecx","esi");
+ &and ("edx","esi");
+ &mov (&DWP(4*0,"edi"),"eax");
+ &movd ("eax",$D0);
+ &mov (&DWP(4*1,"edi"),"ebx");
+ &movd ("ebx",$D1);
+ &mov (&DWP(4*2,"edi"),"ecx");
+ &movd ("ecx",$D2);
+ &mov (&DWP(4*3,"edi"),"edx");
+ &movd ("edx",$D3);
+
+ &not ("esi"); # ... or original hash value?
+ &and ("eax","esi");
+ &and ("ebx","esi");
+ &or ("eax",&DWP(4*0,"edi"));
+ &and ("ecx","esi");
+ &or ("ebx",&DWP(4*1,"edi"));
+ &and ("edx","esi");
+ &or ("ecx",&DWP(4*2,"edi"));
+ &or ("edx",&DWP(4*3,"edi"));
+
+ &add ("eax",&DWP(4*0,"ebp")); # accumulate key
+ &adc ("ebx",&DWP(4*1,"ebp"));
+ &mov (&DWP(4*0,"edi"),"eax");
+ &adc ("ecx",&DWP(4*2,"ebp"));
+ &mov (&DWP(4*1,"edi"),"ebx");
+ &adc ("edx",&DWP(4*3,"ebp"));
+ &mov (&DWP(4*2,"edi"),"ecx");
+ &mov (&DWP(4*3,"edi"),"edx");
+&function_end("_poly1305_emit_sse2");
+
+if ($avx>1) {
+########################################################################
+# Note that _poly1305_init_avx2 operates on %xmm; I could have used
+# _poly1305_init_sse2...
+
+&align (32);
+&function_begin_B("_poly1305_init_avx2");
+ &vmovdqu ($D4,&QWP(4*6,"edi")); # key base 2^32
+ &lea ("edi",&DWP(16*3,"edi")); # size optimization
+ &mov ("ebp","esp");
+ &sub ("esp",16*(9+5));
+ &and ("esp",-16);
+
+ #&vpand ($D4,$D4,&QWP(96,"ebx")); # magic mask
+ &vmovdqa ($MASK,&QWP(64,"ebx"));
+
+ &vpand ($D0,$D4,$MASK); # -> base 2^26
+ &vpsrlq ($D1,$D4,26);
+ &vpsrldq ($D3,$D4,6);
+ &vpand ($D1,$D1,$MASK);
+	&vpsrlq ($D2,$D3,4);
+ &vpsrlq ($D3,$D3,30);
+ &vpand ($D2,$D2,$MASK);
+ &vpand ($D3,$D3,$MASK);
+ &vpsrldq ($D4,$D4,13);
+
+ &lea ("edx",&DWP(16*9,"esp")); # size optimization
+ &mov ("ecx",2);
+&set_label("square");
+ &vmovdqa (&QWP(16*0,"esp"),$D0);
+ &vmovdqa (&QWP(16*1,"esp"),$D1);
+ &vmovdqa (&QWP(16*2,"esp"),$D2);
+ &vmovdqa (&QWP(16*3,"esp"),$D3);
+ &vmovdqa (&QWP(16*4,"esp"),$D4);
+
+ &vpslld ($T1,$D1,2);
+ &vpslld ($T0,$D2,2);
+ &vpaddd ($T1,$T1,$D1); # *5
+ &vpaddd ($T0,$T0,$D2); # *5
+ &vmovdqa (&QWP(16*5,"esp"),$T1);
+ &vmovdqa (&QWP(16*6,"esp"),$T0);
+ &vpslld ($T1,$D3,2);
+ &vpslld ($T0,$D4,2);
+ &vpaddd ($T1,$T1,$D3); # *5
+ &vpaddd ($T0,$T0,$D4); # *5
+ &vmovdqa (&QWP(16*7,"esp"),$T1);
+ &vmovdqa (&QWP(16*8,"esp"),$T0);
+
+ &vpshufd ($T0,$D0,0b01000100);
+ &vmovdqa ($T1,$D1);
+ &vpshufd ($D1,$D1,0b01000100);
+ &vpshufd ($D2,$D2,0b01000100);
+ &vpshufd ($D3,$D3,0b01000100);
+ &vpshufd ($D4,$D4,0b01000100);
+ &vmovdqa (&QWP(16*0,"edx"),$T0);
+ &vmovdqa (&QWP(16*1,"edx"),$D1);
+ &vmovdqa (&QWP(16*2,"edx"),$D2);
+ &vmovdqa (&QWP(16*3,"edx"),$D3);
+ &vmovdqa (&QWP(16*4,"edx"),$D4);
+
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ &vpmuludq ($D4,$D4,$D0); # h4*r0
+ &vpmuludq ($D3,$D3,$D0); # h3*r0
+ &vpmuludq ($D2,$D2,$D0); # h2*r0
+ &vpmuludq ($D1,$D1,$D0); # h1*r0
+ &vpmuludq ($D0,$T0,$D0); # h0*r0
+
+ &vpmuludq ($T0,$T1,&QWP(16*3,"edx")); # r1*h3
+ &vpaddq ($D4,$D4,$T0);
+ &vpmuludq ($T2,$T1,&QWP(16*2,"edx")); # r1*h2
+ &vpaddq ($D3,$D3,$T2);
+ &vpmuludq ($T0,$T1,&QWP(16*1,"edx")); # r1*h1
+ &vpaddq ($D2,$D2,$T0);
+ &vmovdqa ($T2,&QWP(16*5,"esp")); # s1
+ &vpmuludq ($T1,$T1,&QWP(16*0,"edx")); # r1*h0
+ &vpaddq ($D1,$D1,$T1);
+ &vmovdqa ($T0,&QWP(16*2,"esp")); # r2
+ &vpmuludq ($T2,$T2,&QWP(16*4,"edx")); # s1*h4
+ &vpaddq ($D0,$D0,$T2);
+
+ &vpmuludq ($T1,$T0,&QWP(16*2,"edx")); # r2*h2
+ &vpaddq ($D4,$D4,$T1);
+ &vpmuludq ($T2,$T0,&QWP(16*1,"edx")); # r2*h1
+ &vpaddq ($D3,$D3,$T2);
+ &vmovdqa ($T1,&QWP(16*6,"esp")); # s2
+ &vpmuludq ($T0,$T0,&QWP(16*0,"edx")); # r2*h0
+ &vpaddq ($D2,$D2,$T0);
+ &vpmuludq ($T2,$T1,&QWP(16*4,"edx")); # s2*h4
+ &vpaddq ($D1,$D1,$T2);
+ &vmovdqa ($T0,&QWP(16*3,"esp")); # r3
+ &vpmuludq ($T1,$T1,&QWP(16*3,"edx")); # s2*h3
+ &vpaddq ($D0,$D0,$T1);
+
+ &vpmuludq ($T2,$T0,&QWP(16*1,"edx")); # r3*h1
+ &vpaddq ($D4,$D4,$T2);
+ &vmovdqa ($T1,&QWP(16*7,"esp")); # s3
+ &vpmuludq ($T0,$T0,&QWP(16*0,"edx")); # r3*h0
+ &vpaddq ($D3,$D3,$T0);
+ &vpmuludq ($T2,$T1,&QWP(16*4,"edx")); # s3*h4
+ &vpaddq ($D2,$D2,$T2);
+ &vpmuludq ($T0,$T1,&QWP(16*3,"edx")); # s3*h3
+ &vpaddq ($D1,$D1,$T0);
+ &vmovdqa ($T2,&QWP(16*4,"esp")); # r4
+ &vpmuludq ($T1,$T1,&QWP(16*2,"edx")); # s3*h2
+ &vpaddq ($D0,$D0,$T1);
+
+ &vmovdqa ($T0,&QWP(16*8,"esp")); # s4
+ &vpmuludq ($T2,$T2,&QWP(16*0,"edx")); # r4*h0
+ &vpaddq ($D4,$D4,$T2);
+ &vpmuludq ($T1,$T0,&QWP(16*4,"edx")); # s4*h4
+ &vpaddq ($D3,$D3,$T1);
+ &vpmuludq ($T2,$T0,&QWP(16*1,"edx")); # s4*h1
+ &vpaddq ($D0,$D0,$T2);
+ &vpmuludq ($T1,$T0,&QWP(16*2,"edx")); # s4*h2
+ &vpaddq ($D1,$D1,$T1);
+ &vmovdqa ($MASK,&QWP(64,"ebx"));
+ &vpmuludq ($T0,$T0,&QWP(16*3,"edx")); # s4*h3
+ &vpaddq ($D2,$D2,$T0);
+
+ ################################################################
+ # lazy reduction
+ &vpsrlq ($T0,$D3,26);
+ &vpand ($D3,$D3,$MASK);
+ &vpsrlq ($T1,$D0,26);
+ &vpand ($D0,$D0,$MASK);
+ &vpaddq ($D4,$D4,$T0); # h3 -> h4
+ &vpaddq ($D1,$D1,$T1); # h0 -> h1
+ &vpsrlq ($T0,$D4,26);
+ &vpand ($D4,$D4,$MASK);
+ &vpsrlq ($T1,$D1,26);
+ &vpand ($D1,$D1,$MASK);
+ &vpaddq ($D2,$D2,$T1); # h1 -> h2
+ &vpaddd ($D0,$D0,$T0);
+ &vpsllq ($T0,$T0,2);
+ &vpsrlq ($T1,$D2,26);
+ &vpand ($D2,$D2,$MASK);
+ &vpaddd ($D0,$D0,$T0); # h4 -> h0
+ &vpaddd ($D3,$D3,$T1); # h2 -> h3
+ &vpsrlq ($T1,$D3,26);
+ &vpsrlq ($T0,$D0,26);
+ &vpand ($D0,$D0,$MASK);
+ &vpand ($D3,$D3,$MASK);
+ &vpaddd ($D1,$D1,$T0); # h0 -> h1
+ &vpaddd ($D4,$D4,$T1); # h3 -> h4
+
+ &dec ("ecx");
+ &jz (&label("square_break"));
+
+ &vpunpcklqdq ($D0,$D0,&QWP(16*0,"esp")); # 0:r^1:0:r^2
+ &vpunpcklqdq ($D1,$D1,&QWP(16*1,"esp"));
+ &vpunpcklqdq ($D2,$D2,&QWP(16*2,"esp"));
+ &vpunpcklqdq ($D3,$D3,&QWP(16*3,"esp"));
+ &vpunpcklqdq ($D4,$D4,&QWP(16*4,"esp"));
+ &jmp (&label("square"));
+
+&set_label("square_break");
+ &vpsllq ($D0,$D0,32); # -> r^3:0:r^4:0
+ &vpsllq ($D1,$D1,32);
+ &vpsllq ($D2,$D2,32);
+ &vpsllq ($D3,$D3,32);
+ &vpsllq ($D4,$D4,32);
+ &vpor ($D0,$D0,&QWP(16*0,"esp")); # r^3:r^1:r^4:r^2
+ &vpor ($D1,$D1,&QWP(16*1,"esp"));
+ &vpor ($D2,$D2,&QWP(16*2,"esp"));
+ &vpor ($D3,$D3,&QWP(16*3,"esp"));
+ &vpor ($D4,$D4,&QWP(16*4,"esp"));
+
+ &vpshufd ($D0,$D0,0b10001101); # -> r^1:r^2:r^3:r^4
+ &vpshufd ($D1,$D1,0b10001101);
+ &vpshufd ($D2,$D2,0b10001101);
+ &vpshufd ($D3,$D3,0b10001101);
+ &vpshufd ($D4,$D4,0b10001101);
+
+ &vmovdqu (&QWP(16*0,"edi"),$D0); # save the table
+ &vmovdqu (&QWP(16*1,"edi"),$D1);
+ &vmovdqu (&QWP(16*2,"edi"),$D2);
+ &vmovdqu (&QWP(16*3,"edi"),$D3);
+ &vmovdqu (&QWP(16*4,"edi"),$D4);
+
+ &vpslld ($T1,$D1,2);
+ &vpslld ($T0,$D2,2);
+ &vpaddd ($T1,$T1,$D1); # *5
+ &vpaddd ($T0,$T0,$D2); # *5
+ &vmovdqu (&QWP(16*5,"edi"),$T1);
+ &vmovdqu (&QWP(16*6,"edi"),$T0);
+ &vpslld ($T1,$D3,2);
+ &vpslld ($T0,$D4,2);
+ &vpaddd ($T1,$T1,$D3); # *5
+ &vpaddd ($T0,$T0,$D4); # *5
+ &vmovdqu (&QWP(16*7,"edi"),$T1);
+ &vmovdqu (&QWP(16*8,"edi"),$T0);
+
+ &mov ("esp","ebp");
+ &lea ("edi",&DWP(-16*3,"edi")); # size de-optimization
+ &ret ();
+&function_end_B("_poly1305_init_avx2");
+
+########################################################################
+# now it's time to switch to %ymm
+
+my ($D0,$D1,$D2,$D3,$D4,$T0,$T1,$T2)=map("ymm$_",(0..7));
+my $MASK=$T2;
+
+sub X { my $reg=shift; $reg=~s/^ymm/xmm/; $reg; }
+
+&align (32);
+&function_begin("_poly1305_blocks_avx2");
+ &mov ("edi",&wparam(0)); # ctx
+ &mov ("esi",&wparam(1)); # inp
+ &mov ("ecx",&wparam(2)); # len
+
+ &mov ("eax",&DWP(4*5,"edi")); # is_base2_26
+ &and ("ecx",-16);
+ &jz (&label("nodata"));
+ &cmp ("ecx",64);
+ &jae (&label("enter_avx2"));
+ &test ("eax","eax"); # is_base2_26?
+ &jz (&label("enter_blocks"));
+
+&set_label("enter_avx2",16);
+ &vzeroupper ();
+
+ &call (&label("pic_point"));
+&set_label("pic_point");
+ &blindpop("ebx");
+ &lea ("ebx",&DWP(&label("const_sse2")."-".&label("pic_point"),"ebx"));
+
+ &test ("eax","eax"); # is_base2_26?
+ &jnz (&label("base2_26"));
+
+ &call ("_poly1305_init_avx2");
+
+ ################################################# base 2^32 -> base 2^26
+ &mov ("eax",&DWP(0,"edi"));
+ &mov ("ecx",&DWP(3,"edi"));
+ &mov ("edx",&DWP(6,"edi"));
+ &mov ("esi",&DWP(9,"edi"));
+ &mov ("ebp",&DWP(13,"edi"));
+
+ &shr ("ecx",2);
+ &and ("eax",0x3ffffff);
+ &shr ("edx",4);
+ &and ("ecx",0x3ffffff);
+ &shr ("esi",6);
+ &and ("edx",0x3ffffff);
+
+ &mov (&DWP(4*0,"edi"),"eax");
+ &mov (&DWP(4*1,"edi"),"ecx");
+ &mov (&DWP(4*2,"edi"),"edx");
+ &mov (&DWP(4*3,"edi"),"esi");
+ &mov (&DWP(4*4,"edi"),"ebp");
+ &mov (&DWP(4*5,"edi"),1); # is_base2_26
+
+ &mov ("esi",&wparam(1)); # [reload] inp
+ &mov ("ecx",&wparam(2)); # [reload] len
+
+&set_label("base2_26");
+ &mov ("eax",&wparam(3)); # padbit
+ &mov ("ebp","esp");
+
+ &sub ("esp",32*(5+9));
+ &and ("esp",-512); # ensure that frame
+ # doesn't cross page
+ # boundary, which is
+ # essential for
+ # misaligned 32-byte
+ # loads
+
+ ################################################################
+ # expand and copy pre-calculated table to stack
+
+ &vmovdqu (&X($D0),&QWP(16*(3+0),"edi"));
+ &lea ("edx",&DWP(32*5+128,"esp")); # +128 size optimization
+ &vmovdqu (&X($D1),&QWP(16*(3+1),"edi"));
+ &vmovdqu (&X($D2),&QWP(16*(3+2),"edi"));
+ &vmovdqu (&X($D3),&QWP(16*(3+3),"edi"));
+ &vmovdqu (&X($D4),&QWP(16*(3+4),"edi"));
+ &lea ("edi",&DWP(16*3,"edi")); # size optimization
+ &vpermq ($D0,$D0,0b01000000); # 00001234 -> 12343434
+ &vpermq ($D1,$D1,0b01000000);
+ &vpermq ($D2,$D2,0b01000000);
+ &vpermq ($D3,$D3,0b01000000);
+ &vpermq ($D4,$D4,0b01000000);
+ &vpshufd ($D0,$D0,0b11001000); # 12343434 -> 14243444
+ &vpshufd ($D1,$D1,0b11001000);
+ &vpshufd ($D2,$D2,0b11001000);
+ &vpshufd ($D3,$D3,0b11001000);
+ &vpshufd ($D4,$D4,0b11001000);
+ &vmovdqa (&QWP(32*0-128,"edx"),$D0);
+ &vmovdqu (&X($D0),&QWP(16*5,"edi"));
+ &vmovdqa (&QWP(32*1-128,"edx"),$D1);
+ &vmovdqu (&X($D1),&QWP(16*6,"edi"));
+ &vmovdqa (&QWP(32*2-128,"edx"),$D2);
+ &vmovdqu (&X($D2),&QWP(16*7,"edi"));
+ &vmovdqa (&QWP(32*3-128,"edx"),$D3);
+ &vmovdqu (&X($D3),&QWP(16*8,"edi"));
+ &vmovdqa (&QWP(32*4-128,"edx"),$D4);
+ &vpermq ($D0,$D0,0b01000000);
+ &vpermq ($D1,$D1,0b01000000);
+ &vpermq ($D2,$D2,0b01000000);
+ &vpermq ($D3,$D3,0b01000000);
+ &vpshufd ($D0,$D0,0b11001000);
+ &vpshufd ($D1,$D1,0b11001000);
+ &vpshufd ($D2,$D2,0b11001000);
+ &vpshufd ($D3,$D3,0b11001000);
+ &vmovdqa (&QWP(32*5-128,"edx"),$D0);
+ &vmovd (&X($D0),&DWP(-16*3+4*0,"edi"));# load hash value
+ &vmovdqa (&QWP(32*6-128,"edx"),$D1);
+ &vmovd (&X($D1),&DWP(-16*3+4*1,"edi"));
+ &vmovdqa (&QWP(32*7-128,"edx"),$D2);
+ &vmovd (&X($D2),&DWP(-16*3+4*2,"edi"));
+ &vmovdqa (&QWP(32*8-128,"edx"),$D3);
+ &vmovd (&X($D3),&DWP(-16*3+4*3,"edi"));
+ &vmovd (&X($D4),&DWP(-16*3+4*4,"edi"));
+ &vmovdqa ($MASK,&QWP(64,"ebx"));
+ &neg ("eax"); # padbit
+
+ &test ("ecx",63);
+ &jz (&label("even"));
+
+ &mov ("edx","ecx");
+ &and ("ecx",-64);
+ &and ("edx",63);
+
+ &vmovdqu (&X($T0),&QWP(16*0,"esi"));
+ &cmp ("edx",32);
+ &jb (&label("one"));
+
+ &vmovdqu (&X($T1),&QWP(16*1,"esi"));
+ &je (&label("two"));
+
+ &vinserti128 ($T0,$T0,&QWP(16*2,"esi"),1);
+ &lea ("esi",&DWP(16*3,"esi"));
+ &lea ("ebx",&DWP(8,"ebx")); # three padbits
+ &lea ("edx",&DWP(32*5+128+8,"esp")); # --:r^1:r^2:r^3 (*)
+ &jmp (&label("tail"));
+
+&set_label("two");
+ &lea ("esi",&DWP(16*2,"esi"));
+ &lea ("ebx",&DWP(16,"ebx")); # two padbits
+ &lea ("edx",&DWP(32*5+128+16,"esp"));# --:--:r^1:r^2 (*)
+ &jmp (&label("tail"));
+
+&set_label("one");
+ &lea ("esi",&DWP(16*1,"esi"));
+ &vpxor ($T1,$T1,$T1);
+ &lea ("ebx",&DWP(32,"ebx","eax",8)); # one or no padbits
+ &lea ("edx",&DWP(32*5+128+24,"esp"));# --:--:--:r^1 (*)
+ &jmp (&label("tail"));
+
+# (*) spots marked with '--' are data from the next table entry, but they
+# are multiplied by 0 and therefore rendered insignificant
+
+&set_label("even",32);
+ &vmovdqu (&X($T0),&QWP(16*0,"esi")); # load input
+ &vmovdqu (&X($T1),&QWP(16*1,"esi"));
+ &vinserti128 ($T0,$T0,&QWP(16*2,"esi"),1);
+ &vinserti128 ($T1,$T1,&QWP(16*3,"esi"),1);
+ &lea ("esi",&DWP(16*4,"esi"));
+ &sub ("ecx",64);
+ &jz (&label("tail"));
+
+&set_label("loop");
+ ################################################################
+ # ((inp[0]*r^4+r[4])*r^4+r[8])*r^4
+ # ((inp[1]*r^4+r[5])*r^4+r[9])*r^3
+ # ((inp[2]*r^4+r[6])*r^4+r[10])*r^2
+ # ((inp[3]*r^4+r[7])*r^4+r[11])*r^1
+ # \________/ \_______/
+ ################################################################
+
+sub vsplat_input {
+ &vmovdqa (&QWP(32*2,"esp"),$D2);
+ &vpsrldq ($D2,$T0,6); # splat input
+ &vmovdqa (&QWP(32*0,"esp"),$D0);
+ &vpsrldq ($D0,$T1,6);
+ &vmovdqa (&QWP(32*1,"esp"),$D1);
+ &vpunpckhqdq ($D1,$T0,$T1); # 4
+ &vpunpcklqdq ($T0,$T0,$T1); # 0:1
+ &vpunpcklqdq ($D2,$D2,$D0); # 2:3
+
+ &vpsrlq ($D0,$D2,30);
+ &vpsrlq ($D2,$D2,4);
+ &vpsrlq ($T1,$T0,26);
+ &vpsrlq ($D1,$D1,40); # 4
+ &vpand ($D2,$D2,$MASK); # 2
+ &vpand ($T0,$T0,$MASK); # 0
+ &vpand ($T1,$T1,$MASK); # 1
+ &vpand ($D0,$D0,$MASK); # 3 (*)
+ &vpor ($D1,$D1,&QWP(0,"ebx")); # padbit, yes, always
+
+	# (*) note that the output is counterintuitive: inp[3:4] is
+	# returned in $D1-2, while $D3-4 are preserved;
+}
+ &vsplat_input ();
+
+sub vpmuladd {
+my $addr = shift;
+
+ &vpaddq ($D2,$D2,&QWP(32*2,"esp")); # add hash value
+ &vpaddq ($T0,$T0,&QWP(32*0,"esp"));
+ &vpaddq ($T1,$T1,&QWP(32*1,"esp"));
+ &vpaddq ($D0,$D0,$D3);
+ &vpaddq ($D1,$D1,$D4);
+
+ ################################################################
+ # d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
+ # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
+ # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
+ # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
+ # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
+
+ &vpmuludq ($D3,$D2,&$addr(1)); # d3 = h2*r1
+ &vmovdqa (QWP(32*1,"esp"),$T1);
+ &vpmuludq ($D4,$D2,&$addr(2)); # d4 = h2*r2
+ &vmovdqa (QWP(32*3,"esp"),$D0);
+ &vpmuludq ($D0,$D2,&$addr(7)); # d0 = h2*s3
+ &vmovdqa (QWP(32*4,"esp"),$D1);
+ &vpmuludq ($D1,$D2,&$addr(8)); # d1 = h2*s4
+ &vpmuludq ($D2,$D2,&$addr(0)); # d2 = h2*r0
+
+ &vpmuludq ($T2,$T0,&$addr(3)); # h0*r3
+ &vpaddq ($D3,$D3,$T2); # d3 += h0*r3
+ &vpmuludq ($T1,$T0,&$addr(4)); # h0*r4
+	&vpaddq ($D4,$D4,$T1); # d4 += h0*r4
+ &vpmuludq ($T2,$T0,&$addr(0)); # h0*r0
+	&vpaddq ($D0,$D0,$T2); # d0 += h0*r0
+ &vmovdqa ($T2,&QWP(32*1,"esp")); # h1
+ &vpmuludq ($T1,$T0,&$addr(1)); # h0*r1
+ &vpaddq ($D1,$D1,$T1); # d1 += h0*r1
+ &vpmuludq ($T0,$T0,&$addr(2)); # h0*r2
+ &vpaddq ($D2,$D2,$T0); # d2 += h0*r2
+
+ &vpmuludq ($T1,$T2,&$addr(2)); # h1*r2
+ &vpaddq ($D3,$D3,$T1); # d3 += h1*r2
+ &vpmuludq ($T0,$T2,&$addr(3)); # h1*r3
+ &vpaddq ($D4,$D4,$T0); # d4 += h1*r3
+ &vpmuludq ($T1,$T2,&$addr(8)); # h1*s4
+ &vpaddq ($D0,$D0,$T1); # d0 += h1*s4
+ &vmovdqa ($T1,&QWP(32*3,"esp")); # h3
+ &vpmuludq ($T0,$T2,&$addr(0)); # h1*r0
+ &vpaddq ($D1,$D1,$T0); # d1 += h1*r0
+ &vpmuludq ($T2,$T2,&$addr(1)); # h1*r1
+ &vpaddq ($D2,$D2,$T2); # d2 += h1*r1
+
+ &vpmuludq ($T0,$T1,&$addr(0)); # h3*r0
+ &vpaddq ($D3,$D3,$T0); # d3 += h3*r0
+ &vpmuludq ($T2,$T1,&$addr(1)); # h3*r1
+ &vpaddq ($D4,$D4,$T2); # d4 += h3*r1
+ &vpmuludq ($T0,$T1,&$addr(6)); # h3*s2
+ &vpaddq ($D0,$D0,$T0); # d0 += h3*s2
+ &vmovdqa ($T0,&QWP(32*4,"esp")); # h4
+ &vpmuludq ($T2,$T1,&$addr(7)); # h3*s3
+	&vpaddq ($D1,$D1,$T2); # d1 += h3*s3
+ &vpmuludq ($T1,$T1,&$addr(8)); # h3*s4
+ &vpaddq ($D2,$D2,$T1); # d2 += h3*s4
+
+ &vpmuludq ($T2,$T0,&$addr(8)); # h4*s4
+ &vpaddq ($D3,$D3,$T2); # d3 += h4*s4
+ &vpmuludq ($T1,$T0,&$addr(5)); # h4*s1
+ &vpaddq ($D0,$D0,$T1); # d0 += h4*s1
+ &vpmuludq ($T2,$T0,&$addr(0)); # h4*r0
+ &vpaddq ($D4,$D4,$T2); # d4 += h4*r0
+ &vmovdqa ($MASK,&QWP(64,"ebx"));
+ &vpmuludq ($T1,$T0,&$addr(6)); # h4*s2
+ &vpaddq ($D1,$D1,$T1); # d1 += h4*s2
+ &vpmuludq ($T0,$T0,&$addr(7)); # h4*s3
+ &vpaddq ($D2,$D2,$T0); # d2 += h4*s3
+}
+ &vpmuladd (sub { my $i=shift; &QWP(32*$i-128,"edx"); });
+
+sub vlazy_reduction {
+ ################################################################
+ # lazy reduction
+
+ &vpsrlq ($T0,$D3,26);
+ &vpand ($D3,$D3,$MASK);
+ &vpsrlq ($T1,$D0,26);
+ &vpand ($D0,$D0,$MASK);
+ &vpaddq ($D4,$D4,$T0); # h3 -> h4
+ &vpaddq ($D1,$D1,$T1); # h0 -> h1
+ &vpsrlq ($T0,$D4,26);
+ &vpand ($D4,$D4,$MASK);
+ &vpsrlq ($T1,$D1,26);
+ &vpand ($D1,$D1,$MASK);
+ &vpaddq ($D2,$D2,$T1); # h1 -> h2
+ &vpaddd ($D0,$D0,$T0);
+ &vpsllq ($T0,$T0,2);
+ &vpsrlq ($T1,$D2,26);
+ &vpand ($D2,$D2,$MASK);
+ &vpaddd ($D0,$D0,$T0); # h4 -> h0
+ &vpaddd ($D3,$D3,$T1); # h2 -> h3
+ &vpsrlq ($T1,$D3,26);
+ &vpsrlq ($T0,$D0,26);
+ &vpand ($D0,$D0,$MASK);
+ &vpand ($D3,$D3,$MASK);
+ &vpaddd ($D1,$D1,$T0); # h0 -> h1
+ &vpaddd ($D4,$D4,$T1); # h3 -> h4
+}
+ &vlazy_reduction();
+
+ &vmovdqu (&X($T0),&QWP(16*0,"esi")); # load input
+ &vmovdqu (&X($T1),&QWP(16*1,"esi"));
+ &vinserti128 ($T0,$T0,&QWP(16*2,"esi"),1);
+ &vinserti128 ($T1,$T1,&QWP(16*3,"esi"),1);
+ &lea ("esi",&DWP(16*4,"esi"));
+ &sub ("ecx",64);
+ &jnz (&label("loop"));
+
+&set_label("tail");
+ &vsplat_input ();
+ &and ("ebx",-64); # restore pointer
+
+ &vpmuladd (sub { my $i=shift; &QWP(4+32*$i-128,"edx"); });
+
+ &vlazy_reduction();
+
+ ################################################################
+ # horizontal addition
+
+ &vpsrldq ($T0,$D0,8);
+ &vpsrldq ($T1,$D1,8);
+ &vpaddq ($D0,$D0,$T0);
+ &vpsrldq ($T0,$D2,8);
+ &vpaddq ($D1,$D1,$T1);
+ &vpsrldq ($T1,$D3,8);
+ &vpaddq ($D2,$D2,$T0);
+ &vpsrldq ($T0,$D4,8);
+ &vpaddq ($D3,$D3,$T1);
+ &vpermq ($T1,$D0,2); # keep folding
+ &vpaddq ($D4,$D4,$T0);
+ &vpermq ($T0,$D1,2);
+ &vpaddq ($D0,$D0,$T1);
+ &vpermq ($T1,$D2,2);
+ &vpaddq ($D1,$D1,$T0);
+ &vpermq ($T0,$D3,2);
+ &vpaddq ($D2,$D2,$T1);
+ &vpermq ($T1,$D4,2);
+ &vpaddq ($D3,$D3,$T0);
+ &vpaddq ($D4,$D4,$T1);
+
+ &cmp ("ecx",0);
+ &je (&label("done"));
+
+ ################################################################
+ # clear all but single word
+
+ &vpshufd (&X($D0),&X($D0),0b11111100);
+ &lea ("edx",&DWP(32*5+128,"esp")); # restore pointer
+ &vpshufd (&X($D1),&X($D1),0b11111100);
+ &vpshufd (&X($D2),&X($D2),0b11111100);
+ &vpshufd (&X($D3),&X($D3),0b11111100);
+ &vpshufd (&X($D4),&X($D4),0b11111100);
+ &jmp (&label("even"));
+
+&set_label("done",16);
+ &vmovd (&DWP(-16*3+4*0,"edi"),"xmm0"); # store hash value
+ &vmovd (&DWP(-16*3+4*1,"edi"),"xmm1");
+ &vmovd (&DWP(-16*3+4*2,"edi"),"xmm2");
+ &vmovd (&DWP(-16*3+4*3,"edi"),"xmm3");
+ &vmovd (&DWP(-16*3+4*4,"edi"),"xmm4");
+ &vzeroupper ();
+&set_label("nodata");
+ &mov ("esp","ebp");
+&function_end("_poly1305_blocks_avx2");
+}
+&set_label("const_sse2",64);
+ &data_word(1<<24,0, 1<<24,0, 1<<24,0, 1<<24,0);
+ &data_word(0,0, 0,0, 0,0, 0,0);
+ &data_word(0x03ffffff,0,0x03ffffff,0, 0x03ffffff,0, 0x03ffffff,0);
+ &data_word(0x0fffffff,0x0ffffffc,0x0ffffffc,0x0ffffffc);
+}
+&asciz ("Poly1305 for x86, CRYPTOGAMS by <appro\@openssl.org>");
+&align (4);
+
+&asm_finish();
diff --git a/crypto/poly1305/asm/poly1305-x86_64.pl b/crypto/poly1305/asm/poly1305-x86_64.pl
new file mode 100755
index 00000000..b827d24b
--- /dev/null
+++ b/crypto/poly1305/asm/poly1305-x86_64.pl
@@ -0,0 +1,2246 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements Poly1305 hash for x86_64.
+#
+# March 2015
+#
+# Numbers are cycles per processed byte with poly1305_blocks alone,
+# measured with rdtsc at fixed clock frequency.
+#
+# IALU/gcc-4.8(*) AVX(**) AVX2
+# P4 4.90/+120% -
+# Core 2 2.39/+90% -
+# Westmere 1.86/+120% -
+# Sandy Bridge 1.39/+140% 1.10
+# Haswell 1.10/+175% 1.11 0.65
+# Skylake 1.12/+120% 0.96 0.51
+# Silvermont 2.83/+95% -
+# VIA Nano 1.82/+150% -
+# Sledgehammer 1.38/+160% -
+# Bulldozer 2.21/+130% 0.97
+#
+# (*) improvement coefficients relative to clang are more modest,
+#	~50% on most processors; in both cases we are comparing to
+#	__int128 code;
+# (**) an SSE2 implementation was attempted, but among non-AVX processors
+#	it was faster than integer-only code only on older Intel P4 and
+#	Core processors, by 50-30% (less the newer the processor is),
+#	while slower on contemporary ones, for example almost 2x slower
+#	on Atom; as the former are naturally disappearing, SSE2 is
+#	deemed unnecessary;
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=12);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+}
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
+my ($mac,$nonce)=($inp,$len); # *_emit arguments
+my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
+my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
+
+sub poly1305_iteration {
+# input: copy of $r1 in %rax, $h0-$h2, $r0-$r1
+# output: $h0-$h2 *= $r0-$r1
+$code.=<<___;
+ mulq $h0 # h0*r1
+ mov %rax,$d2
+ mov $r0,%rax
+ mov %rdx,$d3
+
+ mulq $h0 # h0*r0
+ mov %rax,$h0 # future $h0
+ mov $r0,%rax
+ mov %rdx,$d1
+
+ mulq $h1 # h1*r0
+ add %rax,$d2
+ mov $s1,%rax
+ adc %rdx,$d3
+
+ mulq $h1 # h1*s1
+ mov $h2,$h1 # borrow $h1
+ add %rax,$h0
+ adc %rdx,$d1
+
+ imulq $s1,$h1 # h2*s1
+ add $h1,$d2
+ mov $d1,$h1
+ adc \$0,$d3
+
+ imulq $r0,$h2 # h2*r0
+ add $d2,$h1
+ mov \$-4,%rax # mask value
+ adc $h2,$d3
+
+ and $d3,%rax # last reduction step
+ mov $d3,$h2
+ shr \$2,$d3
+ and \$3,$h2
+ add $d3,%rax
+ add %rax,$h0
+ adc \$0,$h1
+___
+}
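+
+# The iteration above multiplies the 2.5-limb hash (h2 carries only a few
+# bits) by the two-limb key, using s1 = r1 + (r1>>2) to fold the wrap-around
+# at 2^130 back in, and leaves h partially reduced. Up to that partial
+# reduction it computes (illustrative sketch, assumes "use bigint"):
+#
+#	$h = $h * $r % (2**130 - 5);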
+
+########################################################################
+# Layout of the opaque area is as follows.
+#
+# unsigned __int64 h[3]; # current hash value base 2^64
+# unsigned __int64 r[2]; # key value base 2^64
+
+$code.=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.globl poly1305_init
+.globl poly1305_blocks
+.globl poly1305_emit
+.type poly1305_init,\@function,3
+.align 32
+poly1305_init:
+ xor %rax,%rax
+ mov %rax,0($ctx) # initialize hash value
+ mov %rax,8($ctx)
+ mov %rax,16($ctx)
+
+ cmp \$0,$inp
+ je .Lno_key
+
+ lea poly1305_blocks(%rip),%r10
+ lea poly1305_emit(%rip),%r11
+___
+$code.=<<___ if ($avx);
+ mov OPENSSL_ia32cap_P+4(%rip),%r9
+ lea poly1305_blocks_avx(%rip),%rax
+ lea poly1305_emit_avx(%rip),%rcx
+ bt \$`60-32`,%r9 # AVX?
+ cmovc %rax,%r10
+ cmovc %rcx,%r11
+___
+$code.=<<___ if ($avx>1);
+ lea poly1305_blocks_avx2(%rip),%rax
+ bt \$`5+32`,%r9 # AVX2?
+ cmovc %rax,%r10
+___
+$code.=<<___;
+ mov \$0x0ffffffc0fffffff,%rax
+ mov \$0x0ffffffc0ffffffc,%rcx
+ and 0($inp),%rax
+ and 8($inp),%rcx
+ mov %rax,24($ctx)
+ mov %rcx,32($ctx)
+
+ mov %r10,0(%rdx)
+ mov %r11,8(%rdx)
+
+ mov \$1,%eax
+.Lno_key:
+ ret
+.size poly1305_init,.-poly1305_init
+
+.type poly1305_blocks,\@function,4
+.align 32
+poly1305_blocks:
+.Lblocks:
+ sub \$16,$len # too short?
+ jc .Lno_data
+
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lblocks_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ mov 0($ctx),$h0 # load hash value
+ mov 8($ctx),$h1
+ mov 16($ctx),$h2
+
+ mov $s1,$r1
+ shr \$2,$s1
+ mov $r1,%rax
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+ jmp .Loop
+
+.align 32
+.Loop:
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+___
+ &poly1305_iteration();
+$code.=<<___;
+ mov $r1,%rax
+ sub \$16,%r15 # len-=16
+ jnc .Loop
+
+ mov $h0,0($ctx) # store hash value
+ mov $h1,8($ctx)
+ mov $h2,16($ctx)
+
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%r12
+ mov 32(%rsp),%rbp
+ mov 40(%rsp),%rbx
+ lea 48(%rsp),%rsp
+.Lno_data:
+.Lblocks_epilogue:
+ ret
+.size poly1305_blocks,.-poly1305_blocks
+
+.type poly1305_emit,\@function,3
+.align 32
+poly1305_emit:
+.Lemit:
+ mov 0($ctx),%r8 # load hash value
+ mov 8($ctx),%r9
+ mov 16($ctx),%r10
+
+ mov %r8,%rax
+ add \$5,%r8 # compare to modulus
+ mov %r9,%rcx
+ adc \$0,%r9
+ adc \$0,%r10
+	shr	\$2,%r10		# did the 130-bit value overflow?
+ cmovnz %r8,%rax
+ cmovnz %r9,%rcx
+
+ add 0($nonce),%rax # accumulate nonce
+ adc 8($nonce),%rcx
+ mov %rax,0($mac) # write result
+ mov %rcx,8($mac)
+
+ ret
+.size poly1305_emit,.-poly1305_emit
+___
+if ($avx) {
+
+########################################################################
+# Layout of the opaque area is as follows.
+#
+# unsigned __int32 h[5]; # current hash value base 2^26
+# unsigned __int32 is_base2_26;
+# unsigned __int64 r[2]; # key value base 2^64
+# unsigned __int64 pad;
+# struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
+#
+# where r^n are the base 2^26 digits of the powers of the multiplier key.
+# There are 5 digits, but the last four are interleaved with their
+# multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3,
+# 5*r3, r4, 5*r4.
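+#
+# A sketch of one 9-element row as it would be built for a single power
+# r^n (illustrative only; @r holds the five base 2^26 digits of r^n):
+#
+#	my @row = ($r[0], $r[1], 5*$r[1], $r[2], 5*$r[2],
+#		   $r[3], 5*$r[3], $r[4], 5*$r[4]);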
+
+my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
+ map("%xmm$_",(0..15));
+
+$code.=<<___;
+.type __poly1305_block,\@abi-omnipotent
+.align 32
+__poly1305_block:
+___
+ &poly1305_iteration();
+$code.=<<___;
+ ret
+.size __poly1305_block,.-__poly1305_block
+
+.type __poly1305_init_avx,\@abi-omnipotent
+.align 32
+__poly1305_init_avx:
+ mov $r0,$h0
+ mov $r1,$h1
+ xor $h2,$h2
+
+ lea 48+64($ctx),$ctx # size optimization
+
+ mov $r1,%rax
+ call __poly1305_block # r^2
+
+ mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26
+ mov \$0x3ffffff,%edx
+ mov $h0,$d1
+ and $h0#d,%eax
+ mov $r0,$d2
+ and $r0#d,%edx
+ mov %eax,`16*0+0-64`($ctx)
+ shr \$26,$d1
+ mov %edx,`16*0+4-64`($ctx)
+ shr \$26,$d2
+
+ mov \$0x3ffffff,%eax
+ mov \$0x3ffffff,%edx
+ and $d1#d,%eax
+ and $d2#d,%edx
+ mov %eax,`16*1+0-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov %edx,`16*1+4-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ mov %eax,`16*2+0-64`($ctx)
+ shr \$26,$d1
+ mov %edx,`16*2+4-64`($ctx)
+ shr \$26,$d2
+
+ mov $h1,%rax
+ mov $r1,%rdx
+ shl \$12,%rax
+ shl \$12,%rdx
+ or $d1,%rax
+ or $d2,%rdx
+ and \$0x3ffffff,%eax
+ and \$0x3ffffff,%edx
+ mov %eax,`16*3+0-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov %edx,`16*3+4-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ mov %eax,`16*4+0-64`($ctx)
+ mov $h1,$d1
+ mov %edx,`16*4+4-64`($ctx)
+ mov $r1,$d2
+
+ mov \$0x3ffffff,%eax
+ mov \$0x3ffffff,%edx
+ shr \$14,$d1
+ shr \$14,$d2
+ and $d1#d,%eax
+ and $d2#d,%edx
+ mov %eax,`16*5+0-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov %edx,`16*5+4-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ mov %eax,`16*6+0-64`($ctx)
+ shr \$26,$d1
+ mov %edx,`16*6+4-64`($ctx)
+ shr \$26,$d2
+
+ mov $h2,%rax
+ shl \$24,%rax
+ or %rax,$d1
+ mov $d1#d,`16*7+0-64`($ctx)
+ lea ($d1,$d1,4),$d1 # *5
+ mov $d2#d,`16*7+4-64`($ctx)
+ lea ($d2,$d2,4),$d2 # *5
+ mov $d1#d,`16*8+0-64`($ctx)
+ mov $d2#d,`16*8+4-64`($ctx)
+
+ mov $r1,%rax
+ call __poly1305_block # r^3
+
+ mov \$0x3ffffff,%eax # save r^3 base 2^26
+ mov $h0,$d1
+ and $h0#d,%eax
+ shr \$26,$d1
+ mov %eax,`16*0+12-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ and $d1#d,%edx
+ mov %edx,`16*1+12-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*2+12-64`($ctx)
+
+ mov $h1,%rax
+ shl \$12,%rax
+ or $d1,%rax
+ and \$0x3ffffff,%eax
+ mov %eax,`16*3+12-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov $h1,$d1
+ mov %eax,`16*4+12-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ shr \$14,$d1
+ and $d1#d,%edx
+ mov %edx,`16*5+12-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*6+12-64`($ctx)
+
+ mov $h2,%rax
+ shl \$24,%rax
+ or %rax,$d1
+ mov $d1#d,`16*7+12-64`($ctx)
+ lea ($d1,$d1,4),$d1 # *5
+ mov $d1#d,`16*8+12-64`($ctx)
+
+ mov $r1,%rax
+ call __poly1305_block # r^4
+
+ mov \$0x3ffffff,%eax # save r^4 base 2^26
+ mov $h0,$d1
+ and $h0#d,%eax
+ shr \$26,$d1
+ mov %eax,`16*0+8-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ and $d1#d,%edx
+ mov %edx,`16*1+8-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*2+8-64`($ctx)
+
+ mov $h1,%rax
+ shl \$12,%rax
+ or $d1,%rax
+ and \$0x3ffffff,%eax
+ mov %eax,`16*3+8-64`($ctx)
+ lea (%rax,%rax,4),%eax # *5
+ mov $h1,$d1
+ mov %eax,`16*4+8-64`($ctx)
+
+ mov \$0x3ffffff,%edx
+ shr \$14,$d1
+ and $d1#d,%edx
+ mov %edx,`16*5+8-64`($ctx)
+ lea (%rdx,%rdx,4),%edx # *5
+ shr \$26,$d1
+ mov %edx,`16*6+8-64`($ctx)
+
+ mov $h2,%rax
+ shl \$24,%rax
+ or %rax,$d1
+ mov $d1#d,`16*7+8-64`($ctx)
+ lea ($d1,$d1,4),$d1 # *5
+ mov $d1#d,`16*8+8-64`($ctx)
+
+ lea -48-64($ctx),$ctx # size [de-]optimization
+ ret
+.size __poly1305_init_avx,.-__poly1305_init_avx
+
+.type poly1305_blocks_avx,\@function,4
+.align 32
+poly1305_blocks_avx:
+ mov 20($ctx),%r8d # is_base2_26
+ cmp \$128,$len
+ jae .Lblocks_avx
+ test %r8d,%r8d
+ jz .Lblocks
+
+.Lblocks_avx:
+ and \$-16,$len
+ jz .Lno_data_avx
+
+ vzeroupper
+
+ test %r8d,%r8d
+ jz .Lbase2_64_avx
+
+ test \$31,$len
+ jz .Leven_avx
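+
+	# Dispatch: inputs shorter than 128 bytes with the hash still in
+	# base 2^64 go to the scalar code (.Lblocks); otherwise a single
+	# odd 16-byte block, if any, is processed in scalar form first so
+	# that the vector path always sees whole 32-byte pairs (.Leven_avx).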
+
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lblocks_avx_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 0($ctx),$d1 # load hash value
+ mov 8($ctx),$d2
+ mov 16($ctx),$h2#d
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ ################################# base 2^26 -> base 2^64
+ mov $d1#d,$h0#d
+ and \$-1<<31,$d1
+ mov $d2,$r1 # borrow $r1
+ mov $d2#d,$h1#d
+ and \$-1<<31,$d2
+
+ shr \$6,$d1
+ shl \$52,$r1
+ add $d1,$h0
+ shr \$12,$h1
+ shr \$18,$d2
+ add $r1,$h0
+ adc $d2,$h1
+
+ mov $h2,$d1
+ shl \$40,$d1
+ shr \$24,$h2
+ add $d1,$h1
+ adc \$0,$h2 # can be partially reduced...
+
+ mov \$-4,$d2 # ... so reduce
+ mov $h2,$d1
+ and $h2,$d2
+ shr \$2,$d1
+ and \$3,$h2
+ add $d2,$d1 # =*5
+ add $d1,$h0
+ adc \$0,$h1
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
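+
+	# Scalar model of the conversion and partial reduction above
+	# (comment only):
+	#
+	#	h  = l0 + (l1<<26) + (l2<<52) + (l3<<78) + (l4<<104)
+	#	h0 = h mod 2^64; h1 = (h>>64) mod 2^64; h2 = h>>128
+	#	c  = h2>>2; h2 &= 3		# 2^130*c folds back as 5*c
+	#	h0 += 5*c, carrying into h1
+	#
+	# s1 = r1 + (r1>>2) equals 5*r1/4, exact because clamping clears
+	# the low two bits of r1; it lets __poly1305_block fold the 2^130
+	# wrap-around without a separate multiplication by 5.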
+
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+
+ call __poly1305_block
+
+ test $padbit,$padbit # if $padbit is zero,
+ jz .Lstore_base2_64_avx # store hash in base 2^64 format
+
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$r0
+ mov $h1,$r1
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$r0
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $r0,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$r1
+ and \$0x3ffffff,$h1 # h[3]
+ or $r1,$h2 # h[4]
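+
+	# Scalar model of the split above (comment only):
+	#
+	#	l0 =   h0        & 0x3ffffff
+	#	l1 =  (h0 >> 26) & 0x3ffffff
+	#	l2 = ((h0 >> 52) | (h1 << 12)) & 0x3ffffff
+	#	l3 =  (h1 >> 14) & 0x3ffffff
+	#	l4 =  (h1 >> 40) | (h2 << 24)	# two top bits ride in l4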
+
+ sub \$16,%r15
+ jz .Lstore_base2_26_avx
+
+ vmovd %rax#d,$H0
+ vmovd %rdx#d,$H1
+ vmovd $h0#d,$H2
+ vmovd $h1#d,$H3
+ vmovd $h2#d,$H4
+ jmp .Lproceed_avx
+
+.align 32
+.Lstore_base2_64_avx:
+ mov $h0,0($ctx)
+ mov $h1,8($ctx)
+ mov $h2,16($ctx) # note that is_base2_26 is zeroed
+ jmp .Ldone_avx
+
+.align 16
+.Lstore_base2_26_avx:
+ mov %rax#d,0($ctx) # store hash value base 2^26
+ mov %rdx#d,4($ctx)
+ mov $h0#d,8($ctx)
+ mov $h1#d,12($ctx)
+ mov $h2#d,16($ctx)
+.align 16
+.Ldone_avx:
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%r12
+ mov 32(%rsp),%rbp
+ mov 40(%rsp),%rbx
+ lea 48(%rsp),%rsp
+.Lno_data_avx:
+.Lblocks_avx_epilogue:
+ ret
+
+.align 32
+.Lbase2_64_avx:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lbase2_64_avx_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ mov 0($ctx),$h0 # load hash value
+ mov 8($ctx),$h1
+ mov 16($ctx),$h2#d
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+
+ test \$31,$len
+ jz .Linit_avx
+
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+ sub \$16,%r15
+
+ call __poly1305_block
+
+.Linit_avx:
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$d1
+ mov $h1,$d2
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$d1
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $d1,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$d2
+ and \$0x3ffffff,$h1 # h[3]
+ or $d2,$h2 # h[4]
+
+ vmovd %rax#d,$H0
+ vmovd %rdx#d,$H1
+ vmovd $h0#d,$H2
+ vmovd $h1#d,$H3
+ vmovd $h2#d,$H4
+ movl \$1,20($ctx) # set is_base2_26
+
+ call __poly1305_init_avx
+
+.Lproceed_avx:
+ mov %r15,$len
+
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%r12
+ mov 32(%rsp),%rbp
+ mov 40(%rsp),%rbx
+ lea 48(%rsp),%rax
+ lea 48(%rsp),%rsp
+.Lbase2_64_avx_epilogue:
+ jmp .Ldo_avx
+
+.align 32
+.Leven_avx:
+ vmovd 4*0($ctx),$H0 # load hash value
+ vmovd 4*1($ctx),$H1
+ vmovd 4*2($ctx),$H2
+ vmovd 4*3($ctx),$H3
+ vmovd 4*4($ctx),$H4
+
+.Ldo_avx:
+___
+$code.=<<___ if (!$win64);
+ lea -0x58(%rsp),%r11
+ sub \$0x178,%rsp
+___
+$code.=<<___ if ($win64);
+ lea -0xf8(%rsp),%r11
+ sub \$0x218,%rsp
+ vmovdqa %xmm6,0x50(%r11)
+ vmovdqa %xmm7,0x60(%r11)
+ vmovdqa %xmm8,0x70(%r11)
+ vmovdqa %xmm9,0x80(%r11)
+ vmovdqa %xmm10,0x90(%r11)
+ vmovdqa %xmm11,0xa0(%r11)
+ vmovdqa %xmm12,0xb0(%r11)
+ vmovdqa %xmm13,0xc0(%r11)
+ vmovdqa %xmm14,0xd0(%r11)
+ vmovdqa %xmm15,0xe0(%r11)
+.Ldo_avx_body:
+___
+$code.=<<___;
+ sub \$64,$len
+ lea -32($inp),%rax
+ cmovc %rax,$inp
+
+ vmovdqu `16*3`($ctx),$D4 # preload r0^2
+ lea `16*3+64`($ctx),$ctx # size optimization
+ lea .Lconst(%rip),%rcx
+
+ ################################################################
+ # load input
+ vmovdqu 16*2($inp),$T0
+ vmovdqu 16*3($inp),$T1
+ vmovdqa 64(%rcx),$MASK # .Lmask26
+
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpsrldq \$6,$T1,$T3
+ vpunpckhqdq $T1,$T0,$T4 # 4
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+ vpunpcklqdq $T3,$T2,$T3 # 2:3
+
+ vpsrlq \$40,$T4,$T4 # 4
+ vpsrlq \$26,$T0,$T1
+ vpand $MASK,$T0,$T0 # 0
+ vpsrlq \$4,$T3,$T2
+ vpand $MASK,$T1,$T1 # 1
+ vpsrlq \$30,$T3,$T3
+ vpand $MASK,$T2,$T2 # 2
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
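+
+	# The splat above turns two 16-byte blocks (one per 64-bit lane)
+	# into five vectors of 26-bit limbs. Per-block scalar model
+	# (comment only; le128 is a hypothetical little-endian load):
+	#
+	#	m  = le128(block)
+	#	t0 =  m         & 0x3ffffff
+	#	t1 = (m >>  26) & 0x3ffffff
+	#	t2 = (m >>  52) & 0x3ffffff
+	#	t3 = (m >>  78) & 0x3ffffff
+	#	t4 = (m >> 104) | (1 << 24)	# padbit
+	#
+	# the byte-shifted copies made with vpsrldq by 6 exist so that t2
+	# and t3 can be reached with plain 64-bit lane shifts.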
+
+ jbe .Lskip_loop_avx
+
+ # expand and copy pre-calculated table to stack
+ vmovdqu `16*1-64`($ctx),$D1
+ vmovdqu `16*2-64`($ctx),$D2
+ vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434
+ vpshufd \$0x44,$D4,$D0 # xx12 -> 1212
+ vmovdqa $D3,-0x90(%r11)
+ vmovdqa $D0,0x00(%rsp)
+ vpshufd \$0xEE,$D1,$D4
+ vmovdqu `16*3-64`($ctx),$D0
+ vpshufd \$0x44,$D1,$D1
+ vmovdqa $D4,-0x80(%r11)
+ vmovdqa $D1,0x10(%rsp)
+ vpshufd \$0xEE,$D2,$D3
+ vmovdqu `16*4-64`($ctx),$D1
+ vpshufd \$0x44,$D2,$D2
+ vmovdqa $D3,-0x70(%r11)
+ vmovdqa $D2,0x20(%rsp)
+ vpshufd \$0xEE,$D0,$D4
+ vmovdqu `16*5-64`($ctx),$D2
+ vpshufd \$0x44,$D0,$D0
+ vmovdqa $D4,-0x60(%r11)
+ vmovdqa $D0,0x30(%rsp)
+ vpshufd \$0xEE,$D1,$D3
+ vmovdqu `16*6-64`($ctx),$D0
+ vpshufd \$0x44,$D1,$D1
+ vmovdqa $D3,-0x50(%r11)
+ vmovdqa $D1,0x40(%rsp)
+ vpshufd \$0xEE,$D2,$D4
+ vmovdqu `16*7-64`($ctx),$D1
+ vpshufd \$0x44,$D2,$D2
+ vmovdqa $D4,-0x40(%r11)
+ vmovdqa $D2,0x50(%rsp)
+ vpshufd \$0xEE,$D0,$D3
+ vmovdqu `16*8-64`($ctx),$D2
+ vpshufd \$0x44,$D0,$D0
+ vmovdqa $D3,-0x30(%r11)
+ vmovdqa $D0,0x60(%rsp)
+ vpshufd \$0xEE,$D1,$D4
+ vpshufd \$0x44,$D1,$D1
+ vmovdqa $D4,-0x20(%r11)
+ vmovdqa $D1,0x70(%rsp)
+ vpshufd \$0xEE,$D2,$D3
+ vmovdqa 0x00(%rsp),$D4 # preload r0^2
+ vpshufd \$0x44,$D2,$D2
+ vmovdqa $D3,-0x10(%r11)
+ vmovdqa $D2,0x80(%rsp)
+
+ jmp .Loop_avx
+
+.align 32
+.Loop_avx:
+ ################################################################
+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
+ # \___________________/
+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
+ # \___________________/ \____________________/
+ #
+	# Note that we start with inp[2:3]*r^2, because that product
+	# doesn't depend on the reduction from the previous iteration.
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ #
+ # though note that $Tx and $Hx are "reversed" in this section,
+ # and $D4 is preloaded with r0^2...
+
+ vpmuludq $T0,$D4,$D0 # d0 = h0*r0
+ vpmuludq $T1,$D4,$D1 # d1 = h1*r0
+ vmovdqa $H2,0x20(%r11) # offload hash
+	vpmuludq	$T2,$D4,$D2		# d2 = h2*r0
+ vmovdqa 0x10(%rsp),$H2 # r1^2
+ vpmuludq $T3,$D4,$D3 # d3 = h3*r0
+ vpmuludq $T4,$D4,$D4 # d4 = h4*r0
+
+ vmovdqa $H0,0x00(%r11) #
+ vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
+ vmovdqa $H1,0x10(%r11) #
+ vpmuludq $T3,$H2,$H1 # h3*r1
+ vpaddq $H0,$D0,$D0 # d0 += h4*s1
+ vpaddq $H1,$D4,$D4 # d4 += h3*r1
+ vmovdqa $H3,0x30(%r11) #
+ vpmuludq $T2,$H2,$H0 # h2*r1
+ vpmuludq $T1,$H2,$H1 # h1*r1
+ vpaddq $H0,$D3,$D3 # d3 += h2*r1
+ vmovdqa 0x30(%rsp),$H3 # r2^2
+ vpaddq $H1,$D2,$D2 # d2 += h1*r1
+ vmovdqa $H4,0x40(%r11) #
+ vpmuludq $T0,$H2,$H2 # h0*r1
+ vpmuludq $T2,$H3,$H0 # h2*r2
+ vpaddq $H2,$D1,$D1 # d1 += h0*r1
+
+ vmovdqa 0x40(%rsp),$H4 # s2^2
+ vpaddq $H0,$D4,$D4 # d4 += h2*r2
+ vpmuludq $T1,$H3,$H1 # h1*r2
+ vpmuludq $T0,$H3,$H3 # h0*r2
+ vpaddq $H1,$D3,$D3 # d3 += h1*r2
+ vmovdqa 0x50(%rsp),$H2 # r3^2
+ vpaddq $H3,$D2,$D2 # d2 += h0*r2
+ vpmuludq $T4,$H4,$H0 # h4*s2
+ vpmuludq $T3,$H4,$H4 # h3*s2
+ vpaddq $H0,$D1,$D1 # d1 += h4*s2
+ vmovdqa 0x60(%rsp),$H3 # s3^2
+ vpaddq $H4,$D0,$D0 # d0 += h3*s2
+
+ vmovdqa 0x80(%rsp),$H4 # s4^2
+ vpmuludq $T1,$H2,$H1 # h1*r3
+ vpmuludq $T0,$H2,$H2 # h0*r3
+ vpaddq $H1,$D4,$D4 # d4 += h1*r3
+ vpaddq $H2,$D3,$D3 # d3 += h0*r3
+ vpmuludq $T4,$H3,$H0 # h4*s3
+ vpmuludq $T3,$H3,$H1 # h3*s3
+ vpaddq $H0,$D2,$D2 # d2 += h4*s3
+ vmovdqu 16*0($inp),$H0 # load input
+ vpaddq $H1,$D1,$D1 # d1 += h3*s3
+ vpmuludq $T2,$H3,$H3 # h2*s3
+ vpmuludq $T2,$H4,$T2 # h2*s4
+ vpaddq $H3,$D0,$D0 # d0 += h2*s3
+
+ vmovdqu 16*1($inp),$H1 #
+ vpaddq $T2,$D1,$D1 # d1 += h2*s4
+ vpmuludq $T3,$H4,$T3 # h3*s4
+ vpmuludq $T4,$H4,$T4 # h4*s4
+ vpsrldq \$6,$H0,$H2 # splat input
+ vpaddq $T3,$D2,$D2 # d2 += h3*s4
+ vpaddq $T4,$D3,$D3 # d3 += h4*s4
+ vpsrldq \$6,$H1,$H3 #
+ vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
+ vpmuludq $T1,$H4,$T0 # h1*s4
+ vpunpckhqdq $H1,$H0,$H4 # 4
+ vpaddq $T4,$D4,$D4 # d4 += h0*r4
+ vmovdqa -0x90(%r11),$T4 # r0^4
+ vpaddq $T0,$D0,$D0 # d0 += h1*s4
+
+ vpunpcklqdq $H1,$H0,$H0 # 0:1
+ vpunpcklqdq $H3,$H2,$H3 # 2:3
+
+ #vpsrlq \$40,$H4,$H4 # 4
+ vpsrldq \$`40/8`,$H4,$H4 # 4
+ vpsrlq \$26,$H0,$H1
+ vpand $MASK,$H0,$H0 # 0
+ vpsrlq \$4,$H3,$H2
+ vpand $MASK,$H1,$H1 # 1
+ vpand 0(%rcx),$H4,$H4 # .Lmask24
+ vpsrlq \$30,$H3,$H3
+ vpand $MASK,$H2,$H2 # 2
+ vpand $MASK,$H3,$H3 # 3
+ vpor 32(%rcx),$H4,$H4 # padbit, yes, always
+
+ vpaddq 0x00(%r11),$H0,$H0 # add hash value
+ vpaddq 0x10(%r11),$H1,$H1
+ vpaddq 0x20(%r11),$H2,$H2
+ vpaddq 0x30(%r11),$H3,$H3
+ vpaddq 0x40(%r11),$H4,$H4
+
+ lea 16*2($inp),%rax
+ lea 16*4($inp),$inp
+ sub \$64,$len
+ cmovc %rax,$inp
+
+ ################################################################
+ # Now we accumulate (inp[0:1]+hash)*r^4
+ ################################################################
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ vpmuludq $H0,$T4,$T0 # h0*r0
+ vpmuludq $H1,$T4,$T1 # h1*r0
+ vpaddq $T0,$D0,$D0
+ vpaddq $T1,$D1,$D1
+ vmovdqa -0x80(%r11),$T2 # r1^4
+ vpmuludq $H2,$T4,$T0 # h2*r0
+ vpmuludq $H3,$T4,$T1 # h3*r0
+ vpaddq $T0,$D2,$D2
+ vpaddq $T1,$D3,$D3
+ vpmuludq $H4,$T4,$T4 # h4*r0
+ vpmuludq -0x70(%r11),$H4,$T0 # h4*s1
+ vpaddq $T4,$D4,$D4
+
+ vpaddq $T0,$D0,$D0 # d0 += h4*s1
+ vpmuludq $H2,$T2,$T1 # h2*r1
+ vpmuludq $H3,$T2,$T0 # h3*r1
+ vpaddq $T1,$D3,$D3 # d3 += h2*r1
+ vmovdqa -0x60(%r11),$T3 # r2^4
+ vpaddq $T0,$D4,$D4 # d4 += h3*r1
+ vpmuludq $H1,$T2,$T1 # h1*r1
+ vpmuludq $H0,$T2,$T2 # h0*r1
+ vpaddq $T1,$D2,$D2 # d2 += h1*r1
+ vpaddq $T2,$D1,$D1 # d1 += h0*r1
+
+ vmovdqa -0x50(%r11),$T4 # s2^4
+ vpmuludq $H2,$T3,$T0 # h2*r2
+ vpmuludq $H1,$T3,$T1 # h1*r2
+ vpaddq $T0,$D4,$D4 # d4 += h2*r2
+ vpaddq $T1,$D3,$D3 # d3 += h1*r2
+ vmovdqa -0x40(%r11),$T2 # r3^4
+ vpmuludq $H0,$T3,$T3 # h0*r2
+ vpmuludq $H4,$T4,$T0 # h4*s2
+ vpaddq $T3,$D2,$D2 # d2 += h0*r2
+ vpaddq $T0,$D1,$D1 # d1 += h4*s2
+ vmovdqa -0x30(%r11),$T3 # s3^4
+ vpmuludq $H3,$T4,$T4 # h3*s2
+ vpmuludq $H1,$T2,$T1 # h1*r3
+ vpaddq $T4,$D0,$D0 # d0 += h3*s2
+
+ vmovdqa -0x10(%r11),$T4 # s4^4
+ vpaddq $T1,$D4,$D4 # d4 += h1*r3
+ vpmuludq $H0,$T2,$T2 # h0*r3
+ vpmuludq $H4,$T3,$T0 # h4*s3
+ vpaddq $T2,$D3,$D3 # d3 += h0*r3
+ vpaddq $T0,$D2,$D2 # d2 += h4*s3
+ vmovdqu 16*2($inp),$T0 # load input
+ vpmuludq $H3,$T3,$T2 # h3*s3
+ vpmuludq $H2,$T3,$T3 # h2*s3
+ vpaddq $T2,$D1,$D1 # d1 += h3*s3
+ vmovdqu 16*3($inp),$T1 #
+ vpaddq $T3,$D0,$D0 # d0 += h2*s3
+
+ vpmuludq $H2,$T4,$H2 # h2*s4
+ vpmuludq $H3,$T4,$H3 # h3*s4
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpaddq $H2,$D1,$D1 # d1 += h2*s4
+ vpmuludq $H4,$T4,$H4 # h4*s4
+ vpsrldq \$6,$T1,$T3 #
+ vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
+ vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
+ vpmuludq -0x20(%r11),$H0,$H4 # h0*r4
+ vpmuludq $H1,$T4,$H0
+ vpunpckhqdq $T1,$T0,$T4 # 4
+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
+
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+ vpunpcklqdq $T3,$T2,$T3 # 2:3
+
+ #vpsrlq \$40,$T4,$T4 # 4
+ vpsrldq \$`40/8`,$T4,$T4 # 4
+ vpsrlq \$26,$T0,$T1
+ vmovdqa 0x00(%rsp),$D4 # preload r0^2
+ vpand $MASK,$T0,$T0 # 0
+ vpsrlq \$4,$T3,$T2
+ vpand $MASK,$T1,$T1 # 1
+ vpand 0(%rcx),$T4,$T4 # .Lmask24
+ vpsrlq \$30,$T3,$T3
+ vpand $MASK,$T2,$T2 # 2
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+
+ ################################################################
+ # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
+ # and P. Schwabe
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$D1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H4,$D0
+ vpand $MASK,$H4,$H4
+
+ vpsrlq \$26,$H1,$D1
+ vpand $MASK,$H1,$H1
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D0,$H0,$H0
+ vpsllq \$2,$D0,$D0
+ vpaddq $D0,$H0,$H0 # h4 -> h0
+
+ vpsrlq \$26,$H2,$D2
+ vpand $MASK,$H2,$H2
+ vpaddq $D2,$H3,$H3 # h2 -> h3
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
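+
+	# Scalar model of the carry chain above (comment only): a carry
+	# out of limb 4 re-enters limb 0 multiplied by 5 (added as c+4*c),
+	# and the order (3,0,4,1,2,0,3) leaves every limb barely above 26
+	# bits at worst:
+	#
+	#	carry(i): c = h[i] >> 26; h[i] &= 0x3ffffff;
+	#	          if (i == 4) h[0] += 5*c; else h[i+1] += c;
+	#	carry(i) for i in (3,0,4,1,2,0,3)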
+
+ ja .Loop_avx
+
+.Lskip_loop_avx:
+ ################################################################
+ # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
+
+ vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2
+ add \$32,$len
+ jnz .Long_tail_avx
+
+ vpaddq $H2,$T2,$T2
+ vpaddq $H0,$T0,$T0
+ vpaddq $H1,$T1,$T1
+ vpaddq $H3,$T3,$T3
+ vpaddq $H4,$T4,$T4
+
+.Long_tail_avx:
+ vmovdqa $H2,0x20(%r11)
+ vmovdqa $H0,0x00(%r11)
+ vmovdqa $H1,0x10(%r11)
+ vmovdqa $H3,0x30(%r11)
+ vmovdqa $H4,0x40(%r11)
+
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+
+ vpmuludq $T2,$D4,$D2 # d2 = h2*r0
+ vpmuludq $T0,$D4,$D0 # d0 = h0*r0
+ vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n
+ vpmuludq $T1,$D4,$D1 # d1 = h1*r0
+ vpmuludq $T3,$D4,$D3 # d3 = h3*r0
+ vpmuludq $T4,$D4,$D4 # d4 = h4*r0
+
+ vpmuludq $T3,$H2,$H0 # h3*r1
+ vpaddq $H0,$D4,$D4 # d4 += h3*r1
+ vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n
+ vpmuludq $T2,$H2,$H1 # h2*r1
+ vpaddq $H1,$D3,$D3 # d3 += h2*r1
+ vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n
+ vpmuludq $T1,$H2,$H0 # h1*r1
+ vpaddq $H0,$D2,$D2 # d2 += h1*r1
+ vpmuludq $T0,$H2,$H2 # h0*r1
+ vpaddq $H2,$D1,$D1 # d1 += h0*r1
+ vpmuludq $T4,$H3,$H3 # h4*s1
+ vpaddq $H3,$D0,$D0 # d0 += h4*s1
+
+ vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n
+ vpmuludq $T2,$H4,$H1 # h2*r2
+ vpaddq $H1,$D4,$D4 # d4 += h2*r2
+ vpmuludq $T1,$H4,$H0 # h1*r2
+ vpaddq $H0,$D3,$D3 # d3 += h1*r2
+ vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n
+ vpmuludq $T0,$H4,$H4 # h0*r2
+ vpaddq $H4,$D2,$D2 # d2 += h0*r2
+ vpmuludq $T4,$H2,$H1 # h4*s2
+ vpaddq $H1,$D1,$D1 # d1 += h4*s2
+ vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n
+ vpmuludq $T3,$H2,$H2 # h3*s2
+ vpaddq $H2,$D0,$D0 # d0 += h3*s2
+
+ vpmuludq $T1,$H3,$H0 # h1*r3
+ vpaddq $H0,$D4,$D4 # d4 += h1*r3
+ vpmuludq $T0,$H3,$H3 # h0*r3
+ vpaddq $H3,$D3,$D3 # d3 += h0*r3
+ vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n
+ vpmuludq $T4,$H4,$H1 # h4*s3
+ vpaddq $H1,$D2,$D2 # d2 += h4*s3
+ vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n
+ vpmuludq $T3,$H4,$H0 # h3*s3
+ vpaddq $H0,$D1,$D1 # d1 += h3*s3
+ vpmuludq $T2,$H4,$H4 # h2*s3
+ vpaddq $H4,$D0,$D0 # d0 += h2*s3
+
+ vpmuludq $T0,$H2,$H2 # h0*r4
+ vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4
+ vpmuludq $T4,$H3,$H1 # h4*s4
+ vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4
+ vpmuludq $T3,$H3,$H0 # h3*s4
+ vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4
+ vpmuludq $T2,$H3,$H1 # h2*s4
+ vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4
+ vpmuludq $T1,$H3,$H3 # h1*s4
+ vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4
+
+ jz .Lshort_tail_avx
+
+ vmovdqu 16*0($inp),$H0 # load input
+ vmovdqu 16*1($inp),$H1
+
+ vpsrldq \$6,$H0,$H2 # splat input
+ vpsrldq \$6,$H1,$H3
+ vpunpckhqdq $H1,$H0,$H4 # 4
+ vpunpcklqdq $H1,$H0,$H0 # 0:1
+ vpunpcklqdq $H3,$H2,$H3 # 2:3
+
+ vpsrlq \$40,$H4,$H4 # 4
+ vpsrlq \$26,$H0,$H1
+ vpand $MASK,$H0,$H0 # 0
+ vpsrlq \$4,$H3,$H2
+ vpand $MASK,$H1,$H1 # 1
+ vpsrlq \$30,$H3,$H3
+ vpand $MASK,$H2,$H2 # 2
+ vpand $MASK,$H3,$H3 # 3
+ vpor 32(%rcx),$H4,$H4 # padbit, yes, always
+
+ vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4
+ vpaddq 0x00(%r11),$H0,$H0
+ vpaddq 0x10(%r11),$H1,$H1
+ vpaddq 0x20(%r11),$H2,$H2
+ vpaddq 0x30(%r11),$H3,$H3
+ vpaddq 0x40(%r11),$H4,$H4
+
+ ################################################################
+ # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
+
+ vpmuludq $H0,$T4,$T0 # h0*r0
+ vpaddq $T0,$D0,$D0 # d0 += h0*r0
+ vpmuludq $H1,$T4,$T1 # h1*r0
+ vpaddq $T1,$D1,$D1 # d1 += h1*r0
+ vpmuludq $H2,$T4,$T0 # h2*r0
+ vpaddq $T0,$D2,$D2 # d2 += h2*r0
+ vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n
+ vpmuludq $H3,$T4,$T1 # h3*r0
+ vpaddq $T1,$D3,$D3 # d3 += h3*r0
+ vpmuludq $H4,$T4,$T4 # h4*r0
+ vpaddq $T4,$D4,$D4 # d4 += h4*r0
+
+ vpmuludq $H3,$T2,$T0 # h3*r1
+ vpaddq $T0,$D4,$D4 # d4 += h3*r1
+ vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1
+ vpmuludq $H2,$T2,$T1 # h2*r1
+ vpaddq $T1,$D3,$D3 # d3 += h2*r1
+ vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2
+ vpmuludq $H1,$T2,$T0 # h1*r1
+ vpaddq $T0,$D2,$D2 # d2 += h1*r1
+ vpmuludq $H0,$T2,$T2 # h0*r1
+ vpaddq $T2,$D1,$D1 # d1 += h0*r1
+ vpmuludq $H4,$T3,$T3 # h4*s1
+ vpaddq $T3,$D0,$D0 # d0 += h4*s1
+
+ vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2
+ vpmuludq $H2,$T4,$T1 # h2*r2
+ vpaddq $T1,$D4,$D4 # d4 += h2*r2
+ vpmuludq $H1,$T4,$T0 # h1*r2
+ vpaddq $T0,$D3,$D3 # d3 += h1*r2
+ vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3
+ vpmuludq $H0,$T4,$T4 # h0*r2
+ vpaddq $T4,$D2,$D2 # d2 += h0*r2
+ vpmuludq $H4,$T2,$T1 # h4*s2
+ vpaddq $T1,$D1,$D1 # d1 += h4*s2
+ vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3
+ vpmuludq $H3,$T2,$T2 # h3*s2
+ vpaddq $T2,$D0,$D0 # d0 += h3*s2
+
+ vpmuludq $H1,$T3,$T0 # h1*r3
+ vpaddq $T0,$D4,$D4 # d4 += h1*r3
+ vpmuludq $H0,$T3,$T3 # h0*r3
+ vpaddq $T3,$D3,$D3 # d3 += h0*r3
+ vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4
+ vpmuludq $H4,$T4,$T1 # h4*s3
+ vpaddq $T1,$D2,$D2 # d2 += h4*s3
+ vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4
+ vpmuludq $H3,$T4,$T0 # h3*s3
+ vpaddq $T0,$D1,$D1 # d1 += h3*s3
+ vpmuludq $H2,$T4,$T4 # h2*s3
+ vpaddq $T4,$D0,$D0 # d0 += h2*s3
+
+ vpmuludq $H0,$T2,$T2 # h0*r4
+ vpaddq $T2,$D4,$D4 # d4 += h0*r4
+ vpmuludq $H4,$T3,$T1 # h4*s4
+ vpaddq $T1,$D3,$D3 # d3 += h4*s4
+ vpmuludq $H3,$T3,$T0 # h3*s4
+ vpaddq $T0,$D2,$D2 # d2 += h3*s4
+ vpmuludq $H2,$T3,$T1 # h2*s4
+ vpaddq $T1,$D1,$D1 # d1 += h2*s4
+ vpmuludq $H1,$T3,$T3 # h1*s4
+ vpaddq $T3,$D0,$D0 # d0 += h1*s4
+
+.Lshort_tail_avx:
+ ################################################################
+ # lazy reduction
+
+ vpsrlq \$26,$D3,$H3
+ vpand $MASK,$D3,$D3
+ vpaddq $H3,$D4,$D4 # h3 -> h4
+
+ vpsrlq \$26,$D0,$H0
+ vpand $MASK,$D0,$D0
+ vpaddq $H0,$D1,$D1 # h0 -> h1
+
+ vpsrlq \$26,$D4,$H4
+ vpand $MASK,$D4,$D4
+
+ vpsrlq \$26,$D1,$H1
+ vpand $MASK,$D1,$D1
+ vpaddq $H1,$D2,$D2 # h1 -> h2
+
+ vpaddq $H4,$D0,$D0
+ vpsllq \$2,$H4,$H4
+ vpaddq $H4,$D0,$D0 # h4 -> h0
+
+ vpsrlq \$26,$D2,$H2
+ vpand $MASK,$D2,$D2
+ vpaddq $H2,$D3,$D3 # h2 -> h3
+
+ vpsrlq \$26,$D0,$H0
+ vpand $MASK,$D0,$D0
+ vpaddq $H0,$D1,$D1 # h0 -> h1
+
+ vpsrlq \$26,$D3,$H3
+ vpand $MASK,$D3,$D3
+ vpaddq $H3,$D4,$D4 # h3 -> h4
+
+ ################################################################
+ # horizontal addition
+
+ vpsrldq \$8,$D2,$T2
+ vpsrldq \$8,$D0,$T0
+ vpsrldq \$8,$D1,$T1
+ vpsrldq \$8,$D3,$T3
+ vpsrldq \$8,$D4,$T4
+ vpaddq $T2,$D2,$H2
+ vpaddq $T0,$D0,$H0
+ vpaddq $T1,$D1,$H1
+ vpaddq $T3,$D3,$H3
+ vpaddq $T4,$D4,$H4
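+
+	# vpsrldq by 8 drops the high 64-bit half of each accumulator onto
+	# the low one, so the additions above fold the two lanes together;
+	# in scalar terms (comment only): h[i] = lo64(d[i]) + hi64(d[i]),
+	# with lo64/hi64 as hypothetical lane extractors.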
+
+ vmovd $H0,`4*0-48-64`($ctx) # save partially reduced
+ vmovd $H1,`4*1-48-64`($ctx)
+ vmovd $H2,`4*2-48-64`($ctx)
+ vmovd $H3,`4*3-48-64`($ctx)
+ vmovd $H4,`4*4-48-64`($ctx)
+___
+$code.=<<___ if ($win64);
+ vmovdqa 0x50(%r11),%xmm6
+ vmovdqa 0x60(%r11),%xmm7
+ vmovdqa 0x70(%r11),%xmm8
+ vmovdqa 0x80(%r11),%xmm9
+ vmovdqa 0x90(%r11),%xmm10
+ vmovdqa 0xa0(%r11),%xmm11
+ vmovdqa 0xb0(%r11),%xmm12
+ vmovdqa 0xc0(%r11),%xmm13
+ vmovdqa 0xd0(%r11),%xmm14
+ vmovdqa 0xe0(%r11),%xmm15
+ lea 0xf8(%r11),%rsp
+.Ldo_avx_epilogue:
+___
+$code.=<<___ if (!$win64);
+ lea 0x58(%r11),%rsp
+___
+$code.=<<___;
+ vzeroupper
+ ret
+.size poly1305_blocks_avx,.-poly1305_blocks_avx
+
+.type poly1305_emit_avx,\@function,3
+.align 32
+poly1305_emit_avx:
+ cmpl \$0,20($ctx) # is_base2_26?
+ je .Lemit
+
+ mov 0($ctx),%eax # load hash value base 2^26
+ mov 4($ctx),%ecx
+ mov 8($ctx),%r8d
+ mov 12($ctx),%r11d
+ mov 16($ctx),%r10d
+
+ shl \$26,%rcx # base 2^26 -> base 2^64
+ mov %r8,%r9
+ shl \$52,%r8
+ add %rcx,%rax
+ shr \$12,%r9
+ add %rax,%r8 # h0
+ adc \$0,%r9
+
+ shl \$14,%r11
+ mov %r10,%rax
+ shr \$24,%r10
+ add %r11,%r9
+ shl \$40,%rax
+ add %rax,%r9 # h1
+ adc \$0,%r10 # h2
+
+ mov %r10,%rax # could be partially reduced, so reduce
+ mov %r10,%rcx
+ and \$3,%r10
+ shr \$2,%rax
+ and \$-4,%rcx
+ add %rcx,%rax
+ add %rax,%r8
+ adc \$0,%r9
+
+ mov %r8,%rax
+ add \$5,%r8 # compare to modulus
+ mov %r9,%rcx
+ adc \$0,%r9
+ adc \$0,%r10
+	shr	\$2,%r10		# did 130-bit value overflow?
+ cmovnz %r8,%rax
+ cmovnz %r9,%rcx
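+
+	# Scalar model of the selection above (comment only): t = h + 5;
+	# if the sum reaches bit 130 then h >= p = 2^130-5 and the low
+	# 128 bits of t equal (h - p) mod 2^128, so t is selected;
+	# otherwise h is kept as is. The nonce is then added modulo 2^128.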
+
+ add 0($nonce),%rax # accumulate nonce
+ adc 8($nonce),%rcx
+ mov %rax,0($mac) # write result
+ mov %rcx,8($mac)
+
+ ret
+.size poly1305_emit_avx,.-poly1305_emit_avx
+___
+
+if ($avx>1) {
+my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
+ map("%ymm$_",(0..15));
+my $S4=$MASK;
+
+$code.=<<___;
+.type poly1305_blocks_avx2,\@function,4
+.align 32
+poly1305_blocks_avx2:
+ mov 20($ctx),%r8d # is_base2_26
+ cmp \$128,$len
+ jae .Lblocks_avx2
+ test %r8d,%r8d
+ jz .Lblocks
+
+.Lblocks_avx2:
+ and \$-16,$len
+ jz .Lno_data_avx2
+
+ vzeroupper
+
+ test %r8d,%r8d
+ jz .Lbase2_64_avx2
+
+ test \$63,$len
+ jz .Leven_avx2
+
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lblocks_avx2_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 0($ctx),$d1 # load hash value
+ mov 8($ctx),$d2
+ mov 16($ctx),$h2#d
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ ################################# base 2^26 -> base 2^64
+ mov $d1#d,$h0#d
+ and \$-1<<31,$d1
+ mov $d2,$r1 # borrow $r1
+ mov $d2#d,$h1#d
+ and \$-1<<31,$d2
+
+ shr \$6,$d1
+ shl \$52,$r1
+ add $d1,$h0
+ shr \$12,$h1
+ shr \$18,$d2
+ add $r1,$h0
+ adc $d2,$h1
+
+ mov $h2,$d1
+ shl \$40,$d1
+ shr \$24,$h2
+ add $d1,$h1
+ adc \$0,$h2 # can be partially reduced...
+
+ mov \$-4,$d2 # ... so reduce
+ mov $h2,$d1
+ and $h2,$d2
+ shr \$2,$d1
+ and \$3,$h2
+ add $d2,$d1 # =*5
+ add $d1,$h0
+ adc \$0,$h1
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+
+.Lbase2_26_pre_avx2:
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+ sub \$16,%r15
+
+ call __poly1305_block
+ mov $r1,%rax
+
+ test \$63,%r15
+ jnz .Lbase2_26_pre_avx2
+
+ test $padbit,$padbit # if $padbit is zero,
+ jz .Lstore_base2_64_avx2 # store hash in base 2^64 format
+
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$r0
+ mov $h1,$r1
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$r0
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $r0,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$r1
+ and \$0x3ffffff,$h1 # h[3]
+ or $r1,$h2 # h[4]
+
+ test %r15,%r15
+ jz .Lstore_base2_26_avx2
+
+ vmovd %rax#d,%x#$H0
+ vmovd %rdx#d,%x#$H1
+ vmovd $h0#d,%x#$H2
+ vmovd $h1#d,%x#$H3
+ vmovd $h2#d,%x#$H4
+ jmp .Lproceed_avx2
+
+.align 32
+.Lstore_base2_64_avx2:
+ mov $h0,0($ctx)
+ mov $h1,8($ctx)
+ mov $h2,16($ctx) # note that is_base2_26 is zeroed
+ jmp .Ldone_avx2
+
+.align 16
+.Lstore_base2_26_avx2:
+ mov %rax#d,0($ctx) # store hash value base 2^26
+ mov %rdx#d,4($ctx)
+ mov $h0#d,8($ctx)
+ mov $h1#d,12($ctx)
+ mov $h2#d,16($ctx)
+.align 16
+.Ldone_avx2:
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%r12
+ mov 32(%rsp),%rbp
+ mov 40(%rsp),%rbx
+ lea 48(%rsp),%rsp
+.Lno_data_avx2:
+.Lblocks_avx2_epilogue:
+ ret
+
+.align 32
+.Lbase2_64_avx2:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lbase2_64_avx2_body:
+
+ mov $len,%r15 # reassign $len
+
+ mov 24($ctx),$r0 # load r
+ mov 32($ctx),$s1
+
+ mov 0($ctx),$h0 # load hash value
+ mov 8($ctx),$h1
+ mov 16($ctx),$h2#d
+
+ mov $s1,$r1
+ mov $s1,%rax
+ shr \$2,$s1
+ add $r1,$s1 # s1 = r1 + (r1 >> 2)
+
+ test \$63,$len
+ jz .Linit_avx2
+
+.Lbase2_64_pre_avx2:
+ add 0($inp),$h0 # accumulate input
+ adc 8($inp),$h1
+ lea 16($inp),$inp
+ adc $padbit,$h2
+ sub \$16,%r15
+
+ call __poly1305_block
+ mov $r1,%rax
+
+ test \$63,%r15
+ jnz .Lbase2_64_pre_avx2
+
+.Linit_avx2:
+ ################################# base 2^64 -> base 2^26
+ mov $h0,%rax
+ mov $h0,%rdx
+ shr \$52,$h0
+ mov $h1,$d1
+ mov $h1,$d2
+ shr \$26,%rdx
+ and \$0x3ffffff,%rax # h[0]
+ shl \$12,$d1
+ and \$0x3ffffff,%rdx # h[1]
+ shr \$14,$h1
+ or $d1,$h0
+ shl \$24,$h2
+ and \$0x3ffffff,$h0 # h[2]
+ shr \$40,$d2
+ and \$0x3ffffff,$h1 # h[3]
+ or $d2,$h2 # h[4]
+
+ vmovd %rax#d,%x#$H0
+ vmovd %rdx#d,%x#$H1
+ vmovd $h0#d,%x#$H2
+ vmovd $h1#d,%x#$H3
+ vmovd $h2#d,%x#$H4
+ movl \$1,20($ctx) # set is_base2_26
+
+ call __poly1305_init_avx
+
+.Lproceed_avx2:
+ mov %r15,$len
+
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%r12
+ mov 32(%rsp),%rbp
+ mov 40(%rsp),%rbx
+ lea 48(%rsp),%rax
+ lea 48(%rsp),%rsp
+.Lbase2_64_avx2_epilogue:
+ jmp .Ldo_avx2
+
+.align 32
+.Leven_avx2:
+ vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
+ vmovd 4*1($ctx),%x#$H1
+ vmovd 4*2($ctx),%x#$H2
+ vmovd 4*3($ctx),%x#$H3
+ vmovd 4*4($ctx),%x#$H4
+
+.Ldo_avx2:
+___
+$code.=<<___ if (!$win64);
+ lea -8(%rsp),%r11
+ sub \$0x128,%rsp
+___
+$code.=<<___ if ($win64);
+ lea -0xf8(%rsp),%r11
+ sub \$0x1c8,%rsp
+ vmovdqa %xmm6,0x50(%r11)
+ vmovdqa %xmm7,0x60(%r11)
+ vmovdqa %xmm8,0x70(%r11)
+ vmovdqa %xmm9,0x80(%r11)
+ vmovdqa %xmm10,0x90(%r11)
+ vmovdqa %xmm11,0xa0(%r11)
+ vmovdqa %xmm12,0xb0(%r11)
+ vmovdqa %xmm13,0xc0(%r11)
+ vmovdqa %xmm14,0xd0(%r11)
+ vmovdqa %xmm15,0xe0(%r11)
+.Ldo_avx2_body:
+___
+$code.=<<___;
+ lea 48+64($ctx),$ctx # size optimization
+ lea .Lconst(%rip),%rcx
+
+ # expand and copy pre-calculated table to stack
+ vmovdqu `16*0-64`($ctx),%x#$T2
+ and \$-512,%rsp
+ vmovdqu `16*1-64`($ctx),%x#$T3
+ vmovdqu `16*2-64`($ctx),%x#$T4
+ vmovdqu `16*3-64`($ctx),%x#$D0
+ vmovdqu `16*4-64`($ctx),%x#$D1
+ vmovdqu `16*5-64`($ctx),%x#$D2
+ vmovdqu `16*6-64`($ctx),%x#$D3
+ vpermq \$0x15,$T2,$T2 # 00003412 -> 12343434
+ vmovdqu `16*7-64`($ctx),%x#$D4
+ vpermq \$0x15,$T3,$T3
+ vpshufd \$0xc8,$T2,$T2 # 12343434 -> 14243444
+ vmovdqu `16*8-64`($ctx),%x#$MASK
+ vpermq \$0x15,$T4,$T4
+ vpshufd \$0xc8,$T3,$T3
+ vmovdqa $T2,0x00(%rsp)
+ vpermq \$0x15,$D0,$D0
+ vpshufd \$0xc8,$T4,$T4
+ vmovdqa $T3,0x20(%rsp)
+ vpermq \$0x15,$D1,$D1
+ vpshufd \$0xc8,$D0,$D0
+ vmovdqa $T4,0x40(%rsp)
+ vpermq \$0x15,$D2,$D2
+ vpshufd \$0xc8,$D1,$D1
+ vmovdqa $D0,0x60(%rsp)
+ vpermq \$0x15,$D3,$D3
+ vpshufd \$0xc8,$D2,$D2
+ vmovdqa $D1,0x80(%rsp)
+ vpermq \$0x15,$D4,$D4
+ vpshufd \$0xc8,$D3,$D3
+ vmovdqa $D2,0xa0(%rsp)
+ vpermq \$0x15,$MASK,$MASK
+ vpshufd \$0xc8,$D4,$D4
+ vmovdqa $D3,0xc0(%rsp)
+ vpshufd \$0xc8,$MASK,$MASK
+ vmovdqa $D4,0xe0(%rsp)
+ vmovdqa $MASK,0x100(%rsp)
+ vmovdqa 64(%rcx),$MASK # .Lmask26
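+
+	# After this expansion each 32-byte stack entry holds one
+	# coefficient column replicated so that a load at offset +0 feeds
+	# r^4 into every lane (main loop), while the same load displaced
+	# by 4 bytes feeds r^4:r^3:r^2:r^1 across lanes (see .Ltail_avx2).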
+
+ ################################################################
+ # load input
+ vmovdqu 16*0($inp),%x#$T0
+ vmovdqu 16*1($inp),%x#$T1
+ vinserti128 \$1,16*2($inp),$T0,$T0
+ vinserti128 \$1,16*3($inp),$T1,$T1
+ lea 16*4($inp),$inp
+
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpsrldq \$6,$T1,$T3
+ vpunpckhqdq $T1,$T0,$T4 # 4
+ vpunpcklqdq $T3,$T2,$T2 # 2:3
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+
+ vpsrlq \$30,$T2,$T3
+ vpsrlq \$4,$T2,$T2
+ vpsrlq \$26,$T0,$T1
+ vpsrlq \$40,$T4,$T4 # 4
+ vpand $MASK,$T2,$T2 # 2
+ vpand $MASK,$T0,$T0 # 0
+ vpand $MASK,$T1,$T1 # 1
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+
+ lea 0x90(%rsp),%rax # size optimization
+ vpaddq $H2,$T2,$H2 # accumulate input
+ sub \$64,$len
+ jz .Ltail_avx2
+ jmp .Loop_avx2
+
+.align 32
+.Loop_avx2:
+ ################################################################
+	# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
+	# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
+	# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
+	# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
+ # \________/\________/
+ ################################################################
+ #vpaddq $H2,$T2,$H2 # accumulate input
+ vpaddq $H0,$T0,$H0
+ vmovdqa `32*0`(%rsp),$T0 # r0^4
+ vpaddq $H1,$T1,$H1
+ vmovdqa `32*1`(%rsp),$T1 # r1^4
+ vpaddq $H3,$T3,$H3
+ vmovdqa `32*3`(%rsp),$T2 # r2^4
+ vpaddq $H4,$T4,$H4
+ vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
+ vmovdqa `32*8-0x90`(%rax),$S4 # s4^4
+
+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
+ #
+	# however, as h2 is "chronologically" the first one available, pull
+	# the corresponding operations up, so it becomes
+ #
+ # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
+ # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
+ # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
+ # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
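+	#
+	# h2 is available first because its input limb was folded in by
+	# the modulo-scheduled vpaddq before the loop (and at the bottom
+	# of each iteration), so its five products can issue while the
+	# other limbs are still being carried.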
+
+ vpmuludq $H2,$T0,$D2 # d2 = h2*r0
+ vpmuludq $H2,$T1,$D3 # d3 = h2*r1
+ vpmuludq $H2,$T2,$D4 # d4 = h2*r2
+ vpmuludq $H2,$T3,$D0 # d0 = h2*s3
+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4
+
+ vpmuludq $H0,$T1,$T4 # h0*r1
+ vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
+ vpaddq $T4,$D1,$D1 # d1 += h0*r1
+ vpaddq $H2,$D2,$D2 # d2 += h1*r1
+ vpmuludq $H3,$T1,$T4 # h3*r1
+ vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
+ vpaddq $T4,$D4,$D4 # d4 += h3*r1
+ vpaddq $H2,$D0,$D0 # d0 += h4*s1
+ vmovdqa `32*4-0x90`(%rax),$T1 # s2
+
+ vpmuludq $H0,$T0,$T4 # h0*r0
+ vpmuludq $H1,$T0,$H2 # h1*r0
+ vpaddq $T4,$D0,$D0 # d0 += h0*r0
+ vpaddq $H2,$D1,$D1 # d1 += h1*r0
+ vpmuludq $H3,$T0,$T4 # h3*r0
+ vpmuludq $H4,$T0,$H2 # h4*r0
+ vmovdqu 16*0($inp),%x#$T0 # load input
+ vpaddq $T4,$D3,$D3 # d3 += h3*r0
+ vpaddq $H2,$D4,$D4 # d4 += h4*r0
+ vinserti128 \$1,16*2($inp),$T0,$T0
+
+ vpmuludq $H3,$T1,$T4 # h3*s2
+ vpmuludq $H4,$T1,$H2 # h4*s2
+ vmovdqu 16*1($inp),%x#$T1
+ vpaddq $T4,$D0,$D0 # d0 += h3*s2
+ vpaddq $H2,$D1,$D1 # d1 += h4*s2
+ vmovdqa `32*5-0x90`(%rax),$H2 # r3
+ vpmuludq $H1,$T2,$T4 # h1*r2
+ vpmuludq $H0,$T2,$T2 # h0*r2
+ vpaddq $T4,$D3,$D3 # d3 += h1*r2
+ vpaddq $T2,$D2,$D2 # d2 += h0*r2
+ vinserti128 \$1,16*3($inp),$T1,$T1
+ lea 16*4($inp),$inp
+
+ vpmuludq $H1,$H2,$T4 # h1*r3
+ vpmuludq $H0,$H2,$H2 # h0*r3
+ vpsrldq \$6,$T0,$T2 # splat input
+ vpaddq $T4,$D4,$D4 # d4 += h1*r3
+ vpaddq $H2,$D3,$D3 # d3 += h0*r3
+ vpmuludq $H3,$T3,$T4 # h3*s3
+ vpmuludq $H4,$T3,$H2 # h4*s3
+ vpsrldq \$6,$T1,$T3
+ vpaddq $T4,$D1,$D1 # d1 += h3*s3
+ vpaddq $H2,$D2,$D2 # d2 += h4*s3
+ vpunpckhqdq $T1,$T0,$T4 # 4
+
+ vpmuludq $H3,$S4,$H3 # h3*s4
+ vpmuludq $H4,$S4,$H4 # h4*s4
+ vpunpcklqdq $T1,$T0,$T0 # 0:1
+	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
+	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
+ vpunpcklqdq $T3,$T2,$T3 # 2:3
+ vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
+ vpmuludq $H1,$S4,$H0 # h1*s4
+ vmovdqa 64(%rcx),$MASK # .Lmask26
+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
+
+ ################################################################
+ # lazy reduction (interleaved with tail of input splat)
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$D1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H4,$D4
+ vpand $MASK,$H4,$H4
+
+ vpsrlq \$4,$T3,$T2
+
+ vpsrlq \$26,$H1,$D1
+ vpand $MASK,$H1,$H1
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D4,$H0,$H0
+ vpsllq \$2,$D4,$D4
+ vpaddq $D4,$H0,$H0 # h4 -> h0
+
+ vpand $MASK,$T2,$T2 # 2
+ vpsrlq \$26,$T0,$T1
+
+ vpsrlq \$26,$H2,$D2
+ vpand $MASK,$H2,$H2
+ vpaddq $D2,$H3,$H3 # h2 -> h3
+
+ vpaddq $T2,$H2,$H2 # modulo-scheduled
+ vpsrlq \$30,$T3,$T3
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$40,$T4,$T4 # 4
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpand $MASK,$T0,$T0 # 0
+ vpand $MASK,$T1,$T1 # 1
+ vpand $MASK,$T3,$T3 # 3
+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always
+
+ sub \$64,$len
+ jnz .Loop_avx2
+
+ .byte 0x66,0x90
+.Ltail_avx2:
+ ################################################################
+	# while the multiplications above were by r^4 in all lanes, in the
+	# last iteration we multiply the least significant lane by r^4 and
+	# the most significant one by r, so this is a copy of the above,
+	# except that references to the precomputed table are displaced by
+	# 4 bytes...
+
+ #vpaddq $H2,$T2,$H2 # accumulate input
+ vpaddq $H0,$T0,$H0
+ vmovdqu `32*0+4`(%rsp),$T0 # r0^4
+ vpaddq $H1,$T1,$H1
+ vmovdqu `32*1+4`(%rsp),$T1 # r1^4
+ vpaddq $H3,$T3,$H3
+ vmovdqu `32*3+4`(%rsp),$T2 # r2^4
+ vpaddq $H4,$T4,$H4
+ vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
+ vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4
+
+ vpmuludq $H2,$T0,$D2 # d2 = h2*r0
+ vpmuludq $H2,$T1,$D3 # d3 = h2*r1
+ vpmuludq $H2,$T2,$D4 # d4 = h2*r2
+ vpmuludq $H2,$T3,$D0 # d0 = h2*s3
+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4
+
+ vpmuludq $H0,$T1,$T4 # h0*r1
+ vpmuludq $H1,$T1,$H2 # h1*r1
+ vpaddq $T4,$D1,$D1 # d1 += h0*r1
+ vpaddq $H2,$D2,$D2 # d2 += h1*r1
+ vpmuludq $H3,$T1,$T4 # h3*r1
+ vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
+ vpaddq $T4,$D4,$D4 # d4 += h3*r1
+ vpaddq $H2,$D0,$D0 # d0 += h4*s1
+
+ vpmuludq $H0,$T0,$T4 # h0*r0
+ vpmuludq $H1,$T0,$H2 # h1*r0
+ vpaddq $T4,$D0,$D0 # d0 += h0*r0
+ vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
+ vpaddq $H2,$D1,$D1 # d1 += h1*r0
+ vpmuludq $H3,$T0,$T4 # h3*r0
+ vpmuludq $H4,$T0,$H2 # h4*r0
+ vpaddq $T4,$D3,$D3 # d3 += h3*r0
+ vpaddq $H2,$D4,$D4 # d4 += h4*r0
+
+ vpmuludq $H3,$T1,$T4 # h3*s2
+ vpmuludq $H4,$T1,$H2 # h4*s2
+ vpaddq $T4,$D0,$D0 # d0 += h3*s2
+ vpaddq $H2,$D1,$D1 # d1 += h4*s2
+ vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
+ vpmuludq $H1,$T2,$T4 # h1*r2
+ vpmuludq $H0,$T2,$T2 # h0*r2
+ vpaddq $T4,$D3,$D3 # d3 += h1*r2
+ vpaddq $T2,$D2,$D2 # d2 += h0*r2
+
+ vpmuludq $H1,$H2,$T4 # h1*r3
+ vpmuludq $H0,$H2,$H2 # h0*r3
+ vpaddq $T4,$D4,$D4 # d4 += h1*r3
+ vpaddq $H2,$D3,$D3 # d3 += h0*r3
+ vpmuludq $H3,$T3,$T4 # h3*s3
+ vpmuludq $H4,$T3,$H2 # h4*s3
+ vpaddq $T4,$D1,$D1 # d1 += h3*s3
+ vpaddq $H2,$D2,$D2 # d2 += h4*s3
+
+ vpmuludq $H3,$S4,$H3 # h3*s4
+ vpmuludq $H4,$S4,$H4 # h4*s4
+	vpaddq		$H3,$D2,$H2		# h2 = d2 + h3*s4
+	vpaddq		$H4,$D3,$H3		# h3 = d3 + h4*s4
+ vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
+ vpmuludq $H1,$S4,$H0 # h1*s4
+ vmovdqa 64(%rcx),$MASK # .Lmask26
+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
+
+ ################################################################
+ # lazy reduction
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$D1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H4,$D4
+ vpand $MASK,$H4,$H4
+
+ vpsrlq \$26,$H1,$D1
+ vpand $MASK,$H1,$H1
+ vpaddq $D1,$H2,$H2 # h1 -> h2
+
+ vpaddq $D4,$H0,$H0
+ vpsllq \$2,$D4,$D4
+ vpaddq $D4,$H0,$H0 # h4 -> h0
+
+ vpsrlq \$26,$H2,$D2
+ vpand $MASK,$H2,$H2
+ vpaddq $D2,$H3,$H3 # h2 -> h3
+
+ vpsrlq \$26,$H0,$D0
+ vpand $MASK,$H0,$H0
+ vpaddq $D0,$H1,$H1 # h0 -> h1
+
+ vpsrlq \$26,$H3,$D3
+ vpand $MASK,$H3,$H3
+ vpaddq $D3,$H4,$H4 # h3 -> h4
+
+ ################################################################
+ # horizontal addition
+
+ vpsrldq \$8,$H2,$T2
+ vpsrldq \$8,$H0,$T0
+ vpsrldq \$8,$H1,$T1
+ vpsrldq \$8,$H3,$T3
+ vpsrldq \$8,$H4,$T4
+ vpaddq $T2,$H2,$H2
+ vpaddq $T0,$H0,$H0
+ vpaddq $T1,$H1,$H1
+ vpaddq $T3,$H3,$H3
+ vpaddq $T4,$H4,$H4
+
+ vpermq \$0x2,$H2,$T2
+ vpermq \$0x2,$H0,$T0
+ vpermq \$0x2,$H1,$T1
+ vpermq \$0x2,$H3,$T3
+ vpermq \$0x2,$H4,$T4
+ vpaddq $T2,$H2,$H2
+ vpaddq $T0,$H0,$H0
+ vpaddq $T1,$H1,$H1
+ vpaddq $T3,$H3,$H3
+ vpaddq $T4,$H4,$H4
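+
+	# vpermq with immediate 0x2 brings the upper 128-bit half down, so
+	# combined with the vpsrldq fold above all four 64-bit lanes of
+	# each accumulator collapse into lane 0.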
+
+ vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
+ vmovd %x#$H1,`4*1-48-64`($ctx)
+ vmovd %x#$H2,`4*2-48-64`($ctx)
+ vmovd %x#$H3,`4*3-48-64`($ctx)
+ vmovd %x#$H4,`4*4-48-64`($ctx)
+___
+$code.=<<___ if ($win64);
+ vmovdqa 0x50(%r11),%xmm6
+ vmovdqa 0x60(%r11),%xmm7
+ vmovdqa 0x70(%r11),%xmm8
+ vmovdqa 0x80(%r11),%xmm9
+ vmovdqa 0x90(%r11),%xmm10
+ vmovdqa 0xa0(%r11),%xmm11
+ vmovdqa 0xb0(%r11),%xmm12
+ vmovdqa 0xc0(%r11),%xmm13
+ vmovdqa 0xd0(%r11),%xmm14
+ vmovdqa 0xe0(%r11),%xmm15
+ lea 0xf8(%r11),%rsp
+.Ldo_avx2_epilogue:
+___
+$code.=<<___ if (!$win64);
+ lea 8(%r11),%rsp
+___
+$code.=<<___;
+ vzeroupper
+ ret
+.size poly1305_blocks_avx2,.-poly1305_blocks_avx2
+___
+}
+$code.=<<___;
+.align 64
+.Lconst:
+.Lmask24:
+.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
+.L129:
+.long 1<<24,0,1<<24,0,1<<24,0,1<<24,0
+.Lmask26:
+.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
+.Lfive:
+.long 5,0,5,0,5,0,5,0
+___
+}
+
+$code.=<<___;
+.asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<.Lprologue
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lcommon_seh_tail
+
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+ jmp .Lcommon_seh_tail
+.size se_handler,.-se_handler
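+
+# In words: if context->Rip is below the recorded prologue end, or at or
+# beyond the epilogue start, the six GPRs were never pushed (or were
+# already popped), so only Rsp/Rsi/Rdi need fixing up in
+# .Lcommon_seh_tail; otherwise they are read back from the 48-byte frame
+# and patched into the CONTEXT record before RtlVirtualUnwind resumes
+# the unwind.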
+
+.type avx_handler,\@abi-omnipotent
+.align 16
+avx_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 208($context),%rax # pull context->R11
+
+ lea 0x50(%rax),%rsi
+ lea 0xf8(%rax),%rax
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size avx_handler,.-avx_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_poly1305_init
+ .rva .LSEH_end_poly1305_init
+ .rva .LSEH_info_poly1305_init
+
+ .rva .LSEH_begin_poly1305_blocks
+ .rva .LSEH_end_poly1305_blocks
+ .rva .LSEH_info_poly1305_blocks
+
+ .rva .LSEH_begin_poly1305_emit
+ .rva .LSEH_end_poly1305_emit
+ .rva .LSEH_info_poly1305_emit
+___
+$code.=<<___ if ($avx);
+ .rva .LSEH_begin_poly1305_blocks_avx
+ .rva .Lbase2_64_avx
+ .rva .LSEH_info_poly1305_blocks_avx_1
+
+ .rva .Lbase2_64_avx
+ .rva .Leven_avx
+ .rva .LSEH_info_poly1305_blocks_avx_2
+
+ .rva .Leven_avx
+ .rva .LSEH_end_poly1305_blocks_avx
+ .rva .LSEH_info_poly1305_blocks_avx_3
+
+ .rva .LSEH_begin_poly1305_emit_avx
+ .rva .LSEH_end_poly1305_emit_avx
+ .rva .LSEH_info_poly1305_emit_avx
+___
+$code.=<<___ if ($avx>1);
+ .rva .LSEH_begin_poly1305_blocks_avx2
+ .rva .Lbase2_64_avx2
+ .rva .LSEH_info_poly1305_blocks_avx2_1
+
+ .rva .Lbase2_64_avx2
+ .rva .Leven_avx2
+ .rva .LSEH_info_poly1305_blocks_avx2_2
+
+ .rva .Leven_avx2
+ .rva .LSEH_end_poly1305_blocks_avx2
+ .rva .LSEH_info_poly1305_blocks_avx2_3
+___
+$code.=<<___;
+.section .xdata
+.align 8
+.LSEH_info_poly1305_init:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
+
+.LSEH_info_poly1305_blocks:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lblocks_body,.Lblocks_epilogue
+
+.LSEH_info_poly1305_emit:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
+___
+$code.=<<___ if ($avx);
+.LSEH_info_poly1305_blocks_avx_1:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx_2:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx_3:
+ .byte 9,0,0,0
+ .rva avx_handler
+ .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_emit_avx:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_poly1305_blocks_avx2_1:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx2_2:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[]
+
+.LSEH_info_poly1305_blocks_avx2_3:
+ .byte 9,0,0,0
+ .rva avx_handler
+ .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
+___
+}
+
+foreach (split('\n',$code)) {
+	s/\`([^\`]*)\`/eval($1)/ge;	# evaluate backquoted offset expressions
+	s/%r([a-z]+)#d/%e$1/g;		# %rax#d -> %eax, etc.
+	s/%r([0-9]+)#d/%r$1d/g;		# %r8#d -> %r8d, etc.
+	s/%x#%y/%x/g;			# %x#%ymmN -> %xmmN
+
+	print $_,"\n";
+}
+close STDOUT;