github.com/mono/boringssl.git

author    Adam Langley <agl@chromium.org>  2014-06-20 23:00:00 +0400
committer Adam Langley <agl@chromium.org>  2014-06-21 00:17:42 +0400
commit    cb5dd63e5e5eecaf8ce12be2d92cecfbff298b4b (patch)
tree      899ceae061aec2105aa561a3368f61a6b8cae192 /crypto
parent    87750b433a5d2d252ee5bd879cae553c046fd0e7 (diff)
Add support for Intel SHA extension.
(Imported from upstream's 70fddbe32a7b3400a6ad0a9265f2c0ed72988d27)
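
The new code paths use the SHA extension instructions (SHA1RNDS4, SHA1NEXTE,
SHA1MSG1, SHA1MSG2, SHA256RNDS2, SHA256MSG1, SHA256MSG2) and are selected at
run time by testing bit 29 of EBX from CPUID leaf 7, which the patch reads
out of OPENSSL_ia32cap_P. As an illustration only (not part of the patch),
the same capability can be probed from Perl on Linux via the kernel's
"sha_ni" cpuinfo flag:

    #!/usr/bin/env perl
    # Illustration only: Linux exposes CPUID.(EAX=7):EBX bit 29 as the
    # "sha_ni" flag in /proc/cpuinfo.
    use strict; use warnings;

    sub have_sha_ext {
        open my $fh, '<', '/proc/cpuinfo' or return 0;
        while (<$fh>) {
            return 1 if /^flags\s*:.*\bsha_ni\b/;
        }
        return 0;
    }

    print have_sha_ext() ? "SHA extensions available\n" : "no SHA extensions\n";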
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/sha/asm/sha1-586.pl      | 120
-rw-r--r--  crypto/sha/asm/sha1-x86_64.pl   | 200
-rw-r--r--  crypto/sha/asm/sha256-586.pl    | 151
-rw-r--r--  crypto/sha/asm/sha512-x86_64.pl | 240
4 files changed, 702 insertions(+), 9 deletions(-)
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
index 17b84c8b..acc4f639 100644
--- a/crypto/sha/asm/sha1-586.pl
+++ b/crypto/sha/asm/sha1-586.pl
@@ -79,6 +79,10 @@
# strongly, it's probably more appropriate to discuss possibility of
# using vector rotate XOP on AMD...
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
######################################################################
# Current performance is summarized in following table. Numbers are
# CPU clock cycles spent to process single byte (less is better).
@@ -303,6 +307,7 @@ if ($alt) {
&function_begin("sha1_block_data_order");
if ($xmm) {
+ &static_label("shaext_shortcut");
&static_label("ssse3_shortcut");
&static_label("avx_shortcut") if ($ymm);
&static_label("K_XX_XX");
@@ -317,8 +322,11 @@ if ($xmm) {
&mov ($D,&DWP(4,$T));
&test ($D,1<<9); # check SSSE3 bit
&jz (&label("x86"));
+ &mov ($C,&DWP(8,$T));
&test ($A,1<<24); # check FXSR bit
&jz (&label("x86"));
+ &test ($C,1<<29); # check SHA bit
+ &jnz (&label("shaext_shortcut"));
if ($ymm) {
&and ($D,1<<28); # mask AVX bit
&and ($A,1<<30); # mask "Intel CPU" bit
@@ -397,6 +405,117 @@ if ($xmm) {
&function_end("sha1_block_data_order");
if ($xmm) {
+{
+######################################################################
+# Intel SHA Extensions implementation of SHA1 update function.
+#
+my ($ctx,$inp,$num)=("edi","esi","ecx");
+my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
+my @MSG=map("xmm$_",(4..7));
+
+sub sha1rnds4 {
+ my ($dst,$src,$imm)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm); }
+}
+sub sha1op38 {
+ my ($opcodelet,$dst,$src)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); }
+}
+sub sha1nexte { sha1op38(0xc8,@_); }
+sub sha1msg1 { sha1op38(0xc9,@_); }
+sub sha1msg2 { sha1op38(0xca,@_); }
+
+&function_begin("_sha1_block_data_order_shaext");
+ &call (&label("pic_point")); # make it PIC!
+ &set_label("pic_point");
+ &blindpop($tmp1);
+ &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+&set_label("shaext_shortcut");
+ &mov ($ctx,&wparam(0));
+ &mov ("ebx","esp");
+ &mov ($inp,&wparam(1));
+ &mov ($num,&wparam(2));
+ &sub ("esp",32);
+
+ &movdqu ($ABCD,&QWP(0,$ctx));
+ &movd ($E,&QWP(16,$ctx));
+ &and ("esp",-32);
+ &movdqa ($BSWAP,&QWP(0x50,$tmp1)); # byte-n-word swap
+
+ &movdqu (@MSG[0],&QWP(0,$inp));
+ &pshufd ($ABCD,$ABCD,0b00011011); # flip word order
+ &movdqu (@MSG[1],&QWP(0x10,$inp));
+ &pshufd ($E,$E,0b00011011); # flip word order
+ &movdqu (@MSG[2],&QWP(0x20,$inp));
+ &pshufb (@MSG[0],$BSWAP);
+ &movdqu (@MSG[3],&QWP(0x30,$inp));
+ &pshufb (@MSG[1],$BSWAP);
+ &pshufb (@MSG[2],$BSWAP);
+ &pshufb (@MSG[3],$BSWAP);
+ &jmp (&label("loop_shaext"));
+
+&set_label("loop_shaext",16);
+ &dec ($num);
+ &lea ("eax",&DWP(0x40,$inp));
+ &movdqa (&QWP(0,"esp"),$E); # offload $E
+ &paddd ($E,@MSG[0]);
+ &cmovne ($inp,"eax");
+ &movdqa (&QWP(16,"esp"),$ABCD); # offload $ABCD
+
+for($i=0;$i<20-4;$i+=2) {
+ &sha1msg1 (@MSG[0],@MSG[1]);
+ &movdqa ($E_,$ABCD);
+ &sha1rnds4 ($ABCD,$E,int($i/5)); # 0-3...
+ &sha1nexte ($E_,@MSG[1]);
+ &pxor (@MSG[0],@MSG[2]);
+ &sha1msg1 (@MSG[1],@MSG[2]);
+ &sha1msg2 (@MSG[0],@MSG[3]);
+
+ &movdqa ($E,$ABCD);
+ &sha1rnds4 ($ABCD,$E_,int(($i+1)/5));
+ &sha1nexte ($E,@MSG[2]);
+ &pxor (@MSG[1],@MSG[3]);
+ &sha1msg2 (@MSG[1],@MSG[0]);
+
+ push(@MSG,shift(@MSG)); push(@MSG,shift(@MSG));
+}
+ &movdqu (@MSG[0],&QWP(0,$inp));
+ &movdqa ($E_,$ABCD);
+ &sha1rnds4 ($ABCD,$E,3); # 64-67
+ &sha1nexte ($E_,@MSG[1]);
+ &movdqu (@MSG[1],&QWP(0x10,$inp));
+ &pshufb (@MSG[0],$BSWAP);
+
+ &movdqa ($E,$ABCD);
+ &sha1rnds4 ($ABCD,$E_,3); # 68-71
+ &sha1nexte ($E,@MSG[2]);
+ &movdqu (@MSG[2],&QWP(0x20,$inp));
+ &pshufb (@MSG[1],$BSWAP);
+
+ &movdqa ($E_,$ABCD);
+ &sha1rnds4 ($ABCD,$E,3); # 72-75
+ &sha1nexte ($E_,@MSG[3]);
+ &movdqu (@MSG[3],&QWP(0x30,$inp));
+ &pshufb (@MSG[2],$BSWAP);
+
+ &movdqa ($E,$ABCD);
+ &sha1rnds4 ($ABCD,$E_,3); # 76-79
+ &movdqa ($E_,&QWP(0,"esp"));
+ &pshufb (@MSG[3],$BSWAP);
+ &sha1nexte ($E,$E_);
+ &paddd ($ABCD,&QWP(16,"esp"));
+
+ &jnz (&label("loop_shaext"));
+
+ &pshufd ($ABCD,$ABCD,0b00011011);
+ &pshufd ($E,$E,0b00011011);
+ &movdqu (&QWP(0,$ctx),$ABCD);
+ &movd (&DWP(16,$ctx),$E);
+ &mov ("esp","ebx");
+&function_end("_sha1_block_data_order_shaext");
+}
######################################################################
# The SSSE3 implementation.
#
@@ -1340,6 +1459,7 @@ sub Xtail_avx()
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc); # K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6); # K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # pbswap mask
+&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
}
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
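
A note on the sha1rnds4()/sha1op38() helpers above: assemblers of the time
did not know the SHA mnemonics, so the 32-bit code emits raw encodings
through data_byte(). SHA1RNDS4 is 0F 3A CC /r ib, and the 0F 38-form
instructions (SHA1NEXTE, SHA1MSG1, SHA1MSG2) are 0F 38 C8/C9/CA /r; in the
register-register form the destination lands in ModRM.reg (bits 5-3) and the
source in ModRM.rm (bits 2-0). A standalone sketch of the same byte math:

    # Standalone sketch (same math as sha1rnds4() above): encode the
    # equivalent of "sha1rnds4 $3,%xmm1,%xmm0", i.e. dst=xmm0, src=xmm1.
    use strict; use warnings;

    sub encode_sha1rnds4 {
        my ($dst, $src, $imm) = @_;    # xmm register numbers 0..7
        return (0x0f, 0x3a, 0xcc, 0xc0 | ($dst << 3) | $src, $imm);
    }

    print join(',', map { sprintf "0x%02x", $_ } encode_sha1rnds4(0, 1, 3)), "\n";
    # -> 0x0f,0x3a,0xcc,0xc1,0x03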
diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl
index 784249f9..ea288c15 100644
--- a/crypto/sha/asm/sha1-x86_64.pl
+++ b/crypto/sha/asm/sha1-x86_64.pl
@@ -57,6 +57,10 @@
# hint regarding the number of Xupdate iterations to pre-compute in
# advance was provided by Ilya Albrekht of Intel Corp.
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
######################################################################
# Current performance is summarized in following table. Numbers are
# CPU clock cycles spent to process single byte (less is better).
@@ -71,7 +75,7 @@
# Haswell 5.45 4.15/+31% 3.57/+53%
# Bulldozer 9.11 5.95/+53%
# VIA Nano 9.32 7.15/+30%
-# Atom [10.5?] [9.23?]/+14%
+# Atom 10.3 9.17/+12%
# Silvermont 13.1(*) 9.37/+40%
#
# (*) obviously suboptimal result, nothing was done about it,
@@ -241,6 +245,9 @@ sha1_block_data_order:
mov OPENSSL_ia32cap_P+8(%rip),%r10d
test \$`1<<9`,%r8d # check SSSE3 bit
jz .Lialu
+
+ test \$`1<<29`,%r10d # check SHA bit
+ jnz _shaext_shortcut
___
$code.=<<___ if ($avx>1);
and \$`1<<3|1<<5|1<<8`,%r10d # check AVX2+BMI1+BMI2
@@ -315,6 +322,120 @@ $code.=<<___;
.size sha1_block_data_order,.-sha1_block_data_order
___
{{{
+######################################################################
+# Intel SHA Extensions implementation of SHA1 update function.
+#
+my ($ctx,$inp,$num)=("%rdi","%rsi","%rdx");
+my ($ABCD,$E,$E_,$BSWAP,$ABCD_SAVE,$E_SAVE)=map("%xmm$_",(0..3,8,9));
+my @MSG=map("%xmm$_",(4..7));
+
+$code.=<<___;
+.type sha1_block_data_order_shaext,\@function,3
+.align 32
+sha1_block_data_order_shaext:
+_shaext_shortcut:
+___
+$code.=<<___ if ($win64);
+ lea `-8-4*16`(%rsp),%rsp
+ movaps %xmm6,-8-4*16(%rax)
+ movaps %xmm7,-8-3*16(%rax)
+ movaps %xmm8,-8-2*16(%rax)
+ movaps %xmm9,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+ movdqu ($ctx),$ABCD
+ movd 16($ctx),$E
+ movdqa K_XX_XX+0xa0(%rip),$BSWAP # byte-n-word swap
+
+ movdqu ($inp),@MSG[0]
+ pshufd \$0b00011011,$ABCD,$ABCD # flip word order
+ movdqu 0x10($inp),@MSG[1]
+ pshufd \$0b00011011,$E,$E # flip word order
+ movdqu 0x20($inp),@MSG[2]
+ pshufb $BSWAP,@MSG[0]
+ movdqu 0x30($inp),@MSG[3]
+ pshufb $BSWAP,@MSG[1]
+ pshufb $BSWAP,@MSG[2]
+ movdqa $E,$E_SAVE # offload $E
+ pshufb $BSWAP,@MSG[3]
+ jmp .Loop_shaext
+
+.align 16
+.Loop_shaext:
+ dec $num
+ lea 0x40($inp),%rax # next input block
+ paddd @MSG[0],$E
+ cmovne %rax,$inp
+ movdqa $ABCD,$ABCD_SAVE # offload $ABCD
+___
+for($i=0;$i<20-4;$i+=2) {
+$code.=<<___;
+ sha1msg1 @MSG[1],@MSG[0]
+ movdqa $ABCD,$E_
+ sha1rnds4 \$`int($i/5)`,$E,$ABCD # 0-3...
+ sha1nexte @MSG[1],$E_
+ pxor @MSG[2],@MSG[0]
+ sha1msg1 @MSG[2],@MSG[1]
+ sha1msg2 @MSG[3],@MSG[0]
+
+ movdqa $ABCD,$E
+ sha1rnds4 \$`int(($i+1)/5)`,$E_,$ABCD
+ sha1nexte @MSG[2],$E
+ pxor @MSG[3],@MSG[1]
+ sha1msg2 @MSG[0],@MSG[1]
+___
+ push(@MSG,shift(@MSG)); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ movdqu ($inp),@MSG[0]
+ movdqa $ABCD,$E_
+ sha1rnds4 \$3,$E,$ABCD # 64-67
+ sha1nexte @MSG[1],$E_
+ movdqu 0x10($inp),@MSG[1]
+ pshufb $BSWAP,@MSG[0]
+
+ movdqa $ABCD,$E
+ sha1rnds4 \$3,$E_,$ABCD # 68-71
+ sha1nexte @MSG[2],$E
+ movdqu 0x20($inp),@MSG[2]
+ pshufb $BSWAP,@MSG[1]
+
+ movdqa $ABCD,$E_
+ sha1rnds4 \$3,$E,$ABCD # 72-75
+ sha1nexte @MSG[3],$E_
+ movdqu 0x30($inp),@MSG[3]
+ pshufb $BSWAP,@MSG[2]
+
+ movdqa $ABCD,$E
+ sha1rnds4 \$3,$E_,$ABCD # 76-79
+ sha1nexte $E_SAVE,$E
+ pshufb $BSWAP,@MSG[3]
+
+ paddd $ABCD_SAVE,$ABCD
+ movdqa $E,$E_SAVE # offload $E
+
+ jnz .Loop_shaext
+
+ pshufd \$0b00011011,$ABCD,$ABCD
+ pshufd \$0b00011011,$E,$E
+ movdqu $ABCD,($ctx)
+ movd $E,16($ctx)
+___
+$code.=<<___ if ($win64);
+ movaps -8-4*16(%rax),%xmm6
+ movaps -8-3*16(%rax),%xmm7
+ movaps -8-2*16(%rax),%xmm8
+ movaps -8-1*16(%rax),%xmm9
+ mov %rax,%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+ ret
+.size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
+___
+}}}
+{{{
my $Xi=4;
my @X=map("%xmm$_",(4..7,0..3));
my @Tx=map("%xmm$_",(8..10));
@@ -1646,6 +1767,7 @@ K_XX_XX:
.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask
.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask
+.byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
___
}}}
$code.=<<___;
@@ -1706,6 +1828,39 @@ se_handler:
jmp .Lcommon_seh_tail
.size se_handler,.-se_handler
+.type shaext_handler,\@abi-omnipotent
+.align 16
+shaext_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lprologue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lprologue
+ jb .Lcommon_seh_tail
+
+ lea .Lepilogue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lcommon_seh_tail
+
+ lea -8-4*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$8,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+ jmp .Lcommon_seh_tail
+.size shaext_handler,.-shaext_handler
+
.type ssse3_handler,\@abi-omnipotent
.align 16
ssse3_handler:
@@ -1801,6 +1956,9 @@ ssse3_handler:
.rva .LSEH_begin_sha1_block_data_order
.rva .LSEH_end_sha1_block_data_order
.rva .LSEH_info_sha1_block_data_order
+ .rva .LSEH_begin_sha1_block_data_order_shaext
+ .rva .LSEH_end_sha1_block_data_order_shaext
+ .rva .LSEH_info_sha1_block_data_order_shaext
.rva .LSEH_begin_sha1_block_data_order_ssse3
.rva .LSEH_end_sha1_block_data_order_ssse3
.rva .LSEH_info_sha1_block_data_order_ssse3
@@ -1821,6 +1979,9 @@ $code.=<<___;
.LSEH_info_sha1_block_data_order:
.byte 9,0,0,0
.rva se_handler
+.LSEH_info_sha1_block_data_order_shaext:
+ .byte 9,0,0,0
+ .rva shaext_handler
.LSEH_info_sha1_block_data_order_ssse3:
.byte 9,0,0,0
.rva ssse3_handler
@@ -1842,8 +2003,41 @@ ___
####################################################################
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
+sub sha1rnds4 {
+ if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-7]),\s*%xmm([0-7])/) {
+ my @opcode=(0x0f,0x3a,0xcc);
+ push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
+ my $c=$1;
+ push @opcode,$c=~/^0/?oct($c):$c;
+ return ".byte\t".join(',',@opcode);
+ } else {
+ return "sha1rnds4\t".@_[0];
+ }
+}
-print $code;
+sub sha1op38 {
+ my $instr = shift;
+ my %opcodelet = (
+ "sha1nexte" => 0xc8,
+ "sha1msg1" => 0xc9,
+ "sha1msg2" => 0xca );
+
+ if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
+ my @opcode=(0x0f,0x38);
+ push @opcode,$opcodelet{$instr};
+ push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ } else {
+ return $instr."\t".@_[0];
+ }
+}
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo or
+ s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo;
+
+ print $_,"\n";
+}
close STDOUT;
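
Unlike the 32-bit file, the 64-bit generator writes ordinary mnemonics into
$code and lets the output loop above rewrite them: backticked expressions are
eval'ed into constants, and the sha1* mnemonics become .byte directives so
the file still builds with assemblers that lack SHA support. A self-contained
sketch of both passes (two-operand 0F 38 forms only; sha1rnds4 carries an
extra immediate and is handled separately above):

    use strict; use warnings;
    my %op = (sha1nexte => 0xc8, sha1msg1 => 0xc9, sha1msg2 => 0xca);

    my @lines = ("\ttest\t\$`1<<29`,%r10d", "\tsha1nexte\t%xmm4,%xmm1");
    for my $line (@lines) {
        $line =~ s/\`([^\`]*)\`/eval $1/ge;    # fold backticked constants
        $line =~ s{\b(sha1\w+)\s+%xmm([0-7]),\s*%xmm([0-7])}
                  {".byte\t" . join(',', 0x0f, 0x38, $op{$1}, 0xc0 | $2 | ($3 << 3))}ge;
        print $line, "\n";
    }
    # -> "test    $536870912,%r10d"
    # -> ".byte   15,56,200,204"        (i.e. 0f 38 c8 cc)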
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
index a3117f03..09648a82 100644
--- a/crypto/sha/asm/sha256-586.pl
+++ b/crypto/sha/asm/sha256-586.pl
@@ -34,6 +34,10 @@
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+#
# Performance in clock cycles per processed byte (less is better):
#
# gcc icc x86 asm(*) SIMD x86_64 asm(**)
@@ -196,8 +200,13 @@ sub BODY_00_15() {
&mov ("ebx",&DWP(4,"edx"));
&test ("ecx",1<<20); # check for P4
&jnz (&label("loop"));
+ &mov ("edx",&DWP(8,"edx")) if ($xmm);
+ &test ("ecx",1<<24); # check for FXSR
+ &jz ($unroll_after?&label("no_xmm"):&label("loop"));
&and ("ecx",1<<30); # mask "Intel CPU" bit
&and ("ebx",1<<28|1<<9); # mask AVX and SSSE3 bits
+ &test ("edx",1<<29) if ($xmm); # check for SHA
+ &jnz (&label("shaext")) if ($xmm);
&or ("ecx","ebx");
&and ("ecx",1<<28|1<<30);
&cmp ("ecx",1<<28|1<<30);
@@ -209,6 +218,7 @@ sub BODY_00_15() {
&je (&label("loop_shrd"));
}
if ($unroll_after) {
+&set_label("no_xmm");
&sub ("eax","edi");
&cmp ("eax",$unroll_after);
&jae (&label("unrolled"));
@@ -495,6 +505,146 @@ my @AH=($A,$K256);
&function_end_A();
}
if (!$i386 && $xmm) {{{
+{
+######################################################################
+# Intel SHA Extensions implementation of SHA256 update function.
+#
+my ($ctx,$inp,$end)=("esi","edi","eax");
+my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
+my @MSG=map("xmm$_",(3..6));
+
+sub sha256op38 {
+ my ($opcodelet,$dst,$src)=@_;
+ if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+ { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); }
+}
+sub sha256rnds2 { sha256op38(0xcb,@_); }
+sub sha256msg1 { sha256op38(0xcc,@_); }
+sub sha256msg2 { sha256op38(0xcd,@_); }
+
+&set_label("shaext",32);
+ &sub ("esp",32);
+
+ &movdqu ($ABEF,&QWP(0,$ctx)); # DCBA
+ &lea ($K256,&DWP(0x80,$K256));
+ &movdqu ($CDGH,&QWP(16,$ctx)); # HGFE
+ &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
+
+ &pshufd ($Wi,$ABEF,0x1b); # ABCD
+ &pshufd ($ABEF,$ABEF,0xb1); # CDAB
+ &pshufd ($CDGH,$CDGH,0x1b); # EFGH
+ &palignr ($ABEF,$CDGH,8); # ABEF
+ &punpcklqdq ($CDGH,$Wi); # CDGH
+ &jmp (&label("loop_shaext"));
+
+&set_label("loop_shaext",16);
+ &movdqu (@MSG[0],&QWP(0,$inp));
+ &movdqu (@MSG[1],&QWP(0x10,$inp));
+ &movdqu (@MSG[2],&QWP(0x20,$inp));
+ &pshufb (@MSG[0],$TMP);
+ &movdqu (@MSG[3],&QWP(0x30,$inp));
+ &movdqa (&QWP(16,"esp"),$CDGH); # offload
+
+ &movdqa ($Wi,&QWP(0*16-0x80,$K256));
+ &paddd ($Wi,@MSG[0]);
+ &pshufb (@MSG[1],$TMP);
+ &sha256rnds2 ($CDGH,$ABEF); # 0-3
+ &pshufd ($Wi,$Wi,0x0e);
+ &nop ();
+ &movdqa (&QWP(0,"esp"),$ABEF); # offload
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(1*16-0x80,$K256));
+ &paddd ($Wi,@MSG[1]);
+ &pshufb (@MSG[2],$TMP);
+ &sha256rnds2 ($CDGH,$ABEF); # 4-7
+ &pshufd ($Wi,$Wi,0x0e);
+ &lea ($inp,&DWP(0x40,$inp));
+ &sha256msg1 (@MSG[0],@MSG[1]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(2*16-0x80,$K256));
+ &paddd ($Wi,@MSG[2]);
+ &pshufb (@MSG[3],$TMP);
+ &sha256rnds2 ($CDGH,$ABEF); # 8-11
+ &pshufd ($Wi,$Wi,0x0e);
+ &movdqa ($TMP,@MSG[3]);
+ &palignr ($TMP,@MSG[2],4);
+ &nop ();
+ &paddd (@MSG[0],$TMP);
+ &sha256msg1 (@MSG[1],@MSG[2]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(3*16-0x80,$K256));
+ &paddd ($Wi,@MSG[3]);
+ &sha256msg2 (@MSG[0],@MSG[3]);
+ &sha256rnds2 ($CDGH,$ABEF); # 12-15
+ &pshufd ($Wi,$Wi,0x0e);
+ &movdqa ($TMP,@MSG[0]);
+ &palignr ($TMP,@MSG[3],4);
+ &nop ();
+ &paddd (@MSG[1],$TMP);
+ &sha256msg1 (@MSG[2],@MSG[3]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+for($i=4;$i<16-3;$i++) {
+ &movdqa ($Wi,&QWP($i*16-0x80,$K256));
+ &paddd ($Wi,@MSG[0]);
+ &sha256msg2 (@MSG[1],@MSG[0]);
+ &sha256rnds2 ($CDGH,$ABEF); # 16-19...
+ &pshufd ($Wi,$Wi,0x0e);
+ &movdqa ($TMP,@MSG[1]);
+ &palignr ($TMP,@MSG[0],4);
+ &nop ();
+ &paddd (@MSG[2],$TMP);
+ &sha256msg1 (@MSG[3],@MSG[0]);
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ push(@MSG,shift(@MSG));
+}
+ &movdqa ($Wi,&QWP(13*16-0x80,$K256));
+ &paddd ($Wi,@MSG[0]);
+ &sha256msg2 (@MSG[1],@MSG[0]);
+ &sha256rnds2 ($CDGH,$ABEF); # 52-55
+ &pshufd ($Wi,$Wi,0x0e);
+ &movdqa ($TMP,@MSG[1]);
+ &palignr ($TMP,@MSG[0],4);
+ &sha256rnds2 ($ABEF,$CDGH);
+ &paddd (@MSG[2],$TMP);
+
+ &movdqa ($Wi,&QWP(14*16-0x80,$K256));
+ &paddd ($Wi,@MSG[1]);
+ &sha256rnds2 ($CDGH,$ABEF); # 56-59
+ &pshufd ($Wi,$Wi,0x0e);
+ &sha256msg2 (@MSG[2],@MSG[1]);
+ &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &movdqa ($Wi,&QWP(15*16-0x80,$K256));
+ &paddd ($Wi,@MSG[2]);
+ &nop ();
+ &sha256rnds2 ($CDGH,$ABEF); # 60-63
+ &pshufd ($Wi,$Wi,0x0e);
+ &cmp ($end,$inp);
+ &nop ();
+ &sha256rnds2 ($ABEF,$CDGH);
+
+ &paddd ($CDGH,&QWP(16,"esp"));
+ &paddd ($ABEF,&QWP(0,"esp"));
+ &jnz (&label("loop_shaext"));
+
+ &pshufd ($CDGH,$CDGH,0xb1); # DCHG
+ &pshufd ($TMP,$ABEF,0x1b); # FEBA
+ &pshufd ($ABEF,$ABEF,0xb1); # BAFE
+ &punpckhqdq ($ABEF,$CDGH); # DCBA
+ &palignr ($CDGH,$TMP,8); # HGFE
+
+ &mov ("esp",&DWP(32+12,"esp"));
+ &movdqu (&QWP(0,$ctx),$ABEF);
+ &movdqu (&QWP(16,$ctx),$CDGH);
+&function_end_A();
+}
+
my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);
@@ -811,7 +961,6 @@ sub body_00_15 () {
if ($avx) {
&set_label("AVX",32);
if ($avx>1) {
- &mov ("edx",&DWP(8,"edx"));
&and ("edx",1<<8|1<<3); # check for BMI2+BMI1
&cmp ("edx",1<<8|1<<3);
&je (&label("AVX_BMI"));
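
The pshufd/palignr/punpcklqdq sequence at the "shaext" entry (and its inverse
at exit) exists because sha256rnds2 consumes the state as two packed halves,
ABEF and CDGH, while the SHA256 context stores the eight words as ABCD|EFGH.
A plain-Perl sketch of the logical regrouping only (lane order within each
register is a separate matter, handled by the pshufd immediates):

    # Word regrouping performed on load and undone on store:
    use strict; use warnings;
    my @state = qw(A B C D E F G H);    # as stored in the context
    my @ABEF  = @state[0, 1, 4, 5];     # one sha256rnds2 state half
    my @CDGH  = @state[2, 3, 6, 7];     # the other half
    my @back  = (@ABEF[0, 1], @CDGH[0, 1], @ABEF[2, 3], @CDGH[2, 3]);
    print "@back\n";                    # A B C D E F G H again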
diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl
index bde067be..e2253f1a 100644
--- a/crypto/sha/asm/sha512-x86_64.pl
+++ b/crypto/sha/asm/sha512-x86_64.pl
@@ -67,7 +67,12 @@
# significant 128-bit halves and data from second to most significant.
# The data is then processed with same SIMD instruction sequence as
# for AVX, but with %ymm as operands. Side effect is increased stack
-# frame, 448 additional bytes in SHA256 and 1152 in SHA512.
+# frame, 448 additional bytes in SHA256 and 1152 in SHA512, and 1.2KB
+# code size increase.
+#
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
######################################################################
# Current performance in cycles per processed byte (less is better):
@@ -254,6 +259,10 @@ $code.=<<___ if ($SZ==4 || $avx);
mov 4(%r11),%r10d
mov 8(%r11),%r11d
___
+$code.=<<___ if ($SZ==4);
+ test \$`1<<29`,%r11d # check for SHA
+ jnz _shaext_shortcut
+___
$code.=<<___ if ($avx && $SZ==8);
test \$`1<<11`,%r10d # check for XOP
jnz .Lxop_shortcut
@@ -509,6 +518,166 @@ ___
######################################################################
# SIMD code paths
#
+if ($SZ==4) {{{
+######################################################################
+# Intel SHA Extensions implementation of SHA256 update function.
+#
+my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx");
+
+my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10));
+my @MSG=map("%xmm$_",(3..6));
+
+$code.=<<___;
+.type sha256_block_data_order_shaext,\@function,3
+.align 64
+sha256_block_data_order_shaext:
+_shaext_shortcut:
+___
+$code.=<<___ if ($win64);
+ lea `-8-5*16`(%rsp),%rsp
+ movaps %xmm6,-8-5*16(%rax)
+ movaps %xmm7,-8-4*16(%rax)
+ movaps %xmm8,-8-3*16(%rax)
+ movaps %xmm9,-8-2*16(%rax)
+ movaps %xmm10,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+ lea K256+0x80(%rip),$Tbl
+ movdqu ($ctx),$ABEF # DCBA
+ movdqu 16($ctx),$CDGH # HGFE
+ movdqa 0x200-0x80($Tbl),$TMP # byte swap mask
+
+ pshufd \$0x1b,$ABEF,$Wi # ABCD
+ pshufd \$0xb1,$ABEF,$ABEF # CDAB
+ pshufd \$0x1b,$CDGH,$CDGH # EFGH
+ movdqa $TMP,$BSWAP # offload
+ palignr \$8,$CDGH,$ABEF # ABEF
+ punpcklqdq $Wi,$CDGH # CDGH
+ jmp .Loop_shaext
+
+.align 16
+.Loop_shaext:
+ movdqu ($inp),@MSG[0]
+ movdqu 0x10($inp),@MSG[1]
+ movdqu 0x20($inp),@MSG[2]
+ pshufb $TMP,@MSG[0]
+ movdqu 0x30($inp),@MSG[3]
+
+ movdqa 0*32-0x80($Tbl),$Wi
+ paddd @MSG[0],$Wi
+ pshufb $TMP,@MSG[1]
+ movdqa $CDGH,$CDGH_SAVE # offload
+ sha256rnds2 $ABEF,$CDGH # 0-3
+ pshufd \$0x0e,$Wi,$Wi
+ nop
+ movdqa $ABEF,$ABEF_SAVE # offload
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 1*32-0x80($Tbl),$Wi
+ paddd @MSG[1],$Wi
+ pshufb $TMP,@MSG[2]
+ sha256rnds2 $ABEF,$CDGH # 4-7
+ pshufd \$0x0e,$Wi,$Wi
+ lea 0x40($inp),$inp
+ sha256msg1 @MSG[1],@MSG[0]
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 2*32-0x80($Tbl),$Wi
+ paddd @MSG[2],$Wi
+ pshufb $TMP,@MSG[3]
+ sha256rnds2 $ABEF,$CDGH # 8-11
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[3],$TMP
+ palignr \$4,@MSG[2],$TMP
+ nop
+ paddd $TMP,@MSG[0]
+ sha256msg1 @MSG[2],@MSG[1]
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 3*32-0x80($Tbl),$Wi
+ paddd @MSG[3],$Wi
+ sha256msg2 @MSG[3],@MSG[0]
+ sha256rnds2 $ABEF,$CDGH # 12-15
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[0],$TMP
+ palignr \$4,@MSG[3],$TMP
+ nop
+ paddd $TMP,@MSG[1]
+ sha256msg1 @MSG[3],@MSG[2]
+ sha256rnds2 $CDGH,$ABEF
+___
+for($i=4;$i<16-3;$i++) {
+$code.=<<___;
+ movdqa $i*32-0x80($Tbl),$Wi
+ paddd @MSG[0],$Wi
+ sha256msg2 @MSG[0],@MSG[1]
+ sha256rnds2 $ABEF,$CDGH # 16-19...
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[1],$TMP
+ palignr \$4,@MSG[0],$TMP
+ nop
+ paddd $TMP,@MSG[2]
+ sha256msg1 @MSG[0],@MSG[3]
+ sha256rnds2 $CDGH,$ABEF
+___
+ push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ movdqa 13*32-0x80($Tbl),$Wi
+ paddd @MSG[0],$Wi
+ sha256msg2 @MSG[0],@MSG[1]
+ sha256rnds2 $ABEF,$CDGH # 52-55
+ pshufd \$0x0e,$Wi,$Wi
+ movdqa @MSG[1],$TMP
+ palignr \$4,@MSG[0],$TMP
+ sha256rnds2 $CDGH,$ABEF
+ paddd $TMP,@MSG[2]
+
+ movdqa 14*32-0x80($Tbl),$Wi
+ paddd @MSG[1],$Wi
+ sha256rnds2 $ABEF,$CDGH # 56-59
+ pshufd \$0x0e,$Wi,$Wi
+ sha256msg2 @MSG[1],@MSG[2]
+ movdqa $BSWAP,$TMP
+ sha256rnds2 $CDGH,$ABEF
+
+ movdqa 15*32-0x80($Tbl),$Wi
+ paddd @MSG[2],$Wi
+ nop
+ sha256rnds2 $ABEF,$CDGH # 60-63
+ pshufd \$0x0e,$Wi,$Wi
+ dec $num
+ nop
+ sha256rnds2 $CDGH,$ABEF
+
+ paddd $CDGH_SAVE,$CDGH
+ paddd $ABEF_SAVE,$ABEF
+ jnz .Loop_shaext
+
+ pshufd \$0xb1,$CDGH,$CDGH # DCHG
+ pshufd \$0x1b,$ABEF,$TMP # FEBA
+ pshufd \$0xb1,$ABEF,$ABEF # BAFE
+ punpckhqdq $CDGH,$ABEF # DCBA
+ palignr \$8,$TMP,$CDGH # HGFE
+
+ movdqu $ABEF,($ctx)
+ movdqu $CDGH,16($ctx)
+___
+$code.=<<___ if ($win64);
+ movaps -8-5*16(%rax),%xmm6
+ movaps -8-4*16(%rax),%xmm7
+ movaps -8-3*16(%rax),%xmm8
+ movaps -8-2*16(%rax),%xmm9
+ movaps -8-1*16(%rax),%xmm10
+ mov %rax,%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+ ret
+.size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
+___
+}}}
{{{
my $a4=$T1;
@@ -620,13 +789,13 @@ $code.=<<___;
movdqu 0x00($inp),@X[0]
movdqu 0x10($inp),@X[1]
movdqu 0x20($inp),@X[2]
- movdqu 0x30($inp),@X[3]
pshufb $t3,@X[0]
+ movdqu 0x30($inp),@X[3]
lea $TABLE(%rip),$Tbl
pshufb $t3,@X[1]
movdqa 0x00($Tbl),$t0
- pshufb $t3,@X[2]
movdqa 0x20($Tbl),$t1
+ pshufb $t3,@X[2]
paddd @X[0],$t0
movdqa 0x40($Tbl),$t2
pshufb $t3,@X[3]
@@ -2087,6 +2256,39 @@ $code.=<<___;
ret
.size se_handler,.-se_handler
+.type shaext_handler,\@abi-omnipotent
+.align 16
+shaext_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lprologue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lprologue
+ jb .Lin_prologue
+
+ lea .Lepilogue_shaext(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lin_prologue
+
+ lea -8-5*16(%rax),%rsi
+ lea 512($context),%rdi # &context.Xmm6
+ mov \$10,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+ jmp .Lin_prologue
+.size shaext_handler,.-shaext_handler
+
.section .pdata
.align 4
.rva .LSEH_begin_$func
@@ -2094,6 +2296,9 @@ $code.=<<___;
.rva .LSEH_info_$func
___
$code.=<<___ if ($SZ==4);
+ .rva .LSEH_begin_${func}_shaext
+ .rva .LSEH_end_${func}_shaext
+ .rva .LSEH_info_${func}_shaext
.rva .LSEH_begin_${func}_ssse3
.rva .LSEH_end_${func}_ssse3
.rva .LSEH_info_${func}_ssse3
@@ -2122,6 +2327,9 @@ $code.=<<___;
.rva .Lprologue,.Lepilogue # HandlerData[]
___
$code.=<<___ if ($SZ==4);
+.LSEH_info_${func}_shaext:
+ .byte 9,0,0,0
+ .rva shaext_handler
.LSEH_info_${func}_ssse3:
.byte 9,0,0,0
.rva se_handler
@@ -2147,6 +2355,28 @@ $code.=<<___ if ($avx>1);
___
}
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
+sub sha256op38 {
+ my $instr = shift;
+ my %opcodelet = (
+ "sha256rnds2" => 0xcb,
+ "sha256msg1" => 0xcc,
+ "sha256msg2" => 0xcd );
+
+ if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
+ my @opcode=(0x0f,0x38);
+ push @opcode,$opcodelet{$instr};
+ push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
+ return ".byte\t".join(',',@opcode);
+ } else {
+ return $instr."\t".@_[0];
+ }
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo;
+
+ print $_,"\n";
+}
close STDOUT;
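
The sha256op38() filter above applies the same .byte rewriting to the SHA256
mnemonics. Only the two explicit registers are encoded: sha256rnds2's third
operand, the pre-added round-constant/message words, is architecturally fixed
to xmm0, which is why $Wi is pinned to xmm0 in both SHA256 implementations.
A self-contained worked example of the encoding:

    # Same ModR/M math as sha256op38() above, inlined for illustration:
    use strict; use warnings;
    my %op = (sha256rnds2 => 0xcb, sha256msg1 => 0xcc, sha256msg2 => 0xcd);

    sub encode {
        my ($instr, $args) = @_;
        $args =~ /%xmm([0-7]),\s*%xmm([0-7])/ or die "unsupported: $args";
        return ".byte\t" . join(',', 0x0f, 0x38, $op{$instr}, 0xc0 | $1 | ($2 << 3));
    }

    print encode("sha256rnds2", "%xmm1,%xmm2"), "\n";
    # -> ".byte   15,56,203,209"        (i.e. 0f 38 cb d1)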