| author | Parménides GV <parmegv@sdf.org> | 2014-09-26 09:46:26 +0200 |
|---|---|---|
| committer | Parménides GV <parmegv@sdf.org> | 2014-09-26 09:46:26 +0200 |
| commit | 394451dbae3e71282611058e00b5fd16c865f147 (patch) | |
| tree | 17b71034d9350a2848603f5edf0a8b13025909be /app/openssl/crypto/sha/asm/sha256-armv4.pl | |
| parent | 644fd02cf8da95b0b5a99fb9f2142628dd27f7c2 (diff) | |
Revert "Updated native subprojects from ics-openvpn."
This reverts commit d0e7ba3029b2fd42582413aa95773fe7dbdede90.
I'll postpone this work until the next cycle; it's not trivial, since the updated sources don't link properly.
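For context on the kind of failure "doesn't link properly" suggests here: the NEON/ARMv8 paths removed by the diff below reference an external capability word, `OPENSSL_armcap_P`, which the C side of the build must provide (in upstream OpenSSL it lives in crypto/armcap.c). The sketch below shows that dependency; the symbol names are taken from the diff, but the diagnosis itself is only a plausible guess, not something the commit records.

```c
/*
 * Illustrative sketch only -- not part of this commit.  The newer
 * perlasm being reverted references an external capability word,
 * OPENSSL_armcap_P (see the removed ".LOPENSSL_armcap" lines below).
 * If the translation unit defining it is missing from the native
 * build, every reference to it fails at link time.  Whether this is
 * the exact link error behind the revert is an assumption.
 */
#include <stdint.h>

#define ARMV7_NEON   (1 << 0)  /* bit layout mirrors OpenSSL's arm_arch.h */
#define ARMV8_SHA256 (1 << 4)

/* The symbol the removed asm probes with "tst r12,#ARMV7_NEON" etc. */
uint32_t OPENSSL_armcap_P = 0;

/* Real code fills this in at startup, e.g. from getauxval(AT_HWCAP);
 * leaving it zero keeps the integer-only ARMv4 path selected. */
void OPENSSL_cpuid_setup(void)
{
    OPENSSL_armcap_P = 0;
}
```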
Diffstat (limited to 'app/openssl/crypto/sha/asm/sha256-armv4.pl')
-rw-r--r-- | app/openssl/crypto/sha/asm/sha256-armv4.pl | 585

1 file changed, 70 insertions, 515 deletions
```diff
diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.pl b/app/openssl/crypto/sha/asm/sha256-armv4.pl
index 505ca8f3..9c84e8d9 100644
--- a/app/openssl/crypto/sha/asm/sha256-armv4.pl
+++ b/app/openssl/crypto/sha/asm/sha256-armv4.pl
@@ -1,7 +1,7 @@
 #!/usr/bin/env perl
 
 # ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
 # project. The module is, however, dual licensed under OpenSSL and
 # CRYPTOGAMS licenses depending on where you obtain it. For further
 # details see http://www.openssl.org/~appro/cryptogams/.
@@ -21,27 +21,15 @@
 # February 2011.
 #
 # Profiler-assisted and platform-specific optimization resulted in 16%
-# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
-
-# September 2013.
-#
-# Add NEON implementation. On Cortex A8 it was measured to process one
-# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
-# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
-# code (meaning that latter performs sub-optimally, nothing was done
-# about it).
-
-# May 2014.
-#
-# Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+# improvement on Cortex A8 core and ~17 cycles per processed byte.
 
 while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
 open STDOUT,">$output";
 
 $ctx="r0";	$t0="r0";
-$inp="r1";	$t4="r1";
+$inp="r1";	$t3="r1";
 $len="r2";	$t1="r2";
-$T1="r3";	$t3="r3";
+$T1="r3";
 $A="r4";
 $B="r5";
 $C="r6";
@@ -64,88 +52,71 @@ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___ if ($i<16);
 #if __ARM_ARCH__>=7
-	@ ldr	$t1,[$inp],#4			@ $i
-# if $i==15
-	str	$inp,[sp,#17*4]			@ make room for $t4
-# endif
-	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
-	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
-	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
-	rev	$t1,$t1
+	ldr	$T1,[$inp],#4
 #else
-	@ ldrb	$t1,[$inp,#3]			@ $i
-	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	ldrb	$T1,[$inp,#3]			@ $i
 	ldrb	$t2,[$inp,#2]
-	ldrb	$t0,[$inp,#1]
-	orr	$t1,$t1,$t2,lsl#8
-	ldrb	$t2,[$inp],#4
-	orr	$t1,$t1,$t0,lsl#16
-# if $i==15
-	str	$inp,[sp,#17*4]			@ make room for $t4
-# endif
-	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
-	orr	$t1,$t1,$t2,lsl#24
-	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+	ldrb	$t1,[$inp,#1]
+	ldrb	$t0,[$inp],#4
+	orr	$T1,$T1,$t2,lsl#8
+	orr	$T1,$T1,$t1,lsl#16
+	orr	$T1,$T1,$t0,lsl#24
 #endif
 ___
 $code.=<<___;
+	mov	$t0,$e,ror#$Sigma1[0]
 	ldr	$t2,[$Ktbl],#4			@ *K256++
-	add	$h,$h,$t1			@ h+=X[i]
-	str	$t1,[sp,#`$i%16`*4]
+	eor	$t0,$t0,$e,ror#$Sigma1[1]
 	eor	$t1,$f,$g
-	add	$h,$h,$t0,ror#$Sigma1[0]	@ h+=Sigma1(e)
+#if $i>=16
+	add	$T1,$T1,$t3			@ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev	$T1,$T1
+#endif
+#if $i==15
+	str	$inp,[sp,#17*4]			@ leave room for $t3
+#endif
+	eor	$t0,$t0,$e,ror#$Sigma1[2]	@ Sigma1(e)
 	and	$t1,$t1,$e
-	add	$h,$h,$t2			@ h+=K256[i]
+	str	$T1,[sp,#`$i%16`*4]
+	add	$T1,$T1,$t0
 	eor	$t1,$t1,$g			@ Ch(e,f,g)
-	eor	$t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
-	add	$h,$h,$t1			@ h+=Ch(e,f,g)
-#if $i==31
-	and	$t2,$t2,#0xff
-	cmp	$t2,#0xf2			@ done?
+	add	$T1,$T1,$h
+	mov	$h,$a,ror#$Sigma0[0]
+	add	$T1,$T1,$t1
+	eor	$h,$h,$a,ror#$Sigma0[1]
+	add	$T1,$T1,$t2
+	eor	$h,$h,$a,ror#$Sigma0[2]		@ Sigma0(a)
+#if $i>=15
+	ldr	$t3,[sp,#`($i+2)%16`*4]		@ from BODY_16_xx
 #endif
-#if $i<15
-# if __ARM_ARCH__>=7
-	ldr	$t1,[$inp],#4			@ prefetch
-# else
-	ldrb	$t1,[$inp,#3]
-# endif
-	eor	$t2,$a,$b			@ a^b, b^c in next round
-#else
-	ldr	$t1,[sp,#`($i+2)%16`*4]		@ from future BODY_16_xx
-	eor	$t2,$a,$b			@ a^b, b^c in next round
-	ldr	$t4,[sp,#`($i+15)%16`*4]	@ from future BODY_16_xx
-#endif
-	eor	$t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]`	@ Sigma0(a)
-	and	$t3,$t3,$t2			@ (b^c)&=(a^b)
-	add	$d,$d,$h			@ d+=h
-	eor	$t3,$t3,$b			@ Maj(a,b,c)
-	add	$h,$h,$t0,ror#$Sigma0[0]	@ h+=Sigma0(a)
-	@ add	$h,$h,$t3			@ h+=Maj(a,b,c)
+	orr	$t0,$a,$b
+	and	$t1,$a,$b
+	and	$t0,$t0,$c
+	add	$h,$h,$T1
+	orr	$t0,$t0,$t1			@ Maj(a,b,c)
+	add	$d,$d,$T1
+	add	$h,$h,$t0
 ___
-	($t2,$t3)=($t3,$t2);
 }
 
 sub BODY_16_XX {
 my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___;
-	@ ldr	$t1,[sp,#`($i+1)%16`*4]		@ $i
-	@ ldr	$t4,[sp,#`($i+14)%16`*4]
-	mov	$t0,$t1,ror#$sigma0[0]
-	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
-	mov	$t2,$t4,ror#$sigma1[0]
-	eor	$t0,$t0,$t1,ror#$sigma0[1]
-	eor	$t2,$t2,$t4,ror#$sigma1[1]
-	eor	$t0,$t0,$t1,lsr#$sigma0[2]	@ sigma0(X[i+1])
-	ldr	$t1,[sp,#`($i+0)%16`*4]
-	eor	$t2,$t2,$t4,lsr#$sigma1[2]	@ sigma1(X[i+14])
-	ldr	$t4,[sp,#`($i+9)%16`*4]
-
-	add	$t2,$t2,$t0
-	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`	@ from BODY_00_15
-	add	$t1,$t1,$t2
-	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
-	add	$t1,$t1,$t4			@ X[i]
+	@ ldr	$t3,[sp,#`($i+1)%16`*4]		@ $i
+	ldr	$t2,[sp,#`($i+14)%16`*4]
+	mov	$t0,$t3,ror#$sigma0[0]
+	ldr	$T1,[sp,#`($i+0)%16`*4]
+	eor	$t0,$t0,$t3,ror#$sigma0[1]
+	ldr	$t1,[sp,#`($i+9)%16`*4]
+	eor	$t0,$t0,$t3,lsr#$sigma0[2]	@ sigma0(X[i+1])
+	mov	$t3,$t2,ror#$sigma1[0]
+	add	$T1,$T1,$t0
+	eor	$t3,$t3,$t2,ror#$sigma1[1]
+	add	$T1,$T1,$t1
+	eor	$t3,$t3,$t2,lsr#$sigma1[2]	@ sigma1(X[i+14])
+	@ add	$T1,$T1,$t3
___
 &BODY_00_15(@_);
 }
@@ -176,64 +147,46 @@ K256:
 .word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
 .word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
 .size	K256,.-K256
-.word	0				@ terminator
-.LOPENSSL_armcap:
-.word	OPENSSL_armcap_P-sha256_block_data_order
-.align	5
 
 .global	sha256_block_data_order
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
 	sub	r3,pc,#8		@ sha256_block_data_order
 	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
-#if __ARM_ARCH__>=7
-	ldr	r12,.LOPENSSL_armcap
-	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
-	tst	r12,#ARMV8_SHA256
-	bne	.LARMv8
-	tst	r12,#ARMV7_NEON
-	bne	.LNEON
-#endif
 	stmdb	sp!,{$ctx,$inp,$len,r4-r11,lr}
 	ldmia	$ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
-	sub	$Ktbl,r3,#256+32	@ K256
+	sub	$Ktbl,r3,#256		@ K256
 	sub	sp,sp,#16*4		@ alloca(X[16])
 .Loop:
-# if __ARM_ARCH__>=7
-	ldr	$t1,[$inp],#4
-# else
-	ldrb	$t1,[$inp,#3]
-# endif
-	eor	$t3,$B,$C		@ magic
-	eor	$t2,$t2,$t2
 ___
 for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
 $code.=".Lrounds_16_xx:\n";
 for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
-	ldreq	$t3,[sp,#16*4]		@ pull ctx
+	and	$t2,$t2,#0xff
+	cmp	$t2,#0xf2
 	bne	.Lrounds_16_xx
 
-	add	$A,$A,$t2		@ h+=Maj(a,b,c) from the past
-	ldr	$t0,[$t3,#0]
-	ldr	$t1,[$t3,#4]
-	ldr	$t2,[$t3,#8]
+	ldr	$T1,[sp,#16*4]		@ pull ctx
+	ldr	$t0,[$T1,#0]
+	ldr	$t1,[$T1,#4]
+	ldr	$t2,[$T1,#8]
 	add	$A,$A,$t0
-	ldr	$t0,[$t3,#12]
+	ldr	$t0,[$T1,#12]
 	add	$B,$B,$t1
-	ldr	$t1,[$t3,#16]
+	ldr	$t1,[$T1,#16]
 	add	$C,$C,$t2
-	ldr	$t2,[$t3,#20]
+	ldr	$t2,[$T1,#20]
 	add	$D,$D,$t0
-	ldr	$t0,[$t3,#24]
+	ldr	$t0,[$T1,#24]
 	add	$E,$E,$t1
-	ldr	$t1,[$t3,#28]
+	ldr	$t1,[$T1,#28]
 	add	$F,$F,$t2
 	ldr	$inp,[sp,#17*4]		@ pull inp
 	ldr	$t2,[sp,#18*4]		@ pull inp+len
 	add	$G,$G,$t0
 	add	$H,$H,$t1
-	stmia	$t3,{$A,$B,$C,$D,$E,$F,$G,$H}
+	stmia	$T1,{$A,$B,$C,$D,$E,$F,$G,$H}
 	cmp	$inp,$t2
 	sub	$Ktbl,$Ktbl,#256	@ rewind Ktbl
 	bne	.Loop
@@ -247,410 +200,12 @@ $code.=<<___;
 	moveq	pc,lr			@ be binary compatible with V4, yet
 	bx	lr			@ interoperable with Thumb ISA:-)
 #endif
-.size	sha256_block_data_order,.-sha256_block_data_order
-___
-######################################################################
-# NEON stuff
-#
-{{{
-my @X=map("q$_",(0..3));
-my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
-my $Xfer=$t4;
-my $j=0;
-
-sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
-sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
-
-sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
-{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
-  my $arg = pop;
-    $arg = "#$arg" if ($arg*1 eq $arg);
-    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
-}
-
-sub Xupdate()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);
-  my ($a,$b,$c,$d,$e,$f,$g,$h);
-
-	&vext_8		($T0,@X[0],@X[1],4);	# X[1..4]
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vext_8		($T1,@X[2],@X[3],4);	# X[9..12]
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T2,$T0,$sigma0[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += X[9..12]
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T1,$T0,$sigma0[2]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vsli_32	($T2,$T0,32-$sigma0[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T3,$T0,$sigma0[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&veor		($T1,$T1,$T2);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vsli_32	($T3,$T0,32-$sigma0[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T4,&Dhi(@X[3]),$sigma1[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&veor		($T1,$T1,$T3);		# sigma0(X[1..4])
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T5,&Dhi(@X[3]),$sigma1[2]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&veor		($T5,$T5,$T4);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T4,&Dhi(@X[3]),$sigma1[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&veor		($T5,$T5,$T4);		# sigma1(X[14..15])
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vadd_i32	(&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T4,&Dlo(@X[0]),$sigma1[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T5,&Dlo(@X[0]),$sigma1[2]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&veor		($T5,$T5,$T4);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vshr_u32	($T4,&Dlo(@X[0]),$sigma1[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vld1_32	("{$T0}","[$Ktbl,:128]!");
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[1]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&veor		($T5,$T5,$T4);		# sigma1(X[16..17])
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vadd_i32	(&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vadd_i32	($T0,$T0,@X[0]);
-	 while($#insns>=2) { eval(shift(@insns)); }
-	&vst1_32	("{$T0}","[$Xfer,:128]!");
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-
-	push(@X,shift(@X));		# "rotate" X[]
-}
-
-sub Xpreload()
-{ use integer;
-  my $body = shift;
-  my @insns = (&$body,&$body,&$body,&$body);
-  my ($a,$b,$c,$d,$e,$f,$g,$h);
-
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vld1_32	("{$T0}","[$Ktbl,:128]!");
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vrev32_8	(@X[0],@X[0]);
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	 eval(shift(@insns));
-	&vadd_i32	($T0,$T0,@X[0]);
-	 foreach (@insns) { eval; }	# remaining instructions
-	&vst1_32	("{$T0}","[$Xfer,:128]!");
-
-	push(@X,shift(@X));		# "rotate" X[]
-}
-
-sub body_00_15 () {
-	(
-	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
-	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
-	'&eor	($t1,$f,$g)',
-	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
-	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
-	'&and	($t1,$t1,$e)',
-	'&eor	($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
-	'&eor	($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
-	'&eor	($t1,$t1,$g)',			# Ch(e,f,g)
-	'&add	($h,$h,$t2,"ror#$Sigma1[0]")',	# h+=Sigma1(e)
-	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
-	'&eor	($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
-	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
-	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
-	'&ldr	($t1,"[$Ktbl]")				if ($j==15);'.
-	'&ldr	($t1,"[sp,#64]")			if ($j==31)',
-	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
-	'&add	($d,$d,$h)',			# d+=h
-	'&add	($h,$h,$t0,"ror#$Sigma0[0]");'.	# h+=Sigma0(a)
-	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
-	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
-	)
-}
-
-$code.=<<___;
-#if __ARM_ARCH__>=7
-.fpu	neon
-
-.type	sha256_block_data_order_neon,%function
-.align	4
-sha256_block_data_order_neon:
-.LNEON:
-	stmdb	sp!,{r4-r12,lr}
-
-	mov	$t2,sp
-	sub	sp,sp,#16*4+16		@ alloca
-	sub	$Ktbl,r3,#256+32	@ K256
-	bic	sp,sp,#15		@ align for 128-bit stores
-
-	vld1.8		{@X[0]},[$inp]!
-	vld1.8		{@X[1]},[$inp]!
-	vld1.8		{@X[2]},[$inp]!
-	vld1.8		{@X[3]},[$inp]!
-	vld1.32		{$T0},[$Ktbl,:128]!
-	vld1.32		{$T1},[$Ktbl,:128]!
-	vld1.32		{$T2},[$Ktbl,:128]!
-	vld1.32		{$T3},[$Ktbl,:128]!
-	vrev32.8	@X[0],@X[0]		@ yes, even on
-	str		$ctx,[sp,#64]
-	vrev32.8	@X[1],@X[1]		@ big-endian
-	str		$inp,[sp,#68]
-	mov		$Xfer,sp
-	vrev32.8	@X[2],@X[2]
-	str		$len,[sp,#72]
-	vrev32.8	@X[3],@X[3]
-	str		$t2,[sp,#76]		@ save original sp
-	vadd.i32	$T0,$T0,@X[0]
-	vadd.i32	$T1,$T1,@X[1]
-	vst1.32		{$T0},[$Xfer,:128]!
-	vadd.i32	$T2,$T2,@X[2]
-	vst1.32		{$T1},[$Xfer,:128]!
-	vadd.i32	$T3,$T3,@X[3]
-	vst1.32		{$T2},[$Xfer,:128]!
-	vst1.32		{$T3},[$Xfer,:128]!
-
-	ldmia		$ctx,{$A-$H}
-	sub		$Xfer,$Xfer,#64
-	ldr		$t1,[sp,#0]
-	eor		$t2,$t2,$t2
-	eor		$t3,$B,$C
-	b		.L_00_48
-
-.align	4
-.L_00_48:
-___
-	&Xupdate(\&body_00_15);
-	&Xupdate(\&body_00_15);
-	&Xupdate(\&body_00_15);
-	&Xupdate(\&body_00_15);
-$code.=<<___;
-	teq	$t1,#0				@ check for K256 terminator
-	ldr	$t1,[sp,#0]
-	sub	$Xfer,$Xfer,#64
-	bne	.L_00_48
-
-	ldr		$inp,[sp,#68]
-	ldr		$t0,[sp,#72]
-	sub		$Ktbl,$Ktbl,#256	@ rewind $Ktbl
-	teq		$inp,$t0
-	subeq		$inp,$inp,#64		@ avoid SEGV
-	vld1.8		{@X[0]},[$inp]!		@ load next input block
-	vld1.8		{@X[1]},[$inp]!
-	vld1.8		{@X[2]},[$inp]!
-	vld1.8		{@X[3]},[$inp]!
-	strne		$inp,[sp,#68]
-	mov		$Xfer,sp
-___
-	&Xpreload(\&body_00_15);
-	&Xpreload(\&body_00_15);
-	&Xpreload(\&body_00_15);
-	&Xpreload(\&body_00_15);
-$code.=<<___;
-	ldr	$t0,[$t1,#0]
-	add	$A,$A,$t2			@ h+=Maj(a,b,c) from the past
-	ldr	$t2,[$t1,#4]
-	ldr	$t3,[$t1,#8]
-	ldr	$t4,[$t1,#12]
-	add	$A,$A,$t0			@ accumulate
-	ldr	$t0,[$t1,#16]
-	add	$B,$B,$t2
-	ldr	$t2,[$t1,#20]
-	add	$C,$C,$t3
-	ldr	$t3,[$t1,#24]
-	add	$D,$D,$t4
-	ldr	$t4,[$t1,#28]
-	add	$E,$E,$t0
-	str	$A,[$t1],#4
-	add	$F,$F,$t2
-	str	$B,[$t1],#4
-	add	$G,$G,$t3
-	str	$C,[$t1],#4
-	add	$H,$H,$t4
-	str	$D,[$t1],#4
-	stmia	$t1,{$E-$H}
-
-	movne	$Xfer,sp
-	ldrne	$t1,[sp,#0]
-	eorne	$t2,$t2,$t2
-	ldreq	sp,[sp,#76]			@ restore original sp
-	eorne	$t3,$B,$C
-	bne	.L_00_48
-
-	ldmia	sp!,{r4-r12,pc}
-.size	sha256_block_data_order_neon,.-sha256_block_data_order_neon
-#endif
-___
-}}}
-######################################################################
-# ARMv8 stuff
-#
-{{{
-my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
-my @MSG=map("q$_",(8..11));
-my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
-my $Ktbl="r3";
-
-$code.=<<___;
-#if __ARM_ARCH__>=7
-.type	sha256_block_data_order_armv8,%function
-.align	5
-sha256_block_data_order_armv8:
-.LARMv8:
-	vld1.32	{$ABCD,$EFGH},[$ctx]
-	sub	$Ktbl,r3,#sha256_block_data_order-K256
-
-.Loop_v8:
-	vld1.8		{@MSG[0]-@MSG[1]},[$inp]!
-	vld1.8		{@MSG[2]-@MSG[3]},[$inp]!
-	vld1.32		{$W0},[$Ktbl]!
-	vrev32.8	@MSG[0],@MSG[0]
-	vrev32.8	@MSG[1],@MSG[1]
-	vrev32.8	@MSG[2],@MSG[2]
-	vrev32.8	@MSG[3],@MSG[3]
-	vmov		$ABCD_SAVE,$ABCD	@ offload
-	vmov		$EFGH_SAVE,$EFGH
-	teq		$inp,$len
-___
-for($i=0;$i<12;$i++) {
-$code.=<<___;
-	vld1.32		{$W1},[$Ktbl]!
-	vadd.i32	$W0,$W0,@MSG[0]
-	sha256su0	@MSG[0],@MSG[1]
-	vmov		$abcd,$ABCD
-	sha256h		$ABCD,$EFGH,$W0
-	sha256h2	$EFGH,$abcd,$W0
-	sha256su1	@MSG[0],@MSG[2],@MSG[3]
-___
-	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
-}
-$code.=<<___;
-	vld1.32		{$W1},[$Ktbl]!
-	vadd.i32	$W0,$W0,@MSG[0]
-	vmov		$abcd,$ABCD
-	sha256h		$ABCD,$EFGH,$W0
-	sha256h2	$EFGH,$abcd,$W0
-
-	vld1.32		{$W0},[$Ktbl]!
-	vadd.i32	$W1,$W1,@MSG[1]
-	vmov		$abcd,$ABCD
-	sha256h		$ABCD,$EFGH,$W1
-	sha256h2	$EFGH,$abcd,$W1
-
-	vld1.32		{$W1},[$Ktbl]
-	vadd.i32	$W0,$W0,@MSG[2]
-	sub		$Ktbl,$Ktbl,#256-16	@ rewind
-	vmov		$abcd,$ABCD
-	sha256h		$ABCD,$EFGH,$W0
-	sha256h2	$EFGH,$abcd,$W0
-
-	vadd.i32	$W1,$W1,@MSG[3]
-	vmov		$abcd,$ABCD
-	sha256h		$ABCD,$EFGH,$W1
-	sha256h2	$EFGH,$abcd,$W1
-
-	vadd.i32	$ABCD,$ABCD,$ABCD_SAVE
-	vadd.i32	$EFGH,$EFGH,$EFGH_SAVE
-	bne		.Loop_v8
-
-	vst1.32		{$ABCD,$EFGH},[$ctx]
-
-	ret		@ bx lr
-.size	sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
-#endif
-___
-}}}
-$code.=<<___;
-.asciz	"SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.size	sha256_block_data_order,.-sha256_block_data_order
+.asciz	"SHA256 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
 .align	2
-.comm	OPENSSL_armcap_P,4,4
 ___
-{   my  %opcode = (
-	"sha256h"	=> 0xf3000c40,	"sha256h2"	=> 0xf3100c40,
-	"sha256su0"	=> 0xf3ba03c0,	"sha256su1"	=> 0xf3200c40	);
-
-    sub unsha256 {
-	my ($mnemonic,$arg)=@_;
-
-	if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
-	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
-					 |(($2&7)<<17)|(($2&8)<<4)
-					 |(($3&7)<<1) |(($3&8)<<2);
-	    # since ARMv7 instructions are always encoded little-endian.
-	    # correct solution is to use .inst directive, but older
-	    # assemblers don't implement it:-(
-	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
-			$word&0xff,($word>>8)&0xff,
-			($word>>16)&0xff,($word>>24)&0xff,
-			$mnemonic,$arg;
-	}
-    }
-}
-
-foreach (split($/,$code)) {
-
-	s/\`([^\`]*)\`/eval $1/geo;
-
-	s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;
-
-	s/\bret\b/bx	lr/go		or
-	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4
-
-	print $_,"\n";
-}
-
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+print $code;
 close STDOUT;	# enforce flush
```
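For reference, both the old and new versions of the script emit a function with the same C-callable interface; the register comments above pin it down (`$ctx`=r0, `$inp`=r1, `$len`=r2, and the prologue's `add $len,$inp,$len,lsl#6` shows that `$len` counts 64-byte blocks). A minimal, hypothetical harness for the generated code might look like the following; the helper name `sha256_compress` and the surrounding glue are illustrative, not part of the repository:

```c
/* Hypothetical harness for the assembly emitted by sha256-armv4.pl.
 * Per the register comments in the diff: r0 = eight 32-bit chaining
 * words A..H, r1 = input bytes, r2 = number of 64-byte blocks
 * (the prologue computes the end pointer as inp + num*64). */
#include <stdint.h>
#include <string.h>

void sha256_block_data_order(uint32_t ctx[8], const void *inp, size_t num);

/* SHA-256 initial hash values (FIPS 180-4). */
static const uint32_t H0[8] = {
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};

/* Compress `num` already-padded 64-byte blocks; the caller is
 * responsible for SHA-256 padding (0x80, zeros, 64-bit bit length).
 * Serialize each out[i] MSB-first to get the usual digest bytes. */
void sha256_compress(const uint8_t *blocks, size_t num, uint32_t out[8])
{
    memcpy(out, H0, sizeof(H0));
    sha256_block_data_order(out, blocks, num);
}
```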