From 394451dbae3e71282611058e00b5fd16c865f147 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Parm=C3=A9nides=20GV?=
Date: Fri, 26 Sep 2014 09:46:26 +0200
Subject: Revert "Updated native subprojects from ics-openvpn."

This reverts commit d0e7ba3029b2fd42582413aa95773fe7dbdede90.

I'll postpone this work to the next cycle; it's not trivial, since the
updated code doesn't link properly.
---
 app/openssl/crypto/sha/asm/sha1-armv4-large.pl |  446 +--
 app/openssl/crypto/sha/asm/sha1-armv4-large.s  | 1008 +------
 app/openssl/crypto/sha/asm/sha1-armv8.S        | 1211 --------
 app/openssl/crypto/sha/asm/sha1-armv8.pl       |  333 ---
 app/openssl/crypto/sha/asm/sha256-armv4.pl     |  585 +---
 app/openssl/crypto/sha/asm/sha256-armv4.s      | 3729 ++++++++----------------
 app/openssl/crypto/sha/asm/sha256-armv8.S      | 1141 --------
 app/openssl/crypto/sha/asm/sha512-armv4.pl     |    3 +-
 app/openssl/crypto/sha/asm/sha512-armv4.s      |    2 +-
 app/openssl/crypto/sha/asm/sha512-armv8.S      | 1021 -------
 app/openssl/crypto/sha/asm/sha512-armv8.pl     |  414 ---
 11 files changed, 1363 insertions(+), 8530 deletions(-)
 delete mode 100644 app/openssl/crypto/sha/asm/sha1-armv8.S
 delete mode 100644 app/openssl/crypto/sha/asm/sha1-armv8.pl
 delete mode 100644 app/openssl/crypto/sha/asm/sha256-armv8.S
 delete mode 100644 app/openssl/crypto/sha/asm/sha512-armv8.S
 delete mode 100644 app/openssl/crypto/sha/asm/sha512-armv8.pl

(limited to 'app/openssl/crypto/sha')

diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.pl b/app/openssl/crypto/sha/asm/sha1-armv4-large.pl
index 50bd07b3..33da3e0e 100644
--- a/app/openssl/crypto/sha/asm/sha1-armv4-large.pl
+++ b/app/openssl/crypto/sha/asm/sha1-armv4-large.pl
@@ -1,7 +1,7 @@
 #!/usr/bin/env perl
 
 # ====================================================================
-# Written by Andy Polyakov for the OpenSSL
+# Written by Andy Polyakov for the OpenSSL
 # project. The module is, however, dual licensed under OpenSSL and
 # CRYPTOGAMS licenses depending on where you obtain it. For further
 # details see http://www.openssl.org/~appro/cryptogams/.
@@ -52,20 +52,6 @@
 # Profiler-assisted and platform-specific optimization resulted in 10%
 # improvement on Cortex A8 core and 12.2 cycles per byte.
 
-# September 2013.
-#
-# Add NEON implementation (see sha1-586.pl for background info). On
-# Cortex A8 it was measured to process one byte in 6.7 cycles or >80%
-# faster than integer-only code. Because [fully unrolled] NEON code
-# is ~2.5x larger and there are some redundant instructions executed
-# when processing last block, improvement is not as big for smallest
-# blocks, only ~30%. Snapdragon S4 is a tad faster, 6.4 cycles per
-# byte, which is also >80% faster than integer-only code.
-
-# May 2014.
-#
-# Add ARMv8 code path performing at 2.35 cpb on Apple A7.
- while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; @@ -167,22 +153,12 @@ $code=<<___; #include "arm_arch.h" .text -.code 32 .global sha1_block_data_order .type sha1_block_data_order,%function -.align 5 +.align 2 sha1_block_data_order: -#if __ARM_ARCH__>=7 - sub r3,pc,#8 @ sha1_block_data_order - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P - tst r12,#ARMV8_SHA1 - bne .LARMv8 - tst r12,#ARMV7_NEON - bne .LNEON -#endif stmdb sp!,{r4-r12,lr} add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp ldmia $ctx,{$a,$b,$c,$d,$e} @@ -257,422 +233,16 @@ $code.=<<___; moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) #endif -.size sha1_block_data_order,.-sha1_block_data_order - -.align 5 +.align 2 .LK_00_19: .word 0x5a827999 .LK_20_39: .word 0x6ed9eba1 .LK_40_59: .word 0x8f1bbcdc .LK_60_79: .word 0xca62c1d6 -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-sha1_block_data_order -.asciz "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by " -.align 5 -___ -##################################################################### -# NEON stuff -# -{{{ -my @V=($a,$b,$c,$d,$e); -my ($K_XX_XX,$Ki,$t0,$t1,$Xfer,$saved_sp)=map("r$_",(8..12,14)); -my $Xi=4; -my @X=map("q$_",(8..11,0..3)); -my @Tx=("q12","q13"); -my ($K,$zero)=("q14","q15"); -my $j=0; - -sub AUTOLOAD() # thunk [simplified] x86-style perlasm -{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; - my $arg = pop; - $arg = "#$arg" if ($arg*1 eq $arg); - $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; -} - -sub body_00_19 () { - ( - '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'. - '&bic ($t0,$d,$b)', - '&add ($e,$e,$Ki)', # e+=X[i]+K - '&and ($t1,$c,$b)', - '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))', - '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27) - '&eor ($t1,$t1,$t0)', # F_00_19 - '&mov ($b,$b,"ror#2")', # b=ROR(b,2) - '&add ($e,$e,$t1);'. # e+=F_00_19 - '$j++; unshift(@V,pop(@V));' - ) -} -sub body_20_39 () { - ( - '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'. - '&eor ($t0,$b,$d)', - '&add ($e,$e,$Ki)', # e+=X[i]+K - '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15)) if ($j<79)', - '&eor ($t1,$t0,$c)', # F_20_39 - '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27) - '&mov ($b,$b,"ror#2")', # b=ROR(b,2) - '&add ($e,$e,$t1);'. # e+=F_20_39 - '$j++; unshift(@V,pop(@V));' - ) -} -sub body_40_59 () { - ( - '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'. - '&add ($e,$e,$Ki)', # e+=X[i]+K - '&and ($t0,$c,$d)', - '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))', - '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27) - '&eor ($t1,$c,$d)', - '&add ($e,$e,$t0)', - '&and ($t1,$t1,$b)', - '&mov ($b,$b,"ror#2")', # b=ROR(b,2) - '&add ($e,$e,$t1);'. 
# e+=F_40_59 - '$j++; unshift(@V,pop(@V));' - ) -} - -sub Xupdate_16_31 () -{ use integer; - my $body = shift; - my @insns = (&$body,&$body,&$body,&$body); - my ($a,$b,$c,$d,$e); - - &vext_8 (@X[0],@X[-4&7],@X[-3&7],8); # compose "X[-14]" in "X[0]" - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (@Tx[1],@X[-1&7],$K); - eval(shift(@insns)); - &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!") if ($Xi%5==0); - eval(shift(@insns)); - &vext_8 (@Tx[0],@X[-1&7],$zero,4); # "X[-3]", 3 words - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &veor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]" - eval(shift(@insns)); - eval(shift(@insns)); - &veor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]" - eval(shift(@insns)); - eval(shift(@insns)); - &veor (@Tx[0],@Tx[0],@X[0]); # "X[0]"^="X[-3]"^"X[-8] - eval(shift(@insns)); - eval(shift(@insns)); - &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!"); # X[]+K xfer - &sub ($Xfer,$Xfer,64) if ($Xi%4==0); - eval(shift(@insns)); - eval(shift(@insns)); - &vext_8 (@Tx[1],$zero,@Tx[0],4); # "X[0]"<<96, extract one dword - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (@X[0],@Tx[0],@Tx[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &vsri_32 (@X[0],@Tx[0],31); # "X[0]"<<<=1 - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 (@Tx[0],@Tx[1],30); - eval(shift(@insns)); - eval(shift(@insns)); - &vshl_u32 (@Tx[1],@Tx[1],2); - eval(shift(@insns)); - eval(shift(@insns)); - &veor (@X[0],@X[0],@Tx[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &veor (@X[0],@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2 - - foreach (@insns) { eval; } # remaining instructions [if any] - - $Xi++; push(@X,shift(@X)); # "rotate" X[] -} - -sub Xupdate_32_79 () -{ use integer; - my $body = shift; - my @insns = (&$body,&$body,&$body,&$body); - my ($a,$b,$c,$d,$e); - - &vext_8 (@Tx[0],@X[-2&7],@X[-1&7],8); # compose "X[-6]" - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &veor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" - eval(shift(@insns)); - eval(shift(@insns)); - &veor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]" - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (@Tx[1],@X[-1&7],$K); - eval(shift(@insns)); - &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!") if ($Xi%5==0); - eval(shift(@insns)); - &veor (@Tx[0],@Tx[0],@X[0]); # "X[-6]"^="X[0]" - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 (@X[0],@Tx[0],30); - eval(shift(@insns)); - eval(shift(@insns)); - &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!"); # X[]+K xfer - &sub ($Xfer,$Xfer,64) if ($Xi%4==0); - eval(shift(@insns)); - eval(shift(@insns)); - &vsli_32 (@X[0],@Tx[0],2); # "X[0]"="X[-6]"<<<2 - - foreach (@insns) { eval; } # remaining instructions [if any] - - $Xi++; push(@X,shift(@X)); # "rotate" X[] -} - -sub Xuplast_80 () -{ use integer; - my $body = shift; - my @insns = (&$body,&$body,&$body,&$body); - my ($a,$b,$c,$d,$e); - - &vadd_i32 (@Tx[1],@X[-1&7],$K); - eval(shift(@insns)); - eval(shift(@insns)); - &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!"); - &sub ($Xfer,$Xfer,64); - - &teq ($inp,$len); - &sub ($K_XX_XX,$K_XX_XX,16); # rewind $K_XX_XX - &subeq ($inp,$inp,64); # reload last block to avoid SEGV - &vld1_8 ("{@X[-4&7]-@X[-3&7]}","[$inp]!"); - eval(shift(@insns)); - eval(shift(@insns)); - &vld1_8 ("{@X[-2&7]-@X[-1&7]}","[$inp]!"); - eval(shift(@insns)); - eval(shift(@insns)); - &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!"); # load K_00_19 - eval(shift(@insns)); - eval(shift(@insns)); - &vrev32_8 (@X[-4&7],@X[-4&7]); - - foreach (@insns) { eval; } # 
remaining instructions - - $Xi=0; -} - -sub Xloop() -{ use integer; - my $body = shift; - my @insns = (&$body,&$body,&$body,&$body); - my ($a,$b,$c,$d,$e); - - &vrev32_8 (@X[($Xi-3)&7],@X[($Xi-3)&7]); - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (@X[$Xi&7],@X[($Xi-4)&7],$K); - eval(shift(@insns)); - eval(shift(@insns)); - &vst1_32 ("{@X[$Xi&7]}","[$Xfer,:128]!");# X[]+K xfer to IALU - - foreach (@insns) { eval; } - - $Xi++; -} - -$code.=<<___; -#if __ARM_ARCH__>=7 -.fpu neon - -.type sha1_block_data_order_neon,%function -.align 4 -sha1_block_data_order_neon: -.LNEON: - stmdb sp!,{r4-r12,lr} - add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp - @ dmb @ errata #451034 on early Cortex A8 - @ vstmdb sp!,{d8-d15} @ ABI specification says so - mov $saved_sp,sp - sub sp,sp,#64 @ alloca - adr $K_XX_XX,.LK_00_19 - bic sp,sp,#15 @ align for 128-bit stores - - ldmia $ctx,{$a,$b,$c,$d,$e} @ load context - mov $Xfer,sp - - vld1.8 {@X[-4&7]-@X[-3&7]},[$inp]! @ handles unaligned - veor $zero,$zero,$zero - vld1.8 {@X[-2&7]-@X[-1&7]},[$inp]! - vld1.32 {${K}\[]},[$K_XX_XX,:32]! @ load K_00_19 - vrev32.8 @X[-4&7],@X[-4&7] @ yes, even on - vrev32.8 @X[-3&7],@X[-3&7] @ big-endian... - vrev32.8 @X[-2&7],@X[-2&7] - vadd.i32 @X[0],@X[-4&7],$K - vrev32.8 @X[-1&7],@X[-1&7] - vadd.i32 @X[1],@X[-3&7],$K - vst1.32 {@X[0]},[$Xfer,:128]! - vadd.i32 @X[2],@X[-2&7],$K - vst1.32 {@X[1]},[$Xfer,:128]! - vst1.32 {@X[2]},[$Xfer,:128]! - ldr $Ki,[sp] @ big RAW stall - -.Loop_neon: -___ - &Xupdate_16_31(\&body_00_19); - &Xupdate_16_31(\&body_00_19); - &Xupdate_16_31(\&body_00_19); - &Xupdate_16_31(\&body_00_19); - &Xupdate_32_79(\&body_00_19); - &Xupdate_32_79(\&body_20_39); - &Xupdate_32_79(\&body_20_39); - &Xupdate_32_79(\&body_20_39); - &Xupdate_32_79(\&body_20_39); - &Xupdate_32_79(\&body_20_39); - &Xupdate_32_79(\&body_40_59); - &Xupdate_32_79(\&body_40_59); - &Xupdate_32_79(\&body_40_59); - &Xupdate_32_79(\&body_40_59); - &Xupdate_32_79(\&body_40_59); - &Xupdate_32_79(\&body_20_39); - &Xuplast_80(\&body_20_39); - &Xloop(\&body_20_39); - &Xloop(\&body_20_39); - &Xloop(\&body_20_39); -$code.=<<___; - ldmia $ctx,{$Ki,$t0,$t1,$Xfer} @ accumulate context - add $a,$a,$Ki - ldr $Ki,[$ctx,#16] - add $b,$b,$t0 - add $c,$c,$t1 - add $d,$d,$Xfer - moveq sp,$saved_sp - add $e,$e,$Ki - ldrne $Ki,[sp] - stmia $ctx,{$a,$b,$c,$d,$e} - addne $Xfer,sp,#3*16 - bne .Loop_neon - - @ vldmia sp!,{d8-d15} - ldmia sp!,{r4-r12,pc} -.size sha1_block_data_order_neon,.-sha1_block_data_order_neon -#endif -___ -}}} -##################################################################### -# ARMv8 stuff -# -{{{ -my ($ABCD,$E,$E0,$E1)=map("q$_",(0..3)); -my @MSG=map("q$_",(4..7)); -my @Kxx=map("q$_",(8..11)); -my ($W0,$W1,$ABCD_SAVE)=map("q$_",(12..14)); - -$code.=<<___; -#if __ARM_ARCH__>=7 -.type sha1_block_data_order_armv8,%function -.align 5 -sha1_block_data_order_armv8: -.LARMv8: - vstmdb sp!,{d8-d15} @ ABI specification says so - - veor $E,$E,$E - adr r3,.LK_00_19 - vld1.32 {$ABCD},[$ctx]! - vld1.32 {$E\[0]},[$ctx] - sub $ctx,$ctx,#16 - vld1.32 {@Kxx[0]\[]},[r3,:32]! - vld1.32 {@Kxx[1]\[]},[r3,:32]! - vld1.32 {@Kxx[2]\[]},[r3,:32]! - vld1.32 {@Kxx[3]\[]},[r3,:32] - -.Loop_v8: - vld1.8 {@MSG[0]-@MSG[1]},[$inp]! - vld1.8 {@MSG[2]-@MSG[3]},[$inp]! 
- vrev32.8 @MSG[0],@MSG[0] - vrev32.8 @MSG[1],@MSG[1] - - vadd.i32 $W0,@Kxx[0],@MSG[0] - vrev32.8 @MSG[2],@MSG[2] - vmov $ABCD_SAVE,$ABCD @ offload - subs $len,$len,#1 - - vadd.i32 $W1,@Kxx[0],@MSG[1] - vrev32.8 @MSG[3],@MSG[3] - sha1h $E1,$ABCD @ 0 - sha1c $ABCD,$E,$W0 - vadd.i32 $W0,@Kxx[$j],@MSG[2] - sha1su0 @MSG[0],@MSG[1],@MSG[2] -___ -for ($j=0,$i=1;$i<20-3;$i++) { -my $f=("c","p","m","p")[$i/5]; -$code.=<<___; - sha1h $E0,$ABCD @ $i - sha1$f $ABCD,$E1,$W1 - vadd.i32 $W1,@Kxx[$j],@MSG[3] - sha1su1 @MSG[0],@MSG[3] -___ -$code.=<<___ if ($i<20-4); - sha1su0 @MSG[1],@MSG[2],@MSG[3] -___ - ($E0,$E1)=($E1,$E0); ($W0,$W1)=($W1,$W0); - push(@MSG,shift(@MSG)); $j++ if ((($i+3)%5)==0); -} -$code.=<<___; - sha1h $E0,$ABCD @ $i - sha1p $ABCD,$E1,$W1 - vadd.i32 $W1,@Kxx[$j],@MSG[3] - - sha1h $E1,$ABCD @ 18 - sha1p $ABCD,$E0,$W0 - - sha1h $E0,$ABCD @ 19 - sha1p $ABCD,$E1,$W1 - - vadd.i32 $E,$E,$E0 - vadd.i32 $ABCD,$ABCD,$ABCD_SAVE - bne .Loop_v8 - - vst1.32 {$ABCD},[$ctx]! - vst1.32 {$E\[0]},[$ctx] - - vldmia sp!,{d8-d15} - ret @ bx lr -.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8 -#endif -___ -}}} -$code.=<<___; -.comm OPENSSL_armcap_P,4,4 +.size sha1_block_data_order,.-sha1_block_data_order +.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by " +.align 2 ___ -{ my %opcode = ( - "sha1c" => 0xf2000c40, "sha1p" => 0xf2100c40, - "sha1m" => 0xf2200c40, "sha1su0" => 0xf2300c40, - "sha1h" => 0xf3b902c0, "sha1su1" => 0xf3ba0380 ); - - sub unsha1 { - my ($mnemonic,$arg)=@_; - - if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) { - my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19) - |(($2&7)<<17)|(($2&8)<<4) - |(($3&7)<<1) |(($3&8)<<2); - # since ARMv7 instructions are always encoded little-endian. - # correct solution is to use .inst directive, but older - # assemblers don't implement it:-( - sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s", - $word&0xff,($word>>8)&0xff, - ($word>>16)&0xff,($word>>24)&0xff, - $mnemonic,$arg; - } - } -} - -foreach (split($/,$code)) { - s/{q([0-9]+)\[\]}/sprintf "{d%d[],d%d[]}",2*$1,2*$1+1/eo or - s/{q([0-9]+)\[0\]}/sprintf "{d%d[0]}",2*$1/eo; - - s/\b(sha1\w+)\s+(q.*)/unsha1($1,$2)/geo; - - s/\bret\b/bx lr/o or - s/\bbx\s+lr\b/.word\t0xe12fff1e/o; # make it possible to compile with -march=armv4 - - print $_,$/; -} - +$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 +print $code; close STDOUT; # enforce flush diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.s b/app/openssl/crypto/sha/asm/sha1-armv4-large.s index a1562883..639ae78a 100644 --- a/app/openssl/crypto/sha/asm/sha1-armv4-large.s +++ b/app/openssl/crypto/sha/asm/sha1-armv4-large.s @@ -1,22 +1,12 @@ #include "arm_arch.h" .text -.code 32 .global sha1_block_data_order .type sha1_block_data_order,%function -.align 5 +.align 2 sha1_block_data_order: -#if __ARM_ARCH__>=7 - sub r3,pc,#8 @ sha1_block_data_order - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P - tst r12,#ARMV8_SHA1 - bne .LARMv8 - tst r12,#ARMV7_NEON - bne .LNEON -#endif stmdb sp!,{r4-r12,lr} add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 ldmia r0,{r3,r4,r5,r6,r7} @@ -452,999 +442,11 @@ sha1_block_data_order: moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif -.size sha1_block_data_order,.-sha1_block_data_order - -.align 5 +.align 2 .LK_00_19: .word 0x5a827999 .LK_20_39: .word 0x6ed9eba1 .LK_40_59: .word 0x8f1bbcdc .LK_60_79: .word 0xca62c1d6 -.LOPENSSL_armcap: -.word 
OPENSSL_armcap_P-sha1_block_data_order -.asciz "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by " -.align 5 -#if __ARM_ARCH__>=7 -.fpu neon - -.type sha1_block_data_order_neon,%function -.align 4 -sha1_block_data_order_neon: -.LNEON: - stmdb sp!,{r4-r12,lr} - add r2,r1,r2,lsl#6 @ r2 to point at the end of r1 - @ dmb @ errata #451034 on early Cortex A8 - @ vstmdb sp!,{d8-d15} @ ABI specification says so - mov r14,sp - sub sp,sp,#64 @ alloca - adr r8,.LK_00_19 - bic sp,sp,#15 @ align for 128-bit stores - - ldmia r0,{r3,r4,r5,r6,r7} @ load context - mov r12,sp - - vld1.8 {q0-q1},[r1]! @ handles unaligned - veor q15,q15,q15 - vld1.8 {q2-q3},[r1]! - vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19 - vrev32.8 q0,q0 @ yes, even on - vrev32.8 q1,q1 @ big-endian... - vrev32.8 q2,q2 - vadd.i32 q8,q0,q14 - vrev32.8 q3,q3 - vadd.i32 q9,q1,q14 - vst1.32 {q8},[r12,:128]! - vadd.i32 q10,q2,q14 - vst1.32 {q9},[r12,:128]! - vst1.32 {q10},[r12,:128]! - ldr r9,[sp] @ big RAW stall - -.Loop_neon: - vext.8 q8,q0,q1,#8 - bic r10,r6,r4 - add r7,r7,r9 - and r11,r5,r4 - vadd.i32 q13,q3,q14 - ldr r9,[sp,#4] - add r7,r7,r3,ror#27 - vext.8 q12,q3,q15,#4 - eor r11,r11,r10 - mov r4,r4,ror#2 - add r7,r7,r11 - veor q8,q8,q0 - bic r10,r5,r3 - add r6,r6,r9 - veor q12,q12,q2 - and r11,r4,r3 - ldr r9,[sp,#8] - veor q12,q12,q8 - add r6,r6,r7,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q13,q15,q12,#4 - bic r10,r4,r7 - add r5,r5,r9 - vadd.i32 q8,q12,q12 - and r11,r3,r7 - ldr r9,[sp,#12] - vsri.32 q8,q12,#31 - add r5,r5,r6,ror#27 - eor r11,r11,r10 - mov r7,r7,ror#2 - vshr.u32 q12,q13,#30 - add r5,r5,r11 - bic r10,r3,r6 - vshl.u32 q13,q13,#2 - add r4,r4,r9 - and r11,r7,r6 - veor q8,q8,q12 - ldr r9,[sp,#16] - add r4,r4,r5,ror#27 - veor q8,q8,q13 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q9,q1,q2,#8 - bic r10,r7,r5 - add r3,r3,r9 - and r11,r6,r5 - vadd.i32 q13,q8,q14 - ldr r9,[sp,#20] - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r4,ror#27 - vext.8 q12,q8,q15,#4 - eor r11,r11,r10 - mov r5,r5,ror#2 - add r3,r3,r11 - veor q9,q9,q1 - bic r10,r6,r4 - add r7,r7,r9 - veor q12,q12,q3 - and r11,r5,r4 - ldr r9,[sp,#24] - veor q12,q12,q9 - add r7,r7,r3,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q13,q15,q12,#4 - bic r10,r5,r3 - add r6,r6,r9 - vadd.i32 q9,q12,q12 - and r11,r4,r3 - ldr r9,[sp,#28] - vsri.32 q9,q12,#31 - add r6,r6,r7,ror#27 - eor r11,r11,r10 - mov r3,r3,ror#2 - vshr.u32 q12,q13,#30 - add r6,r6,r11 - bic r10,r4,r7 - vshl.u32 q13,q13,#2 - add r5,r5,r9 - and r11,r3,r7 - veor q9,q9,q12 - ldr r9,[sp,#32] - add r5,r5,r6,ror#27 - veor q9,q9,q13 - eor r11,r11,r10 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q10,q2,q3,#8 - bic r10,r3,r6 - add r4,r4,r9 - and r11,r7,r6 - vadd.i32 q13,q9,q14 - ldr r9,[sp,#36] - add r4,r4,r5,ror#27 - vext.8 q12,q9,q15,#4 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - veor q10,q10,q2 - bic r10,r7,r5 - add r3,r3,r9 - veor q12,q12,q8 - and r11,r6,r5 - ldr r9,[sp,#40] - veor q12,q12,q10 - add r3,r3,r4,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! 
- mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q13,q15,q12,#4 - bic r10,r6,r4 - add r7,r7,r9 - vadd.i32 q10,q12,q12 - and r11,r5,r4 - ldr r9,[sp,#44] - vsri.32 q10,q12,#31 - add r7,r7,r3,ror#27 - eor r11,r11,r10 - mov r4,r4,ror#2 - vshr.u32 q12,q13,#30 - add r7,r7,r11 - bic r10,r5,r3 - vshl.u32 q13,q13,#2 - add r6,r6,r9 - and r11,r4,r3 - veor q10,q10,q12 - ldr r9,[sp,#48] - add r6,r6,r7,ror#27 - veor q10,q10,q13 - eor r11,r11,r10 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q11,q3,q8,#8 - bic r10,r4,r7 - add r5,r5,r9 - and r11,r3,r7 - vadd.i32 q13,q10,q14 - ldr r9,[sp,#52] - add r5,r5,r6,ror#27 - vext.8 q12,q10,q15,#4 - eor r11,r11,r10 - mov r7,r7,ror#2 - add r5,r5,r11 - veor q11,q11,q3 - bic r10,r3,r6 - add r4,r4,r9 - veor q12,q12,q9 - and r11,r7,r6 - ldr r9,[sp,#56] - veor q12,q12,q11 - add r4,r4,r5,ror#27 - eor r11,r11,r10 - vst1.32 {q13},[r12,:128]! - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q13,q15,q12,#4 - bic r10,r7,r5 - add r3,r3,r9 - vadd.i32 q11,q12,q12 - and r11,r6,r5 - ldr r9,[sp,#60] - vsri.32 q11,q12,#31 - add r3,r3,r4,ror#27 - eor r11,r11,r10 - mov r5,r5,ror#2 - vshr.u32 q12,q13,#30 - add r3,r3,r11 - bic r10,r6,r4 - vshl.u32 q13,q13,#2 - add r7,r7,r9 - and r11,r5,r4 - veor q11,q11,q12 - ldr r9,[sp,#0] - add r7,r7,r3,ror#27 - veor q11,q11,q13 - eor r11,r11,r10 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q10,q11,#8 - bic r10,r5,r3 - add r6,r6,r9 - and r11,r4,r3 - veor q0,q0,q8 - ldr r9,[sp,#4] - add r6,r6,r7,ror#27 - veor q0,q0,q1 - eor r11,r11,r10 - mov r3,r3,ror#2 - vadd.i32 q13,q11,q14 - add r6,r6,r11 - bic r10,r4,r7 - veor q12,q12,q0 - add r5,r5,r9 - and r11,r3,r7 - vshr.u32 q0,q12,#30 - ldr r9,[sp,#8] - add r5,r5,r6,ror#27 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - eor r11,r11,r10 - mov r7,r7,ror#2 - vsli.32 q0,q12,#2 - add r5,r5,r11 - bic r10,r3,r6 - add r4,r4,r9 - and r11,r7,r6 - ldr r9,[sp,#12] - add r4,r4,r5,ror#27 - eor r11,r11,r10 - mov r6,r6,ror#2 - add r4,r4,r11 - bic r10,r7,r5 - add r3,r3,r9 - and r11,r6,r5 - ldr r9,[sp,#16] - add r3,r3,r4,ror#27 - eor r11,r11,r10 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q11,q0,#8 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#20] - veor q1,q1,q9 - eor r11,r10,r5 - add r7,r7,r3,ror#27 - veor q1,q1,q2 - mov r4,r4,ror#2 - add r7,r7,r11 - vadd.i32 q13,q0,q14 - eor r10,r3,r5 - add r6,r6,r9 - veor q12,q12,q1 - ldr r9,[sp,#24] - eor r11,r10,r4 - vshr.u32 q1,q12,#30 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - vst1.32 {q13},[r12,:128]! - add r6,r6,r11 - eor r10,r7,r4 - vsli.32 q1,q12,#2 - add r5,r5,r9 - ldr r9,[sp,#28] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#32] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q12,q0,q1,#8 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#36] - veor q2,q2,q10 - eor r11,r10,r6 - add r3,r3,r4,ror#27 - veor q2,q2,q3 - mov r5,r5,ror#2 - add r3,r3,r11 - vadd.i32 q13,q1,q14 - eor r10,r4,r6 - vld1.32 {d28[],d29[]},[r8,:32]! - add r7,r7,r9 - veor q12,q12,q2 - ldr r9,[sp,#40] - eor r11,r10,r5 - vshr.u32 q2,q12,#30 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - vst1.32 {q13},[r12,:128]! 
- add r7,r7,r11 - eor r10,r3,r5 - vsli.32 q2,q12,#2 - add r6,r6,r9 - ldr r9,[sp,#44] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#48] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q12,q1,q2,#8 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#52] - veor q3,q3,q11 - eor r11,r10,r7 - add r4,r4,r5,ror#27 - veor q3,q3,q8 - mov r6,r6,ror#2 - add r4,r4,r11 - vadd.i32 q13,q2,q14 - eor r10,r5,r7 - add r3,r3,r9 - veor q12,q12,q3 - ldr r9,[sp,#56] - eor r11,r10,r6 - vshr.u32 q3,q12,#30 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - vst1.32 {q13},[r12,:128]! - add r3,r3,r11 - eor r10,r4,r6 - vsli.32 q3,q12,#2 - add r7,r7,r9 - ldr r9,[sp,#60] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#0] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q12,q2,q3,#8 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#4] - veor q8,q8,q0 - eor r11,r10,r3 - add r5,r5,r6,ror#27 - veor q8,q8,q9 - mov r7,r7,ror#2 - add r5,r5,r11 - vadd.i32 q13,q3,q14 - eor r10,r6,r3 - add r4,r4,r9 - veor q12,q12,q8 - ldr r9,[sp,#8] - eor r11,r10,r7 - vshr.u32 q8,q12,#30 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - add r4,r4,r11 - eor r10,r5,r7 - vsli.32 q8,q12,#2 - add r3,r3,r9 - ldr r9,[sp,#12] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#16] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q3,q8,#8 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#20] - veor q9,q9,q1 - eor r11,r10,r4 - add r6,r6,r7,ror#27 - veor q9,q9,q10 - mov r3,r3,ror#2 - add r6,r6,r11 - vadd.i32 q13,q8,q14 - eor r10,r7,r4 - add r5,r5,r9 - veor q12,q12,q9 - ldr r9,[sp,#24] - eor r11,r10,r3 - vshr.u32 q9,q12,#30 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - vst1.32 {q13},[r12,:128]! - add r5,r5,r11 - eor r10,r6,r3 - vsli.32 q9,q12,#2 - add r4,r4,r9 - ldr r9,[sp,#28] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#32] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q8,q9,#8 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#36] - veor q10,q10,q2 - add r7,r7,r3,ror#27 - eor r11,r5,r6 - veor q10,q10,q11 - add r7,r7,r10 - and r11,r11,r4 - vadd.i32 q13,q9,q14 - mov r4,r4,ror#2 - add r7,r7,r11 - veor q12,q12,q10 - add r6,r6,r9 - and r10,r4,r5 - vshr.u32 q10,q12,#30 - ldr r9,[sp,#40] - add r6,r6,r7,ror#27 - vst1.32 {q13},[r12,:128]! - eor r11,r4,r5 - add r6,r6,r10 - vsli.32 q10,q12,#2 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#44] - add r5,r5,r6,ror#27 - eor r11,r3,r4 - add r5,r5,r10 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#48] - add r4,r4,r5,ror#27 - eor r11,r7,r3 - add r4,r4,r10 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - vext.8 q12,q9,q10,#8 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#52] - veor q11,q11,q3 - add r3,r3,r4,ror#27 - eor r11,r6,r7 - veor q11,q11,q0 - add r3,r3,r10 - and r11,r11,r5 - vadd.i32 q13,q10,q14 - mov r5,r5,ror#2 - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r11 - veor q12,q12,q11 - add r7,r7,r9 - and r10,r5,r6 - vshr.u32 q11,q12,#30 - ldr r9,[sp,#56] - add r7,r7,r3,ror#27 - vst1.32 {q13},[r12,:128]! 
- eor r11,r5,r6 - add r7,r7,r10 - vsli.32 q11,q12,#2 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#60] - add r6,r6,r7,ror#27 - eor r11,r4,r5 - add r6,r6,r10 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#0] - add r5,r5,r6,ror#27 - eor r11,r3,r4 - add r5,r5,r10 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - vext.8 q12,q10,q11,#8 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#4] - veor q0,q0,q8 - add r4,r4,r5,ror#27 - eor r11,r7,r3 - veor q0,q0,q1 - add r4,r4,r10 - and r11,r11,r6 - vadd.i32 q13,q11,q14 - mov r6,r6,ror#2 - add r4,r4,r11 - veor q12,q12,q0 - add r3,r3,r9 - and r10,r6,r7 - vshr.u32 q0,q12,#30 - ldr r9,[sp,#8] - add r3,r3,r4,ror#27 - vst1.32 {q13},[r12,:128]! - sub r12,r12,#64 - eor r11,r6,r7 - add r3,r3,r10 - vsli.32 q0,q12,#2 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#12] - add r7,r7,r3,ror#27 - eor r11,r5,r6 - add r7,r7,r10 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#16] - add r6,r6,r7,ror#27 - eor r11,r4,r5 - add r6,r6,r10 - and r11,r11,r3 - mov r3,r3,ror#2 - add r6,r6,r11 - vext.8 q12,q11,q0,#8 - add r5,r5,r9 - and r10,r3,r4 - ldr r9,[sp,#20] - veor q1,q1,q9 - add r5,r5,r6,ror#27 - eor r11,r3,r4 - veor q1,q1,q2 - add r5,r5,r10 - and r11,r11,r7 - vadd.i32 q13,q0,q14 - mov r7,r7,ror#2 - add r5,r5,r11 - veor q12,q12,q1 - add r4,r4,r9 - and r10,r7,r3 - vshr.u32 q1,q12,#30 - ldr r9,[sp,#24] - add r4,r4,r5,ror#27 - vst1.32 {q13},[r12,:128]! - eor r11,r7,r3 - add r4,r4,r10 - vsli.32 q1,q12,#2 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#28] - add r3,r3,r4,ror#27 - eor r11,r6,r7 - add r3,r3,r10 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - add r7,r7,r9 - and r10,r5,r6 - ldr r9,[sp,#32] - add r7,r7,r3,ror#27 - eor r11,r5,r6 - add r7,r7,r10 - and r11,r11,r4 - mov r4,r4,ror#2 - add r7,r7,r11 - vext.8 q12,q0,q1,#8 - add r6,r6,r9 - and r10,r4,r5 - ldr r9,[sp,#36] - veor q2,q2,q10 - add r6,r6,r7,ror#27 - eor r11,r4,r5 - veor q2,q2,q3 - add r6,r6,r10 - and r11,r11,r3 - vadd.i32 q13,q1,q14 - mov r3,r3,ror#2 - add r6,r6,r11 - veor q12,q12,q2 - add r5,r5,r9 - and r10,r3,r4 - vshr.u32 q2,q12,#30 - ldr r9,[sp,#40] - add r5,r5,r6,ror#27 - vst1.32 {q13},[r12,:128]! - eor r11,r3,r4 - add r5,r5,r10 - vsli.32 q2,q12,#2 - and r11,r11,r7 - mov r7,r7,ror#2 - add r5,r5,r11 - add r4,r4,r9 - and r10,r7,r3 - ldr r9,[sp,#44] - add r4,r4,r5,ror#27 - eor r11,r7,r3 - add r4,r4,r10 - and r11,r11,r6 - mov r6,r6,ror#2 - add r4,r4,r11 - add r3,r3,r9 - and r10,r6,r7 - ldr r9,[sp,#48] - add r3,r3,r4,ror#27 - eor r11,r6,r7 - add r3,r3,r10 - and r11,r11,r5 - mov r5,r5,ror#2 - add r3,r3,r11 - vext.8 q12,q1,q2,#8 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#52] - veor q3,q3,q11 - eor r11,r10,r5 - add r7,r7,r3,ror#27 - veor q3,q3,q8 - mov r4,r4,ror#2 - add r7,r7,r11 - vadd.i32 q13,q2,q14 - eor r10,r3,r5 - add r6,r6,r9 - veor q12,q12,q3 - ldr r9,[sp,#56] - eor r11,r10,r4 - vshr.u32 q3,q12,#30 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - vst1.32 {q13},[r12,:128]! - add r6,r6,r11 - eor r10,r7,r4 - vsli.32 q3,q12,#2 - add r5,r5,r9 - ldr r9,[sp,#60] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#0] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - vadd.i32 q13,q3,q14 - eor r10,r5,r7 - add r3,r3,r9 - vst1.32 {q13},[r12,:128]! 
- sub r12,r12,#64 - teq r1,r2 - sub r8,r8,#16 - subeq r1,r1,#64 - vld1.8 {q0-q1},[r1]! - ldr r9,[sp,#4] - eor r11,r10,r6 - vld1.8 {q2-q3},[r1]! - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - vld1.32 {d28[],d29[]},[r8,:32]! - add r3,r3,r11 - eor r10,r4,r6 - vrev32.8 q0,q0 - add r7,r7,r9 - ldr r9,[sp,#8] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#12] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#16] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - vrev32.8 q1,q1 - eor r10,r6,r3 - add r4,r4,r9 - vadd.i32 q8,q0,q14 - ldr r9,[sp,#20] - eor r11,r10,r7 - vst1.32 {q8},[r12,:128]! - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#24] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#28] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - eor r10,r3,r5 - add r6,r6,r9 - ldr r9,[sp,#32] - eor r11,r10,r4 - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - vrev32.8 q2,q2 - eor r10,r7,r4 - add r5,r5,r9 - vadd.i32 q9,q1,q14 - ldr r9,[sp,#36] - eor r11,r10,r3 - vst1.32 {q9},[r12,:128]! - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#40] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - ldr r9,[sp,#44] - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - eor r10,r4,r6 - add r7,r7,r9 - ldr r9,[sp,#48] - eor r11,r10,r5 - add r7,r7,r3,ror#27 - mov r4,r4,ror#2 - add r7,r7,r11 - vrev32.8 q3,q3 - eor r10,r3,r5 - add r6,r6,r9 - vadd.i32 q10,q2,q14 - ldr r9,[sp,#52] - eor r11,r10,r4 - vst1.32 {q10},[r12,:128]! - add r6,r6,r7,ror#27 - mov r3,r3,ror#2 - add r6,r6,r11 - eor r10,r7,r4 - add r5,r5,r9 - ldr r9,[sp,#56] - eor r11,r10,r3 - add r5,r5,r6,ror#27 - mov r7,r7,ror#2 - add r5,r5,r11 - eor r10,r6,r3 - add r4,r4,r9 - ldr r9,[sp,#60] - eor r11,r10,r7 - add r4,r4,r5,ror#27 - mov r6,r6,ror#2 - add r4,r4,r11 - eor r10,r5,r7 - add r3,r3,r9 - eor r11,r10,r6 - add r3,r3,r4,ror#27 - mov r5,r5,ror#2 - add r3,r3,r11 - ldmia r0,{r9,r10,r11,r12} @ accumulate context - add r3,r3,r9 - ldr r9,[r0,#16] - add r4,r4,r10 - add r5,r5,r11 - add r6,r6,r12 - moveq sp,r14 - add r7,r7,r9 - ldrne r9,[sp] - stmia r0,{r3,r4,r5,r6,r7} - addne r12,sp,#3*16 - bne .Loop_neon - - @ vldmia sp!,{d8-d15} - ldmia sp!,{r4-r12,pc} -.size sha1_block_data_order_neon,.-sha1_block_data_order_neon -#endif -#if __ARM_ARCH__>=7 -.type sha1_block_data_order_armv8,%function -.align 5 -sha1_block_data_order_armv8: -.LARMv8: - vstmdb sp!,{d8-d15} @ ABI specification says so - - veor q1,q1,q1 - adr r3,.LK_00_19 - vld1.32 {q0},[r0]! - vld1.32 {d2[0]},[r0] - sub r0,r0,#16 - vld1.32 {d16[],d17[]},[r3,:32]! - vld1.32 {d18[],d19[]},[r3,:32]! - vld1.32 {d20[],d21[]},[r3,:32]! - vld1.32 {d22[],d23[]},[r3,:32] - -.Loop_v8: - vld1.8 {q4-q5},[r1]! - vld1.8 {q6-q7},[r1]! 
- vrev32.8 q4,q4 - vrev32.8 q5,q5 - - vadd.i32 q12,q8,q4 - vrev32.8 q6,q6 - vmov q14,q0 @ offload - subs r2,r2,#1 - - vadd.i32 q13,q8,q5 - vrev32.8 q7,q7 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 0 - .byte 0x68,0x0c,0x02,0xf2 @ sha1c q0,q1,q12 - vadd.i32 q12,q8,q6 - .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 1 - .byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13 - vadd.i32 q13,q8,q7 - .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7 - .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 2 - .byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12 - vadd.i32 q12,q8,q4 - .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4 - .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 3 - .byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13 - vadd.i32 q13,q9,q5 - .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5 - .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 4 - .byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12 - vadd.i32 q12,q9,q6 - .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6 - .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 5 - .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13 - vadd.i32 q13,q9,q7 - .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7 - .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 6 - .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12 - vadd.i32 q12,q9,q4 - .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4 - .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 7 - .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13 - vadd.i32 q13,q9,q5 - .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5 - .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 8 - .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12 - vadd.i32 q12,q10,q6 - .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6 - .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 9 - .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13 - vadd.i32 q13,q10,q7 - .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7 - .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 10 - .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12 - vadd.i32 q12,q10,q4 - .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4 - .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 11 - .byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13 - vadd.i32 q13,q10,q5 - .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5 - .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 12 - .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12 - vadd.i32 q12,q10,q6 - .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6 - .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 13 - .byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13 - vadd.i32 q13,q11,q7 - .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7 - .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 14 - .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12 - vadd.i32 q12,q11,q4 - .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4 - .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4 - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 15 - .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13 - vadd.i32 q13,q11,q5 - .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5 - .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5 - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 16 - .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12 - vadd.i32 q12,q11,q6 - .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6 - .byte 
0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 17 - .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13 - vadd.i32 q13,q11,q7 - - .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 18 - .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12 - - .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 19 - .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13 - - vadd.i32 q1,q1,q2 - vadd.i32 q0,q0,q14 - bne .Loop_v8 - - vst1.32 {q0},[r0]! - vst1.32 {d2[0]},[r0] - - vldmia sp!,{d8-d15} - bx lr @ bx lr -.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8 -#endif -.comm OPENSSL_armcap_P,4,4 +.size sha1_block_data_order,.-sha1_block_data_order +.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by " +.align 2 diff --git a/app/openssl/crypto/sha/asm/sha1-armv8.S b/app/openssl/crypto/sha/asm/sha1-armv8.S deleted file mode 100644 index f9d12625..00000000 --- a/app/openssl/crypto/sha/asm/sha1-armv8.S +++ /dev/null @@ -1,1211 +0,0 @@ -#include "arm_arch.h" - -.text - -.globl sha1_block_data_order -.type sha1_block_data_order,%function -.align 6 -sha1_block_data_order: - ldr x16,.LOPENSSL_armcap_P - adr x17,.LOPENSSL_armcap_P - add x16,x16,x17 - ldr w16,[x16] - tst w16,#ARMV8_SHA1 - b.ne .Lv8_entry - - stp x29,x30,[sp,#-96]! - add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - - ldp w20,w21,[x0] - ldp w22,w23,[x0,#8] - ldr w24,[x0,#16] - -.Loop: - ldr x3,[x1],#64 - movz w28,#0x7999 - sub x2,x2,#1 - movk w28,#0x5a82,lsl#16 -#ifdef __ARMEB__ - ror x3,x3,#32 -#else - rev32 x3,x3 -#endif - add w24,w24,w28 // warm it up - add w24,w24,w3 - lsr x4,x3,#32 - ldr x5,[x1,#-56] - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w4 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x5,x5,#32 -#else - rev32 x5,x5 -#endif - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w5 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - lsr x6,x5,#32 - ldr x7,[x1,#-48] - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w6 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x7,x7,#32 -#else - rev32 x7,x7 -#endif - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w7 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - lsr x8,x7,#32 - ldr x9,[x1,#-40] - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - add w24,w24,w8 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x9,x9,#32 -#else - rev32 x9,x9 -#endif - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w9 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - lsr x10,x9,#32 - ldr x11,[x1,#-32] - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w10 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x11,x11,#32 -#else - rev32 x11,x11 -#endif - bic w25,w21,w24 - 
and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w11 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - lsr x12,x11,#32 - ldr x13,[x1,#-24] - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w12 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x13,x13,#32 -#else - rev32 x13,x13 -#endif - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - add w24,w24,w13 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - lsr x14,x13,#32 - ldr x15,[x1,#-16] - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - add w23,w23,w14 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x15,x15,#32 -#else - rev32 x15,x15 -#endif - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - add w22,w22,w15 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - lsr x16,x15,#32 - ldr x17,[x1,#-8] - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - add w21,w21,w16 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) -#ifdef __ARMEB__ - ror x17,x17,#32 -#else - rev32 x17,x17 -#endif - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w17 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - lsr x19,x17,#32 - eor w3,w3,w5 - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - eor w3,w3,w11 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - eor w3,w3,w16 - ror w22,w22,#2 - add w24,w24,w19 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - bic w25,w23,w21 - and w26,w22,w21 - ror w27,w20,#27 - eor w4,w4,w12 - add w23,w23,w28 // future e+=K - orr w25,w25,w26 - add w24,w24,w27 // e+=rot(a,5) - eor w4,w4,w17 - ror w21,w21,#2 - add w23,w23,w3 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - bic w25,w22,w20 - and w26,w21,w20 - ror w27,w24,#27 - eor w5,w5,w13 - add w22,w22,w28 // future e+=K - orr w25,w25,w26 - add w23,w23,w27 // e+=rot(a,5) - eor w5,w5,w19 - ror w20,w20,#2 - add w22,w22,w4 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - bic w25,w21,w24 - and w26,w20,w24 - ror w27,w23,#27 - eor w6,w6,w14 - add w21,w21,w28 // future e+=K - orr w25,w25,w26 - add w22,w22,w27 // e+=rot(a,5) - eor w6,w6,w3 - ror w24,w24,#2 - add w21,w21,w5 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - bic w25,w20,w23 - and w26,w24,w23 - ror w27,w22,#27 - eor w7,w7,w15 - add w20,w20,w28 // future e+=K - orr w25,w25,w26 - add w21,w21,w27 // e+=rot(a,5) - eor w7,w7,w4 - ror w23,w23,#2 - add w20,w20,w6 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w7,w7,#31 - movz w28,#0xeba1 - movk w28,#0x6ed9,lsl#16 - eor w8,w8,w10 - bic w25,w24,w22 - and w26,w23,w22 - ror w27,w21,#27 - eor w8,w8,w16 - add w24,w24,w28 // future e+=K - orr w25,w25,w26 - add w20,w20,w27 // e+=rot(a,5) - eor w8,w8,w5 - ror 
w22,w22,#2 - add w24,w24,w7 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w9,w9,w6 - add w23,w23,w8 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w10,w10,w7 - add w22,w22,w9 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w11,w11,w8 - add w21,w21,w10 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w11,w11,#31 - eor w12,w12,w14 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w12,w12,w9 - add w20,w20,w11 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w12,w12,#31 - eor w13,w13,w15 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w13,w13,w5 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w13,w13,w10 - add w24,w24,w12 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w13,w13,#31 - eor w14,w14,w16 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w14,w14,w6 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w14,w14,w11 - add w23,w23,w13 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w14,w14,#31 - eor w15,w15,w17 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w15,w15,w7 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w15,w15,w12 - add w22,w22,w14 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w15,w15,#31 - eor w16,w16,w19 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w16,w16,w8 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w16,w16,w13 - add w21,w21,w15 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w17,w17,w14 - add w20,w20,w16 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w19,w19,w15 - add w24,w24,w17 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w19,w19,#31 - eor w3,w3,w5 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w3,w3,w11 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w3,w3,w16 - add w23,w23,w19 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w4,w4,w12 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w4,w4,w17 - add w22,w22,w3 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w5,w5,w13 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror 
w24,w24,#2 - eor w5,w5,w19 - add w21,w21,w4 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w6,w6,w14 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w6,w6,w3 - add w20,w20,w5 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w7,w7,w15 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w7,w7,w4 - add w24,w24,w6 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w7,w7,#31 - eor w8,w8,w10 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w8,w8,w16 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w8,w8,w5 - add w23,w23,w7 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w9,w9,w6 - add w22,w22,w8 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w10,w10,w7 - add w21,w21,w9 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w11,w11,w8 - add w20,w20,w10 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w11,w11,#31 - movz w28,#0xbcdc - movk w28,#0x8f1b,lsl#16 - eor w12,w12,w14 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w12,w12,w9 - add w24,w24,w11 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w12,w12,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w13,w13,w15 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w13,w13,w5 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w13,w13,w10 - add w23,w23,w12 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w13,w13,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w14,w14,w16 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w14,w14,w6 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w14,w14,w11 - add w22,w22,w13 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w14,w14,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w15,w15,w17 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w15,w15,w7 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w15,w15,w12 - add w21,w21,w14 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w15,w15,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w16,w16,w19 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w16,w16,w8 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w16,w16,w13 - add w20,w20,w15 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w16,w16,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w17,w17,w3 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w17,w17,w9 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w17,w17,w14 - add w24,w24,w16 // 
future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w17,w17,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w19,w19,w4 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w19,w19,w10 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w19,w19,w15 - add w23,w23,w17 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w19,w19,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w3,w3,w5 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w3,w3,w11 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w3,w3,w16 - add w22,w22,w19 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w3,w3,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w4,w4,w6 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w4,w4,w12 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w4,w4,w17 - add w21,w21,w3 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w4,w4,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w5,w5,w7 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w5,w5,w13 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w5,w5,w19 - add w20,w20,w4 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w5,w5,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w6,w6,w8 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w6,w6,w14 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w6,w6,w3 - add w24,w24,w5 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w6,w6,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w7,w7,w9 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w7,w7,w15 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w7,w7,w4 - add w23,w23,w6 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w7,w7,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w8,w8,w10 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // future e+=K - eor w8,w8,w16 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w8,w8,w5 - add w22,w22,w7 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w8,w8,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w9,w9,w11 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w9,w9,w17 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w9,w9,w6 - add w21,w21,w8 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w9,w9,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w10,w10,w12 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w10,w10,w19 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w10,w10,w7 - add w20,w20,w9 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w10,w10,#31 - orr w25,w22,w23 - and w26,w22,w23 - eor w11,w11,w13 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w11,w11,w3 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w11,w11,w8 - add w24,w24,w10 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w11,w11,#31 - orr w25,w21,w22 - and w26,w21,w22 - eor w12,w12,w14 - ror w27,w20,#27 - and w25,w25,w23 - add w23,w23,w28 // future e+=K - eor w12,w12,w4 - add w24,w24,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w21,w21,#2 - eor w12,w12,w9 - add w23,w23,w11 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w12,w12,#31 - orr w25,w20,w21 - and w26,w20,w21 - eor w13,w13,w15 - ror w27,w24,#27 - and w25,w25,w22 - add w22,w22,w28 // 
future e+=K - eor w13,w13,w5 - add w23,w23,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w20,w20,#2 - eor w13,w13,w10 - add w22,w22,w12 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w13,w13,#31 - orr w25,w24,w20 - and w26,w24,w20 - eor w14,w14,w16 - ror w27,w23,#27 - and w25,w25,w21 - add w21,w21,w28 // future e+=K - eor w14,w14,w6 - add w22,w22,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w24,w24,#2 - eor w14,w14,w11 - add w21,w21,w13 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w14,w14,#31 - orr w25,w23,w24 - and w26,w23,w24 - eor w15,w15,w17 - ror w27,w22,#27 - and w25,w25,w20 - add w20,w20,w28 // future e+=K - eor w15,w15,w7 - add w21,w21,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w23,w23,#2 - eor w15,w15,w12 - add w20,w20,w14 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w15,w15,#31 - movz w28,#0xc1d6 - movk w28,#0xca62,lsl#16 - orr w25,w22,w23 - and w26,w22,w23 - eor w16,w16,w19 - ror w27,w21,#27 - and w25,w25,w24 - add w24,w24,w28 // future e+=K - eor w16,w16,w8 - add w20,w20,w27 // e+=rot(a,5) - orr w25,w25,w26 - ror w22,w22,#2 - eor w16,w16,w13 - add w24,w24,w15 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w17,w17,w14 - add w23,w23,w16 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w19,w19,w15 - add w22,w22,w17 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w19,w19,#31 - eor w3,w3,w5 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w3,w3,w11 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w3,w3,w16 - add w21,w21,w19 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w3,w3,#31 - eor w4,w4,w6 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w4,w4,w12 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w4,w4,w17 - add w20,w20,w3 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w4,w4,#31 - eor w5,w5,w7 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w5,w5,w13 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w5,w5,w19 - add w24,w24,w4 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w5,w5,#31 - eor w6,w6,w8 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w6,w6,w14 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w6,w6,w3 - add w23,w23,w5 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w6,w6,#31 - eor w7,w7,w9 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w7,w7,w15 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w7,w7,w4 - add w22,w22,w6 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w7,w7,#31 - eor w8,w8,w10 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w8,w8,w16 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w8,w8,w5 - add w21,w21,w7 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w8,w8,#31 - eor w9,w9,w11 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w9,w9,w17 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w9,w9,w6 - add w20,w20,w8 // 
future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w9,w9,#31 - eor w10,w10,w12 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w10,w10,w19 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w10,w10,w7 - add w24,w24,w9 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w10,w10,#31 - eor w11,w11,w13 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w11,w11,w3 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w11,w11,w8 - add w23,w23,w10 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w11,w11,#31 - eor w12,w12,w14 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w12,w12,w4 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w12,w12,w9 - add w22,w22,w11 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w12,w12,#31 - eor w13,w13,w15 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w13,w13,w5 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w13,w13,w10 - add w21,w21,w12 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w13,w13,#31 - eor w14,w14,w16 - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w14,w14,w6 - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - eor w14,w14,w11 - add w20,w20,w13 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ror w14,w14,#31 - eor w15,w15,w17 - eor w25,w24,w22 - ror w27,w21,#27 - add w24,w24,w28 // future e+=K - eor w15,w15,w7 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - eor w15,w15,w12 - add w24,w24,w14 // future e+=X[i] - add w20,w20,w25 // e+=F(b,c,d) - ror w15,w15,#31 - eor w16,w16,w19 - eor w25,w23,w21 - ror w27,w20,#27 - add w23,w23,w28 // future e+=K - eor w16,w16,w8 - eor w25,w25,w22 - add w24,w24,w27 // e+=rot(a,5) - ror w21,w21,#2 - eor w16,w16,w13 - add w23,w23,w15 // future e+=X[i] - add w24,w24,w25 // e+=F(b,c,d) - ror w16,w16,#31 - eor w17,w17,w3 - eor w25,w22,w20 - ror w27,w24,#27 - add w22,w22,w28 // future e+=K - eor w17,w17,w9 - eor w25,w25,w21 - add w23,w23,w27 // e+=rot(a,5) - ror w20,w20,#2 - eor w17,w17,w14 - add w22,w22,w16 // future e+=X[i] - add w23,w23,w25 // e+=F(b,c,d) - ror w17,w17,#31 - eor w19,w19,w4 - eor w25,w21,w24 - ror w27,w23,#27 - add w21,w21,w28 // future e+=K - eor w19,w19,w10 - eor w25,w25,w20 - add w22,w22,w27 // e+=rot(a,5) - ror w24,w24,#2 - eor w19,w19,w15 - add w21,w21,w17 // future e+=X[i] - add w22,w22,w25 // e+=F(b,c,d) - ror w19,w19,#31 - ldp w4,w5,[x0] - eor w25,w20,w23 - ror w27,w22,#27 - add w20,w20,w28 // future e+=K - eor w25,w25,w24 - add w21,w21,w27 // e+=rot(a,5) - ror w23,w23,#2 - add w20,w20,w19 // future e+=X[i] - add w21,w21,w25 // e+=F(b,c,d) - ldp w6,w7,[x0,#8] - eor w25,w24,w22 - ror w27,w21,#27 - eor w25,w25,w23 - add w20,w20,w27 // e+=rot(a,5) - ror w22,w22,#2 - ldr w8,[x0,#16] - add w20,w20,w25 // e+=F(b,c,d) - add w21,w21,w5 - add w22,w22,w6 - add w20,w20,w4 - add w23,w23,w7 - add w24,w24,w8 - stp w20,w21,[x0] - stp w22,w23,[x0,#8] - str w24,[x0,#16] - cbnz x2,.Loop - - ldp x19,x20,[sp,#16] - ldp x21,x22,[sp,#32] - ldp x23,x24,[sp,#48] - ldp x25,x26,[sp,#64] - ldp x27,x28,[sp,#80] - ldr x29,[sp],#96 - ret -.size sha1_block_data_order,.-sha1_block_data_order -.type sha1_block_armv8,%function -.align 6 -sha1_block_armv8: -.Lv8_entry: - stp x29,x30,[sp,#-16]! 
- add x29,sp,#0 - - adr x4,.Lconst - eor v1.16b,v1.16b,v1.16b - ld1 {v0.4s},[x0],#16 - ld1 {v1.s}[0],[x0] - sub x0,x0,#16 - ld1 {v16.4s-v19.4s},[x4] - -.Loop_hw: - ld1 {v4.16b-v7.16b},[x1],#64 - sub x2,x2,#1 - rev32 v4.16b,v4.16b - rev32 v5.16b,v5.16b - - add v20.4s,v16.4s,v4.4s - rev32 v6.16b,v6.16b - orr v22.16b,v0.16b,v0.16b // offload - - add v21.4s,v16.4s,v5.4s - rev32 v7.16b,v7.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b - .inst 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0 - add v20.4s,v16.4s,v6.4s - .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 1 - .inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s - add v21.4s,v16.4s,v7.4s - .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b - .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 2 - .inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s - add v20.4s,v16.4s,v4.4s - .inst 0x5e281885 //sha1su1 v5.16b,v4.16b - .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 3 - .inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v5.4s - .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b - .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 4 - .inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s - add v20.4s,v17.4s,v6.4s - .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b - .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 5 - .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v7.4s - .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b - .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 6 - .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - add v20.4s,v17.4s,v4.4s - .inst 0x5e281885 //sha1su1 v5.16b,v4.16b - .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 7 - .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v17.4s,v5.4s - .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b - .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 8 - .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v6.4s - .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b - .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 9 - .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v18.4s,v7.4s - .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b - .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 10 - .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v4.4s - .inst 0x5e281885 //sha1su1 v5.16b,v4.16b - .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 11 - .inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s - add v21.4s,v18.4s,v5.4s - .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b - .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 12 - .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v18.4s,v6.4s - .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b - .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 13 - .inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v7.4s - .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b - .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 14 - .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s - add v20.4s,v19.4s,v4.4s - .inst 0x5e281885 //sha1su1 v5.16b,v4.16b - .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b - 
.inst 0x5e280802 //sha1h v2.16b,v0.16b // 15 - .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v5.4s - .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b - .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 16 - .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - add v20.4s,v19.4s,v6.4s - .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 17 - .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - add v21.4s,v19.4s,v7.4s - - .inst 0x5e280803 //sha1h v3.16b,v0.16b // 18 - .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s - - .inst 0x5e280802 //sha1h v2.16b,v0.16b // 19 - .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s - - add v1.4s,v1.4s,v2.4s - add v0.4s,v0.4s,v22.4s - - cbnz x2,.Loop_hw - - st1 {v0.4s},[x0],#16 - st1 {v1.s}[0],[x0] - - ldr x29,[sp],#16 - ret -.size sha1_block_armv8,.-sha1_block_armv8 -.align 6 -.Lconst: -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 -.LOPENSSL_armcap_P: -.quad OPENSSL_armcap_P-. -.asciz "SHA1 block transform for ARMv8, CRYPTOGAMS by " -.align 2 -.comm OPENSSL_armcap_P,4,4 diff --git a/app/openssl/crypto/sha/asm/sha1-armv8.pl b/app/openssl/crypto/sha/asm/sha1-armv8.pl deleted file mode 100644 index c1f552b6..00000000 --- a/app/openssl/crypto/sha/asm/sha1-armv8.pl +++ /dev/null @@ -1,333 +0,0 @@ -#!/usr/bin/env perl -# -# ==================================================================== -# Written by Andy Polyakov for the OpenSSL -# project. The module is, however, dual licensed under OpenSSL and -# CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. -# ==================================================================== -# -# SHA1 for ARMv8. -# -# Performance in cycles per processed byte and improvement coefficient -# over code generated with "default" compiler: -# -# hardware-assisted software(*) -# Apple A7 2.31 4.13 (+14%) -# Cortex-A5x n/a n/a -# -# (*) Software results are presented mostly for reference purposes. 
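
The sha1-armv8.pl module deleted here selects its code path at run time: on entry it loads OPENSSL_armcap_P, tests the ARMV8_SHA1 bit and branches to .Lv8_entry, so the hardware sha1c/sha1p/sha1m/sha1h rounds run when the CPU advertises them and the integer-only rounds otherwise. A minimal C sketch of that dispatch pattern, assuming AArch64 Linux; sha1_block_ce() and sha1_block_scalar() are hypothetical stand-ins, and OpenSSL itself caches the probe result in OPENSSL_armcap_P rather than calling getauxval on each use:

/* Runtime dispatch in the spirit of the armcap test performed on
 * entry to sha1_block_data_order in the deleted module.  The two
 * block functions are stand-ins, not real OpenSSL symbols. */
#include <stddef.h>
#include <stdint.h>
#include <sys/auxv.h>   /* getauxval, AT_HWCAP */
#include <asm/hwcap.h>  /* HWCAP_SHA1 (AArch64 Linux) */

void sha1_block_scalar(uint32_t *ctx, const void *in, size_t num); /* stand-in */
void sha1_block_ce(uint32_t *ctx, const void *in, size_t num);     /* stand-in */

void sha1_block_data_order(uint32_t *ctx, const void *in, size_t num)
{
    static int has_sha1 = -1;
    if (has_sha1 < 0)                     /* probe once, like OPENSSL_armcap_P */
        has_sha1 = (getauxval(AT_HWCAP) & HWCAP_SHA1) != 0;

    if (has_sha1)
        sha1_block_ce(ctx, in, num);      /* sha1c/sha1p/sha1m/sha1h path */
    else
        sha1_block_scalar(ctx, in, num);  /* integer-only rounds */
}
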
- -$flavour = shift; -open STDOUT,">".shift; - -($ctx,$inp,$num)=("x0","x1","x2"); -@Xw=map("w$_",(3..17,19)); -@Xx=map("x$_",(3..17,19)); -@V=($A,$B,$C,$D,$E)=map("w$_",(20..24)); -($t0,$t1,$t2,$K)=map("w$_",(25..28)); - - -sub BODY_00_19 { -my ($i,$a,$b,$c,$d,$e)=@_; -my $j=($i+2)&15; - -$code.=<<___ if ($i<15 && !($i&1)); - lsr @Xx[$i+1],@Xx[$i],#32 -___ -$code.=<<___ if ($i<14 && !($i&1)); - ldr @Xx[$i+2],[$inp,#`($i+2)*4-64`] -___ -$code.=<<___ if ($i<14 && ($i&1)); -#ifdef __ARMEB__ - ror @Xx[$i+1],@Xx[$i+1],#32 -#else - rev32 @Xx[$i+1],@Xx[$i+1] -#endif -___ -$code.=<<___ if ($i<14); - bic $t0,$d,$b - and $t1,$c,$b - ror $t2,$a,#27 - add $d,$d,$K // future e+=K - orr $t0,$t0,$t1 - add $e,$e,$t2 // e+=rot(a,5) - ror $b,$b,#2 - add $d,$d,@Xw[($i+1)&15] // future e+=X[i] - add $e,$e,$t0 // e+=F(b,c,d) -___ -$code.=<<___ if ($i==19); - movz $K,#0xeba1 - movk $K,#0x6ed9,lsl#16 -___ -$code.=<<___ if ($i>=14); - eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15] - bic $t0,$d,$b - and $t1,$c,$b - ror $t2,$a,#27 - eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15] - add $d,$d,$K // future e+=K - orr $t0,$t0,$t1 - add $e,$e,$t2 // e+=rot(a,5) - eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15] - ror $b,$b,#2 - add $d,$d,@Xw[($i+1)&15] // future e+=X[i] - add $e,$e,$t0 // e+=F(b,c,d) - ror @Xw[$j],@Xw[$j],#31 -___ -} - -sub BODY_40_59 { -my ($i,$a,$b,$c,$d,$e)=@_; -my $j=($i+2)&15; - -$code.=<<___ if ($i==59); - movz $K,#0xc1d6 - movk $K,#0xca62,lsl#16 -___ -$code.=<<___; - orr $t0,$b,$c - and $t1,$b,$c - eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15] - ror $t2,$a,#27 - and $t0,$t0,$d - add $d,$d,$K // future e+=K - eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15] - add $e,$e,$t2 // e+=rot(a,5) - orr $t0,$t0,$t1 - ror $b,$b,#2 - eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15] - add $d,$d,@Xw[($i+1)&15] // future e+=X[i] - add $e,$e,$t0 // e+=F(b,c,d) - ror @Xw[$j],@Xw[$j],#31 -___ -} - -sub BODY_20_39 { -my ($i,$a,$b,$c,$d,$e)=@_; -my $j=($i+2)&15; - -$code.=<<___ if ($i==39); - movz $K,#0xbcdc - movk $K,#0x8f1b,lsl#16 -___ -$code.=<<___ if ($i<78); - eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15] - eor $t0,$d,$b - ror $t2,$a,#27 - add $d,$d,$K // future e+=K - eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15] - eor $t0,$t0,$c - add $e,$e,$t2 // e+=rot(a,5) - ror $b,$b,#2 - eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15] - add $d,$d,@Xw[($i+1)&15] // future e+=X[i] - add $e,$e,$t0 // e+=F(b,c,d) - ror @Xw[$j],@Xw[$j],#31 -___ -$code.=<<___ if ($i==78); - ldp @Xw[1],@Xw[2],[$ctx] - eor $t0,$d,$b - ror $t2,$a,#27 - add $d,$d,$K // future e+=K - eor $t0,$t0,$c - add $e,$e,$t2 // e+=rot(a,5) - ror $b,$b,#2 - add $d,$d,@Xw[($i+1)&15] // future e+=X[i] - add $e,$e,$t0 // e+=F(b,c,d) -___ -$code.=<<___ if ($i==79); - ldp @Xw[3],@Xw[4],[$ctx,#8] - eor $t0,$d,$b - ror $t2,$a,#27 - eor $t0,$t0,$c - add $e,$e,$t2 // e+=rot(a,5) - ror $b,$b,#2 - ldr @Xw[5],[$ctx,#16] - add $e,$e,$t0 // e+=F(b,c,d) -___ -} - -$code.=<<___; -#include "arm_arch.h" - -.text - -.globl sha1_block_data_order -.type sha1_block_data_order,%function -.align 6 -sha1_block_data_order: - ldr x16,.LOPENSSL_armcap_P - adr x17,.LOPENSSL_armcap_P - add x16,x16,x17 - ldr w16,[x16] - tst w16,#ARMV8_SHA1 - b.ne .Lv8_entry - - stp x29,x30,[sp,#-96]! 
- add x29,sp,#0 - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - - ldp $A,$B,[$ctx] - ldp $C,$D,[$ctx,#8] - ldr $E,[$ctx,#16] - -.Loop: - ldr @Xx[0],[$inp],#64 - movz $K,#0x7999 - sub $num,$num,#1 - movk $K,#0x5a82,lsl#16 -#ifdef __ARMEB__ - ror $Xx[0],@Xx[0],#32 -#else - rev32 @Xx[0],@Xx[0] -#endif - add $E,$E,$K // warm it up - add $E,$E,@Xw[0] -___ -for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); } -for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } -for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } -for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } -$code.=<<___; - add $B,$B,@Xw[2] - add $C,$C,@Xw[3] - add $A,$A,@Xw[1] - add $D,$D,@Xw[4] - add $E,$E,@Xw[5] - stp $A,$B,[$ctx] - stp $C,$D,[$ctx,#8] - str $E,[$ctx,#16] - cbnz $num,.Loop - - ldp x19,x20,[sp,#16] - ldp x21,x22,[sp,#32] - ldp x23,x24,[sp,#48] - ldp x25,x26,[sp,#64] - ldp x27,x28,[sp,#80] - ldr x29,[sp],#96 - ret -.size sha1_block_data_order,.-sha1_block_data_order -___ -{{{ -my ($ABCD,$E,$E0,$E1)=map("v$_.16b",(0..3)); -my @MSG=map("v$_.16b",(4..7)); -my @Kxx=map("v$_.4s",(16..19)); -my ($W0,$W1)=("v20.4s","v21.4s"); -my $ABCD_SAVE="v22.16b"; - -$code.=<<___; -.type sha1_block_armv8,%function -.align 6 -sha1_block_armv8: -.Lv8_entry: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - adr x4,.Lconst - eor $E,$E,$E - ld1.32 {$ABCD},[$ctx],#16 - ld1.32 {$E}[0],[$ctx] - sub $ctx,$ctx,#16 - ld1.32 {@Kxx[0]-@Kxx[3]},[x4] - -.Loop_hw: - ld1 {@MSG[0]-@MSG[3]},[$inp],#64 - sub $num,$num,#1 - rev32 @MSG[0],@MSG[0] - rev32 @MSG[1],@MSG[1] - - add.i32 $W0,@Kxx[0],@MSG[0] - rev32 @MSG[2],@MSG[2] - orr $ABCD_SAVE,$ABCD,$ABCD // offload - - add.i32 $W1,@Kxx[0],@MSG[1] - rev32 @MSG[3],@MSG[3] - sha1h $E1,$ABCD - sha1c $ABCD,$E,$W0 // 0 - add.i32 $W0,@Kxx[$j],@MSG[2] - sha1su0 @MSG[0],@MSG[1],@MSG[2] -___ -for ($j=0,$i=1;$i<20-3;$i++) { -my $f=("c","p","m","p")[$i/5]; -$code.=<<___; - sha1h $E0,$ABCD // $i - sha1$f $ABCD,$E1,$W1 - add.i32 $W1,@Kxx[$j],@MSG[3] - sha1su1 @MSG[0],@MSG[3] -___ -$code.=<<___ if ($i<20-4); - sha1su0 @MSG[1],@MSG[2],@MSG[3] -___ - ($E0,$E1)=($E1,$E0); ($W0,$W1)=($W1,$W0); - push(@MSG,shift(@MSG)); $j++ if ((($i+3)%5)==0); -} -$code.=<<___; - sha1h $E0,$ABCD // $i - sha1p $ABCD,$E1,$W1 - add.i32 $W1,@Kxx[$j],@MSG[3] - - sha1h $E1,$ABCD // 18 - sha1p $ABCD,$E0,$W0 - - sha1h $E0,$ABCD // 19 - sha1p $ABCD,$E1,$W1 - - add.i32 $E,$E,$E0 - add.i32 $ABCD,$ABCD,$ABCD_SAVE - - cbnz $num,.Loop_hw - - st1.32 {$ABCD},[$ctx],#16 - st1.32 {$E}[0],[$ctx] - - ldr x29,[sp],#16 - ret -.size sha1_block_armv8,.-sha1_block_armv8 -.align 6 -.Lconst: -.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 -.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 -.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 -.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 -.LOPENSSL_armcap_P: -.quad OPENSSL_armcap_P-. 
-.asciz "SHA1 block transform for ARMv8, CRYPTOGAMS by " -.align 2 -.comm OPENSSL_armcap_P,4,4 -___ -}}} - -{ my %opcode = ( - "sha1c" => 0x5e000000, "sha1p" => 0x5e001000, - "sha1m" => 0x5e002000, "sha1su0" => 0x5e003000, - "sha1h" => 0x5e280800, "sha1su1" => 0x5e281800 ); - - sub unsha1 { - my ($mnemonic,$arg)=@_; - - $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o - && - sprintf ".inst\t0x%08x\t//%s %s", - $opcode{$mnemonic}|$1|($2<<5)|($3<<16), - $mnemonic,$arg; - } -} - -foreach(split("\n",$code)) { - - s/\`([^\`]*)\`/eval($1)/geo; - - s/\b(sha1\w+)\s+([qv].*)/unsha1($1,$2)/geo; - - s/\.\w?32\b//o and s/\.16b/\.4s/go; - m/(ld|st)1[^\[]+\[0\]/o and s/\.4s/\.s/go; - - print $_,"\n"; -} - -close STDOUT; diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.pl b/app/openssl/crypto/sha/asm/sha256-armv4.pl index 505ca8f3..9c84e8d9 100644 --- a/app/openssl/crypto/sha/asm/sha256-armv4.pl +++ b/app/openssl/crypto/sha/asm/sha256-armv4.pl @@ -1,7 +1,7 @@ #!/usr/bin/env perl # ==================================================================== -# Written by Andy Polyakov for the OpenSSL +# Written by Andy Polyakov for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. @@ -21,27 +21,15 @@ # February 2011. # # Profiler-assisted and platform-specific optimization resulted in 16% -# improvement on Cortex A8 core and ~15.4 cycles per processed byte. - -# September 2013. -# -# Add NEON implementation. On Cortex A8 it was measured to process one -# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon -# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only -# code (meaning that latter performs sub-optimally, nothing was done -# about it). - -# May 2014. -# -# Add ARMv8 code path performing at 2.0 cpb on Apple A7. +# improvement on Cortex A8 core and ~17 cycles per processed byte. 
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; $ctx="r0"; $t0="r0"; -$inp="r1"; $t4="r1"; +$inp="r1"; $t3="r1"; $len="r2"; $t1="r2"; -$T1="r3"; $t3="r3"; +$T1="r3"; $A="r4"; $B="r5"; $C="r6"; @@ -64,88 +52,71 @@ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_; $code.=<<___ if ($i<16); #if __ARM_ARCH__>=7 - @ ldr $t1,[$inp],#4 @ $i -# if $i==15 - str $inp,[sp,#17*4] @ make room for $t4 -# endif - eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]` - add $a,$a,$t2 @ h+=Maj(a,b,c) from the past - eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e) - rev $t1,$t1 + ldr $T1,[$inp],#4 #else - @ ldrb $t1,[$inp,#3] @ $i - add $a,$a,$t2 @ h+=Maj(a,b,c) from the past + ldrb $T1,[$inp,#3] @ $i ldrb $t2,[$inp,#2] - ldrb $t0,[$inp,#1] - orr $t1,$t1,$t2,lsl#8 - ldrb $t2,[$inp],#4 - orr $t1,$t1,$t0,lsl#16 -# if $i==15 - str $inp,[sp,#17*4] @ make room for $t4 -# endif - eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]` - orr $t1,$t1,$t2,lsl#24 - eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e) + ldrb $t1,[$inp,#1] + ldrb $t0,[$inp],#4 + orr $T1,$T1,$t2,lsl#8 + orr $T1,$T1,$t1,lsl#16 + orr $T1,$T1,$t0,lsl#24 #endif ___ $code.=<<___; + mov $t0,$e,ror#$Sigma1[0] ldr $t2,[$Ktbl],#4 @ *K256++ - add $h,$h,$t1 @ h+=X[i] - str $t1,[sp,#`$i%16`*4] + eor $t0,$t0,$e,ror#$Sigma1[1] eor $t1,$f,$g - add $h,$h,$t0,ror#$Sigma1[0] @ h+=Sigma1(e) +#if $i>=16 + add $T1,$T1,$t3 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev $T1,$T1 +#endif +#if $i==15 + str $inp,[sp,#17*4] @ leave room for $t3 +#endif + eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e) and $t1,$t1,$e - add $h,$h,$t2 @ h+=K256[i] + str $T1,[sp,#`$i%16`*4] + add $T1,$T1,$t0 eor $t1,$t1,$g @ Ch(e,f,g) - eor $t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]` - add $h,$h,$t1 @ h+=Ch(e,f,g) -#if $i==31 - and $t2,$t2,#0xff - cmp $t2,#0xf2 @ done? 
+ add $T1,$T1,$h + mov $h,$a,ror#$Sigma0[0] + add $T1,$T1,$t1 + eor $h,$h,$a,ror#$Sigma0[1] + add $T1,$T1,$t2 + eor $h,$h,$a,ror#$Sigma0[2] @ Sigma0(a) +#if $i>=15 + ldr $t3,[sp,#`($i+2)%16`*4] @ from BODY_16_xx #endif -#if $i<15 -# if __ARM_ARCH__>=7 - ldr $t1,[$inp],#4 @ prefetch -# else - ldrb $t1,[$inp,#3] -# endif - eor $t2,$a,$b @ a^b, b^c in next round -#else - ldr $t1,[sp,#`($i+2)%16`*4] @ from future BODY_16_xx - eor $t2,$a,$b @ a^b, b^c in next round - ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx -#endif - eor $t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]` @ Sigma0(a) - and $t3,$t3,$t2 @ (b^c)&=(a^b) - add $d,$d,$h @ d+=h - eor $t3,$t3,$b @ Maj(a,b,c) - add $h,$h,$t0,ror#$Sigma0[0] @ h+=Sigma0(a) - @ add $h,$h,$t3 @ h+=Maj(a,b,c) + orr $t0,$a,$b + and $t1,$a,$b + and $t0,$t0,$c + add $h,$h,$T1 + orr $t0,$t0,$t1 @ Maj(a,b,c) + add $d,$d,$T1 + add $h,$h,$t0 ___ - ($t2,$t3)=($t3,$t2); } sub BODY_16_XX { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_; $code.=<<___; - @ ldr $t1,[sp,#`($i+1)%16`*4] @ $i - @ ldr $t4,[sp,#`($i+14)%16`*4] - mov $t0,$t1,ror#$sigma0[0] - add $a,$a,$t2 @ h+=Maj(a,b,c) from the past - mov $t2,$t4,ror#$sigma1[0] - eor $t0,$t0,$t1,ror#$sigma0[1] - eor $t2,$t2,$t4,ror#$sigma1[1] - eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1]) - ldr $t1,[sp,#`($i+0)%16`*4] - eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14]) - ldr $t4,[sp,#`($i+9)%16`*4] - - add $t2,$t2,$t0 - eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]` @ from BODY_00_15 - add $t1,$t1,$t2 - eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e) - add $t1,$t1,$t4 @ X[i] + @ ldr $t3,[sp,#`($i+1)%16`*4] @ $i + ldr $t2,[sp,#`($i+14)%16`*4] + mov $t0,$t3,ror#$sigma0[0] + ldr $T1,[sp,#`($i+0)%16`*4] + eor $t0,$t0,$t3,ror#$sigma0[1] + ldr $t1,[sp,#`($i+9)%16`*4] + eor $t0,$t0,$t3,lsr#$sigma0[2] @ sigma0(X[i+1]) + mov $t3,$t2,ror#$sigma1[0] + add $T1,$T1,$t0 + eor $t3,$t3,$t2,ror#$sigma1[1] + add $T1,$T1,$t1 + eor $t3,$t3,$t2,lsr#$sigma1[2] @ sigma1(X[i+14]) + @ add $T1,$T1,$t3 ___ &BODY_00_15(@_); } @@ -176,64 +147,46 @@ K256: .word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .size K256,.-K256 -.word 0 @ terminator -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-sha256_block_data_order -.align 5 .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: sub r3,pc,#8 @ sha256_block_data_order add $len,$inp,$len,lsl#6 @ len to point at the end of inp -#if __ARM_ARCH__>=7 - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P - tst r12,#ARMV8_SHA256 - bne .LARMv8 - tst r12,#ARMV7_NEON - bne .LNEON -#endif stmdb sp!,{$ctx,$inp,$len,r4-r11,lr} ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H} - sub $Ktbl,r3,#256+32 @ K256 + sub $Ktbl,r3,#256 @ K256 sub sp,sp,#16*4 @ alloca(X[16]) .Loop: -# if __ARM_ARCH__>=7 - ldr $t1,[$inp],#4 -# else - ldrb $t1,[$inp,#3] -# endif - eor $t3,$B,$C @ magic - eor $t2,$t2,$t2 ___ for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); } $code.=".Lrounds_16_xx:\n"; for (;$i<32;$i++) { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); } $code.=<<___; - ldreq $t3,[sp,#16*4] @ pull ctx + and $t2,$t2,#0xff + cmp $t2,#0xf2 bne .Lrounds_16_xx - add $A,$A,$t2 @ h+=Maj(a,b,c) from the past - ldr $t0,[$t3,#0] - ldr $t1,[$t3,#4] - ldr $t2,[$t3,#8] + ldr $T1,[sp,#16*4] @ pull ctx + ldr $t0,[$T1,#0] + ldr $t1,[$T1,#4] + ldr $t2,[$T1,#8] add $A,$A,$t0 - ldr $t0,[$t3,#12] + ldr $t0,[$T1,#12] add $B,$B,$t1 - ldr $t1,[$t3,#16] + ldr $t1,[$T1,#16] add $C,$C,$t2 - ldr $t2,[$t3,#20] + ldr $t2,[$T1,#20] add $D,$D,$t0 - ldr $t0,[$t3,#24] + ldr 
$t0,[$T1,#24] add $E,$E,$t1 - ldr $t1,[$t3,#28] + ldr $t1,[$T1,#28] add $F,$F,$t2 ldr $inp,[sp,#17*4] @ pull inp ldr $t2,[sp,#18*4] @ pull inp+len add $G,$G,$t0 add $H,$H,$t1 - stmia $t3,{$A,$B,$C,$D,$E,$F,$G,$H} + stmia $T1,{$A,$B,$C,$D,$E,$F,$G,$H} cmp $inp,$t2 sub $Ktbl,$Ktbl,#256 @ rewind Ktbl bne .Loop @@ -247,410 +200,12 @@ $code.=<<___; moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) #endif -.size sha256_block_data_order,.-sha256_block_data_order -___ -###################################################################### -# NEON stuff -# -{{{ -my @X=map("q$_",(0..3)); -my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25"); -my $Xfer=$t4; -my $j=0; - -sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; } -sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; } - -sub AUTOLOAD() # thunk [simplified] x86-style perlasm -{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./; - my $arg = pop; - $arg = "#$arg" if ($arg*1 eq $arg); - $code .= "\t$opcode\t".join(',',@_,$arg)."\n"; -} - -sub Xupdate() -{ use integer; - my $body = shift; - my @insns = (&$body,&$body,&$body,&$body); - my ($a,$b,$c,$d,$e,$f,$g,$h); - - &vext_8 ($T0,@X[0],@X[1],4); # X[1..4] - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &vext_8 ($T1,@X[2],@X[3],4); # X[9..12] - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T2,$T0,$sigma0[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += X[9..12] - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T1,$T0,$sigma0[2]); - eval(shift(@insns)); - eval(shift(@insns)); - &vsli_32 ($T2,$T0,32-$sigma0[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T3,$T0,$sigma0[1]); - eval(shift(@insns)); - eval(shift(@insns)); - &veor ($T1,$T1,$T2); - eval(shift(@insns)); - eval(shift(@insns)); - &vsli_32 ($T3,$T0,32-$sigma0[1]); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &veor ($T1,$T1,$T3); # sigma0(X[1..4]) - eval(shift(@insns)); - eval(shift(@insns)); - &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T5,&Dhi(@X[3]),$sigma1[2]); - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4]) - eval(shift(@insns)); - eval(shift(@insns)); - &veor ($T5,$T5,$T4); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[1]); - eval(shift(@insns)); - eval(shift(@insns)); - &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[1]); - eval(shift(@insns)); - eval(shift(@insns)); - &veor ($T5,$T5,$T4); # sigma1(X[14..15]) - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15]) - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[0]); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T5,&Dlo(@X[0]),$sigma1[2]); - eval(shift(@insns)); - eval(shift(@insns)); - &veor ($T5,$T5,$T4); - eval(shift(@insns)); - eval(shift(@insns)); - &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[1]); - eval(shift(@insns)); - eval(shift(@insns)); - &vld1_32 ("{$T0}","[$Ktbl,:128]!"); - eval(shift(@insns)); - eval(shift(@insns)); - &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[1]); - eval(shift(@insns)); - eval(shift(@insns)); - &veor ($T5,$T5,$T4); # sigma1(X[16..17]) - 
eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 (&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17]) - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 ($T0,$T0,@X[0]); - while($#insns>=2) { eval(shift(@insns)); } - &vst1_32 ("{$T0}","[$Xfer,:128]!"); - eval(shift(@insns)); - eval(shift(@insns)); - - push(@X,shift(@X)); # "rotate" X[] -} - -sub Xpreload() -{ use integer; - my $body = shift; - my @insns = (&$body,&$body,&$body,&$body); - my ($a,$b,$c,$d,$e,$f,$g,$h); - - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &vld1_32 ("{$T0}","[$Ktbl,:128]!"); - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &vrev32_8 (@X[0],@X[0]); - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - eval(shift(@insns)); - &vadd_i32 ($T0,$T0,@X[0]); - foreach (@insns) { eval; } # remaining instructions - &vst1_32 ("{$T0}","[$Xfer,:128]!"); - - push(@X,shift(@X)); # "rotate" X[] -} - -sub body_00_15 () { - ( - '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'. - '&add ($h,$h,$t1)', # h+=X[i]+K[i] - '&eor ($t1,$f,$g)', - '&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))', - '&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past - '&and ($t1,$t1,$e)', - '&eor ($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e) - '&eor ($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))', - '&eor ($t1,$t1,$g)', # Ch(e,f,g) - '&add ($h,$h,$t2,"ror#$Sigma1[0]")', # h+=Sigma1(e) - '&eor ($t2,$a,$b)', # a^b, b^c in next round - '&eor ($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a) - '&add ($h,$h,$t1)', # h+=Ch(e,f,g) - '&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'. - '&ldr ($t1,"[$Ktbl]") if ($j==15);'. - '&ldr ($t1,"[sp,#64]") if ($j==31)', - '&and ($t3,$t3,$t2)', # (b^c)&=(a^b) - '&add ($d,$d,$h)', # d+=h - '&add ($h,$h,$t0,"ror#$Sigma0[0]");'. # h+=Sigma0(a) - '&eor ($t3,$t3,$b)', # Maj(a,b,c) - '$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);' - ) -} - -$code.=<<___; -#if __ARM_ARCH__>=7 -.fpu neon - -.type sha256_block_data_order_neon,%function -.align 4 -sha256_block_data_order_neon: -.LNEON: - stmdb sp!,{r4-r12,lr} - - mov $t2,sp - sub sp,sp,#16*4+16 @ alloca - sub $Ktbl,r3,#256+32 @ K256 - bic sp,sp,#15 @ align for 128-bit stores - - vld1.8 {@X[0]},[$inp]! - vld1.8 {@X[1]},[$inp]! - vld1.8 {@X[2]},[$inp]! - vld1.8 {@X[3]},[$inp]! - vld1.32 {$T0},[$Ktbl,:128]! - vld1.32 {$T1},[$Ktbl,:128]! - vld1.32 {$T2},[$Ktbl,:128]! - vld1.32 {$T3},[$Ktbl,:128]! - vrev32.8 @X[0],@X[0] @ yes, even on - str $ctx,[sp,#64] - vrev32.8 @X[1],@X[1] @ big-endian - str $inp,[sp,#68] - mov $Xfer,sp - vrev32.8 @X[2],@X[2] - str $len,[sp,#72] - vrev32.8 @X[3],@X[3] - str $t2,[sp,#76] @ save original sp - vadd.i32 $T0,$T0,@X[0] - vadd.i32 $T1,$T1,@X[1] - vst1.32 {$T0},[$Xfer,:128]! - vadd.i32 $T2,$T2,@X[2] - vst1.32 {$T1},[$Xfer,:128]! - vadd.i32 $T3,$T3,@X[3] - vst1.32 {$T2},[$Xfer,:128]! - vst1.32 {$T3},[$Xfer,:128]! - - ldmia $ctx,{$A-$H} - sub $Xfer,$Xfer,#64 - ldr $t1,[sp,#0] - eor $t2,$t2,$t2 - eor $t3,$B,$C - b .L_00_48 - -.align 4 -.L_00_48: -___ - &Xupdate(\&body_00_15); - &Xupdate(\&body_00_15); - &Xupdate(\&body_00_15); - &Xupdate(\&body_00_15); -$code.=<<___; - teq $t1,#0 @ check for K256 terminator - ldr $t1,[sp,#0] - sub $Xfer,$Xfer,#64 - bne .L_00_48 - - ldr $inp,[sp,#68] - ldr $t0,[sp,#72] - sub $Ktbl,$Ktbl,#256 @ rewind $Ktbl - teq $inp,$t0 - subeq $inp,$inp,#64 @ avoid SEGV - vld1.8 {@X[0]},[$inp]! @ load next input block - vld1.8 {@X[1]},[$inp]! - vld1.8 {@X[2]},[$inp]! - vld1.8 {@X[3]},[$inp]! 
- strne $inp,[sp,#68] - mov $Xfer,sp -___ - &Xpreload(\&body_00_15); - &Xpreload(\&body_00_15); - &Xpreload(\&body_00_15); - &Xpreload(\&body_00_15); -$code.=<<___; - ldr $t0,[$t1,#0] - add $A,$A,$t2 @ h+=Maj(a,b,c) from the past - ldr $t2,[$t1,#4] - ldr $t3,[$t1,#8] - ldr $t4,[$t1,#12] - add $A,$A,$t0 @ accumulate - ldr $t0,[$t1,#16] - add $B,$B,$t2 - ldr $t2,[$t1,#20] - add $C,$C,$t3 - ldr $t3,[$t1,#24] - add $D,$D,$t4 - ldr $t4,[$t1,#28] - add $E,$E,$t0 - str $A,[$t1],#4 - add $F,$F,$t2 - str $B,[$t1],#4 - add $G,$G,$t3 - str $C,[$t1],#4 - add $H,$H,$t4 - str $D,[$t1],#4 - stmia $t1,{$E-$H} - - movne $Xfer,sp - ldrne $t1,[sp,#0] - eorne $t2,$t2,$t2 - ldreq sp,[sp,#76] @ restore original sp - eorne $t3,$B,$C - bne .L_00_48 - - ldmia sp!,{r4-r12,pc} -.size sha256_block_data_order_neon,.-sha256_block_data_order_neon -#endif -___ -}}} -###################################################################### -# ARMv8 stuff -# -{{{ -my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2)); -my @MSG=map("q$_",(8..11)); -my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15)); -my $Ktbl="r3"; - -$code.=<<___; -#if __ARM_ARCH__>=7 -.type sha256_block_data_order_armv8,%function -.align 5 -sha256_block_data_order_armv8: -.LARMv8: - vld1.32 {$ABCD,$EFGH},[$ctx] - sub $Ktbl,r3,#sha256_block_data_order-K256 - -.Loop_v8: - vld1.8 {@MSG[0]-@MSG[1]},[$inp]! - vld1.8 {@MSG[2]-@MSG[3]},[$inp]! - vld1.32 {$W0},[$Ktbl]! - vrev32.8 @MSG[0],@MSG[0] - vrev32.8 @MSG[1],@MSG[1] - vrev32.8 @MSG[2],@MSG[2] - vrev32.8 @MSG[3],@MSG[3] - vmov $ABCD_SAVE,$ABCD @ offload - vmov $EFGH_SAVE,$EFGH - teq $inp,$len -___ -for($i=0;$i<12;$i++) { -$code.=<<___; - vld1.32 {$W1},[$Ktbl]! - vadd.i32 $W0,$W0,@MSG[0] - sha256su0 @MSG[0],@MSG[1] - vmov $abcd,$ABCD - sha256h $ABCD,$EFGH,$W0 - sha256h2 $EFGH,$abcd,$W0 - sha256su1 @MSG[0],@MSG[2],@MSG[3] -___ - ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG)); -} -$code.=<<___; - vld1.32 {$W1},[$Ktbl]! - vadd.i32 $W0,$W0,@MSG[0] - vmov $abcd,$ABCD - sha256h $ABCD,$EFGH,$W0 - sha256h2 $EFGH,$abcd,$W0 - - vld1.32 {$W0},[$Ktbl]! - vadd.i32 $W1,$W1,@MSG[1] - vmov $abcd,$ABCD - sha256h $ABCD,$EFGH,$W1 - sha256h2 $EFGH,$abcd,$W1 - - vld1.32 {$W1},[$Ktbl] - vadd.i32 $W0,$W0,@MSG[2] - sub $Ktbl,$Ktbl,#256-16 @ rewind - vmov $abcd,$ABCD - sha256h $ABCD,$EFGH,$W0 - sha256h2 $EFGH,$abcd,$W0 - - vadd.i32 $W1,$W1,@MSG[3] - vmov $abcd,$ABCD - sha256h $ABCD,$EFGH,$W1 - sha256h2 $EFGH,$abcd,$W1 - - vadd.i32 $ABCD,$ABCD,$ABCD_SAVE - vadd.i32 $EFGH,$EFGH,$EFGH_SAVE - bne .Loop_v8 - - vst1.32 {$ABCD,$EFGH},[$ctx] - - ret @ bx lr -.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8 -#endif -___ -}}} -$code.=<<___; -.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by " +.size sha256_block_data_order,.-sha256_block_data_order +.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by " .align 2 -.comm OPENSSL_armcap_P,4,4 ___ -{ my %opcode = ( - "sha256h" => 0xf3000c40, "sha256h2" => 0xf3100c40, - "sha256su0" => 0xf3ba03c0, "sha256su1" => 0xf3200c40 ); - - sub unsha256 { - my ($mnemonic,$arg)=@_; - - if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) { - my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19) - |(($2&7)<<17)|(($2&8)<<4) - |(($3&7)<<1) |(($3&8)<<2); - # since ARMv7 instructions are always encoded little-endian. 
- # correct solution is to use .inst directive, but older - # assemblers don't implement it:-( - sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s", - $word&0xff,($word>>8)&0xff, - ($word>>16)&0xff,($word>>24)&0xff, - $mnemonic,$arg; - } - } -} - -foreach (split($/,$code)) { - - s/\`([^\`]*)\`/eval $1/geo; - - s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo; - - s/\bret\b/bx lr/go or - s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4 - - print $_,"\n"; -} - +$code =~ s/\`([^\`]*)\`/eval $1/gem; +$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 +print $code; close STDOUT; # enforce flush diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.s b/app/openssl/crypto/sha/asm/sha256-armv4.s index 853d7da5..9c20a63c 100644 --- a/app/openssl/crypto/sha/asm/sha256-armv4.s +++ b/app/openssl/crypto/sha/asm/sha256-armv4.s @@ -23,1721 +23,1463 @@ K256: .word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .size K256,.-K256 -.word 0 @ terminator -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-sha256_block_data_order -.align 5 .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: sub r3,pc,#8 @ sha256_block_data_order add r2,r1,r2,lsl#6 @ len to point at the end of inp -#if __ARM_ARCH__>=7 - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P - tst r12,#ARMV8_SHA256 - bne .LARMv8 - tst r12,#ARMV7_NEON - bne .LNEON -#endif stmdb sp!,{r0,r1,r2,r4-r11,lr} ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} - sub r14,r3,#256+32 @ K256 + sub r14,r3,#256 @ K256 sub sp,sp,#16*4 @ alloca(X[16]) .Loop: -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ magic - eor r12,r12,r12 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 0 -# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 0 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 0 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r8,ror#6 ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] + eor r0,r0,r8,ror#11 eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) +#if 0>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 0==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] + str r3,[sp,#0*4] + add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 0==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 0<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 0>=15 + ldr r1,[sp,#2*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 1 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 1 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] + ldrb r3,[r1,#3] @ 1 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) +#if 1>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 1==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] + str r3,[sp,#1*4] + add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 1==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 1<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 1>=15 + ldr r1,[sp,#3*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 2 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 2 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 2 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r6,ror#6 ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] + eor r0,r0,r6,ror#11 eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) +#if 2>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 2==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] + str r3,[sp,#2*4] + add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 2==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 2<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 2>=15 + ldr r1,[sp,#4*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 3 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 3 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] + ldrb r3,[r1,#3] @ 3 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) +#if 3>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 3==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] + str r3,[sp,#3*4] + add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 3==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 3<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 3>=15 + ldr r1,[sp,#5*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 4 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 4 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 4 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r4,ror#6 ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] + eor r0,r0,r4,ror#11 eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) +#if 4>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 4==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] + str r3,[sp,#4*4] + add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 4==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 4<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 4>=15 + ldr r1,[sp,#6*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 5 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] + ldrb r3,[r1,#3] @ 5 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) +#if 5>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 5==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] + str r3,[sp,#5*4] + add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 5==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 5<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) + add r3,r3,r6 + mov r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 5>=15 + ldr r1,[sp,#7*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 6 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 6 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 6 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r10,ror#6 ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] + eor r0,r0,r10,ror#11 eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) +#if 6>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 6==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] + str r3,[sp,#6*4] + add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 6==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 6<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 6>=15 + ldr r1,[sp,#8*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 7 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] + ldrb r3,[r1,#3] @ 7 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r9,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) +#if 7>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 7==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] + str r3,[sp,#7*4] + add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 7==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 7<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 7>=15 + ldr r1,[sp,#9*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 8 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 8 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 8 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r8,ror#6 ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] + eor r0,r0,r8,ror#11 eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) +#if 8>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 8==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] + str r3,[sp,#8*4] + add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 8==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 8<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 8>=15 + ldr r1,[sp,#10*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 9 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 9 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] + ldrb r3,[r1,#3] @ 9 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) +#if 9>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 9==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] + str r3,[sp,#9*4] + add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 9==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 9<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 9>=15 + ldr r1,[sp,#11*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 10 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 10 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 10 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r6,ror#6 ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] + eor r0,r0,r6,ror#11 eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) +#if 10>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 10==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] + str r3,[sp,#10*4] + add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 10==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 10<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 10>=15 + ldr r1,[sp,#12*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 11 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 11 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] + ldrb r3,[r1,#3] @ 11 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) +#if 11>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 11==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] + str r3,[sp,#11*4] + add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 11==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 11<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 11>=15 + ldr r1,[sp,#13*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 12 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 12 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 12 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r4,ror#6 ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] + eor r0,r0,r4,ror#11 eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) +#if 12>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 12==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] + str r3,[sp,#12*4] + add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 12==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 12<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 12>=15 + ldr r1,[sp,#14*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 13 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 13 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] + ldrb r3,[r1,#3] @ 13 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) +#if 13>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 13==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] + str r3,[sp,#13*4] + add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 13==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 13<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) + add r3,r3,r6 + mov r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 13>=15 + ldr r1,[sp,#15*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 14 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 14 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past + ldrb r3,[r1,#3] @ 14 ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 #endif + mov r0,r10,ror#6 ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] + eor r0,r0,r10,ror#11 eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) +#if 14>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 14==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] + str r3,[sp,#14*4] + add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 14==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 14<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 14>=15 + ldr r1,[sp,#0*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 #if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 15 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) - rev r2,r2 + ldr r3,[r1],#4 #else - @ ldrb r2,[r1,#3] @ 15 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] + ldrb r3,[r1,#3] @ 15 + ldrb r12,[r1,#2] + ldrb r2,[r1,#1] + ldrb r0,[r1],#4 + orr r3,r3,r12,lsl#8 + orr r3,r3,r2,lsl#16 + orr r3,r3,r0,lsl#24 +#endif + mov r0,r9,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) +#if 15>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 15==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] + str r3,[sp,#15*4] + add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 15==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 15<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 15>=15 + ldr r1,[sp,#1*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 .Lrounds_16_xx: - @ ldr r2,[sp,#1*4] @ 16 - @ ldr r1,[sp,#14*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#0*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#9*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + @ ldr r1,[sp,#1*4] @ 16 + ldr r12,[sp,#14*4] + mov r0,r1,ror#7 + ldr r3,[sp,#0*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#9*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r8,ror#6 ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] + eor r0,r0,r8,ror#11 eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) +#if 16>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 16==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] + str r3,[sp,#0*4] + add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 16==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 16<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#2*4] @ 17 - @ ldr r1,[sp,#15*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#1*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#10*4] - + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 16>=15 + ldr r1,[sp,#2*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 + @ ldr r1,[sp,#2*4] @ 17 + ldr r12,[sp,#15*4] + mov r0,r1,ror#7 + ldr r3,[sp,#1*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#10*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) +#if 17>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 17==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] + str r3,[sp,#1*4] + add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 17==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 17<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#3*4] @ 18 - @ ldr r1,[sp,#0*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#2*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#11*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 17>=15 + ldr r1,[sp,#3*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 + @ ldr r1,[sp,#3*4] @ 18 + ldr r12,[sp,#0*4] + mov r0,r1,ror#7 + ldr r3,[sp,#2*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#11*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r6,ror#6 ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] + eor r0,r0,r6,ror#11 eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) +#if 18>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 18==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] + str r3,[sp,#2*4] + add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 18==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 18<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#4*4] @ 19 - @ ldr r1,[sp,#1*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#3*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#12*4] - + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 18>=15 + ldr r1,[sp,#4*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 + @ ldr r1,[sp,#4*4] @ 19 + ldr r12,[sp,#1*4] + mov r0,r1,ror#7 + ldr r3,[sp,#3*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#12*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) +#if 19>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 19==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] + str r3,[sp,#3*4] + add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 19==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 19<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#5*4] @ 20 - @ ldr r1,[sp,#2*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#4*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#13*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 19>=15 + ldr r1,[sp,#5*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 + @ ldr r1,[sp,#5*4] @ 20 + ldr r12,[sp,#2*4] + mov r0,r1,ror#7 + ldr r3,[sp,#4*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#13*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r4,ror#6 ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] + eor r0,r0,r4,ror#11 eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) +#if 20>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 20==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] + str r3,[sp,#4*4] + add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 20==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 20<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#6*4] @ 21 - @ ldr r1,[sp,#3*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#5*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#14*4] - + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 20>=15 + ldr r1,[sp,#6*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 + @ ldr r1,[sp,#6*4] @ 21 + ldr r12,[sp,#3*4] + mov r0,r1,ror#7 + ldr r3,[sp,#5*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#14*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) +#if 21>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 21==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] + str r3,[sp,#5*4] + add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 21==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 21<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#7*4] @ 22 - @ ldr r1,[sp,#4*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#6*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#15*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + add r3,r3,r6 + mov r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 21>=15 + ldr r1,[sp,#7*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 + @ ldr r1,[sp,#7*4] @ 22 + ldr r12,[sp,#4*4] + mov r0,r1,ror#7 + ldr r3,[sp,#6*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#15*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r10,ror#6 ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] + eor r0,r0,r10,ror#11 eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) +#if 22>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 22==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] + str r3,[sp,#6*4] + add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 22==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 22<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#8*4] @ 23 - @ ldr r1,[sp,#5*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#7*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#0*4] - + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 22>=15 + ldr r1,[sp,#8*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 + @ ldr r1,[sp,#8*4] @ 23 + ldr r12,[sp,#5*4] + mov r0,r1,ror#7 + ldr r3,[sp,#7*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#0*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r9,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) +#if 23>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 23==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] + str r3,[sp,#7*4] + add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 23==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 23<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#9*4] @ 24 - @ ldr r1,[sp,#6*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#8*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#1*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 23>=15 + ldr r1,[sp,#9*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 + @ ldr r1,[sp,#9*4] @ 24 + ldr r12,[sp,#6*4] + mov r0,r1,ror#7 + ldr r3,[sp,#8*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#1*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r8,ror#6 ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] + eor r0,r0,r8,ror#11 eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) +#if 24>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 24==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] + str r3,[sp,#8*4] + add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 24==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 24<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#10*4] @ 25 - @ ldr r1,[sp,#7*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#9*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#2*4] - + add r3,r3,r11 + mov r11,r4,ror#2 + add r3,r3,r2 + eor r11,r11,r4,ror#13 + add r3,r3,r12 + eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 24>=15 + ldr r1,[sp,#10*4] @ from BODY_16_xx +#endif + orr r0,r4,r5 + and r2,r4,r5 + and r0,r0,r6 + add r11,r11,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r7,r7,r3 + add r11,r11,r0 + @ ldr r1,[sp,#10*4] @ 25 + ldr r12,[sp,#7*4] + mov r0,r1,ror#7 + ldr r3,[sp,#9*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#2*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r7,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r7,ror#11 eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) +#if 25>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 25==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] + str r3,[sp,#9*4] + add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 25==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 25<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#11*4] @ 26 - @ ldr r1,[sp,#8*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#10*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#3*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + add r3,r3,r10 + mov r10,r11,ror#2 + add r3,r3,r2 + eor r10,r10,r11,ror#13 + add r3,r3,r12 + eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 25>=15 + ldr r1,[sp,#11*4] @ from BODY_16_xx +#endif + orr r0,r11,r4 + and r2,r11,r4 + and r0,r0,r5 + add r10,r10,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r6,r6,r3 + add r10,r10,r0 + @ ldr r1,[sp,#11*4] @ 26 + ldr r12,[sp,#8*4] + mov r0,r1,ror#7 + ldr r3,[sp,#10*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#3*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r6,ror#6 ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] + eor r0,r0,r6,ror#11 eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) +#if 26>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 26==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] + str r3,[sp,#10*4] + add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 26==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 26<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#12*4] @ 27 - @ ldr r1,[sp,#9*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#11*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#4*4] - + add r3,r3,r9 + mov r9,r10,ror#2 + add r3,r3,r2 + eor r9,r9,r10,ror#13 + add r3,r3,r12 + eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 26>=15 + ldr r1,[sp,#12*4] @ from BODY_16_xx +#endif + orr r0,r10,r11 + and r2,r10,r11 + and r0,r0,r4 + add r9,r9,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r5,r5,r3 + add r9,r9,r0 + @ ldr r1,[sp,#12*4] @ 27 + ldr r12,[sp,#9*4] + mov r0,r1,ror#7 + ldr r3,[sp,#11*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#4*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r5,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r5,ror#11 eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) +#if 27>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 27==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] + str r3,[sp,#11*4] + add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 27==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 27<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#13*4] @ 28 - @ ldr r1,[sp,#10*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#12*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#5*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + add r3,r3,r8 + mov r8,r9,ror#2 + add r3,r3,r2 + eor r8,r8,r9,ror#13 + add r3,r3,r12 + eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 27>=15 + ldr r1,[sp,#13*4] @ from BODY_16_xx +#endif + orr r0,r9,r10 + and r2,r9,r10 + and r0,r0,r11 + add r8,r8,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r4,r4,r3 + add r8,r8,r0 + @ ldr r1,[sp,#13*4] @ 28 + ldr r12,[sp,#10*4] + mov r0,r1,ror#7 + ldr r3,[sp,#12*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#5*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r4,ror#6 ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] + eor r0,r0,r4,ror#11 eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) +#if 28>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 28==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] + str r3,[sp,#12*4] + add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 28==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 28<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#14*4] @ 29 - @ ldr r1,[sp,#11*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#13*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#6*4] - + add r3,r3,r7 + mov r7,r8,ror#2 + add r3,r3,r2 + eor r7,r7,r8,ror#13 + add r3,r3,r12 + eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 28>=15 + ldr r1,[sp,#14*4] @ from BODY_16_xx +#endif + orr r0,r8,r9 + and r2,r8,r9 + and r0,r0,r10 + add r7,r7,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r11,r11,r3 + add r7,r7,r0 + @ ldr r1,[sp,#14*4] @ 29 + ldr r12,[sp,#11*4] + mov r0,r1,ror#7 + ldr r3,[sp,#13*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#6*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r11,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r11,ror#11 eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) +#if 29>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 29==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] + str r3,[sp,#13*4] + add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 29==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? 
-#endif -#if 29<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#15*4] @ 30 - @ ldr r1,[sp,#12*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#14*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#7*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] + add r3,r3,r6 + mov r6,r7,ror#2 + add r3,r3,r2 + eor r6,r6,r7,ror#13 + add r3,r3,r12 + eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 29>=15 + ldr r1,[sp,#15*4] @ from BODY_16_xx +#endif + orr r0,r7,r8 + and r2,r7,r8 + and r0,r0,r9 + add r6,r6,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r10,r10,r3 + add r6,r6,r0 + @ ldr r1,[sp,#15*4] @ 30 + ldr r12,[sp,#12*4] + mov r0,r1,ror#7 + ldr r3,[sp,#14*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#7*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 + add r3,r3,r0 + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r10,ror#6 ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] + eor r0,r0,r10,ror#11 eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) +#if 30>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 30==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] + str r3,[sp,#14*4] + add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 30==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 30<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#0*4] @ 31 - @ ldr r1,[sp,#13*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#15*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#8*4] - + add r3,r3,r5 + mov r5,r6,ror#2 + add r3,r3,r2 + eor r5,r5,r6,ror#13 + add r3,r3,r12 + eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 30>=15 + ldr r1,[sp,#0*4] @ from BODY_16_xx +#endif + orr r0,r6,r7 + and r2,r6,r7 + and r0,r0,r8 + add r5,r5,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r9,r9,r3 + add r5,r5,r0 + @ ldr r1,[sp,#0*4] @ 31 + ldr r12,[sp,#13*4] + mov r0,r1,ror#7 + ldr r3,[sp,#15*4] + eor r0,r0,r1,ror#18 + ldr r2,[sp,#8*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] + eor r1,r1,r12,ror#19 + add r3,r3,r2 + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 + mov r0,r9,ror#6 + ldr r12,[r14],#4 @ *K256++ + eor r0,r0,r9,ror#11 eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) +#if 31>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 31==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif + eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] + str r3,[sp,#15*4] + add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 31==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 31<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) - ldreq r3,[sp,#16*4] @ pull ctx + add r3,r3,r4 + mov r4,r5,ror#2 + add r3,r3,r2 + eor r4,r4,r5,ror#13 + add r3,r3,r12 + eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 31>=15 + ldr r1,[sp,#1*4] @ from BODY_16_xx +#endif + orr r0,r5,r6 + and r2,r5,r6 + and r0,r0,r7 + add r4,r4,r3 + orr r0,r0,r2 @ Maj(a,b,c) + add r8,r8,r3 + add r4,r4,r0 + and r12,r12,#0xff + cmp r12,#0xf2 bne .Lrounds_16_xx - add r4,r4,r12 @ h+=Maj(a,b,c) from the past + ldr r3,[sp,#16*4] @ pull ctx ldr r0,[r3,#0] ldr r2,[r3,#4] ldr r12,[r3,#8] @@ -1770,921 +1512,6 @@ sha256_block_data_order: moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif -.size sha256_block_data_order,.-sha256_block_data_order -#if __ARM_ARCH__>=7 -.fpu neon - -.type sha256_block_data_order_neon,%function -.align 4 -sha256_block_data_order_neon: -.LNEON: - stmdb sp!,{r4-r12,lr} - - mov r12,sp - sub sp,sp,#16*4+16 @ alloca - sub r14,r3,#256+32 @ K256 - bic sp,sp,#15 @ align for 128-bit stores - - vld1.8 {q0},[r1]! 
- vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - vld1.32 {q8},[r14,:128]! - vld1.32 {q9},[r14,:128]! - vld1.32 {q10},[r14,:128]! - vld1.32 {q11},[r14,:128]! - vrev32.8 q0,q0 @ yes, even on - str r0,[sp,#64] - vrev32.8 q1,q1 @ big-endian - str r1,[sp,#68] - mov r1,sp - vrev32.8 q2,q2 - str r2,[sp,#72] - vrev32.8 q3,q3 - str r12,[sp,#76] @ save original sp - vadd.i32 q8,q8,q0 - vadd.i32 q9,q9,q1 - vst1.32 {q8},[r1,:128]! - vadd.i32 q10,q10,q2 - vst1.32 {q9},[r1,:128]! - vadd.i32 q11,q11,q3 - vst1.32 {q10},[r1,:128]! - vst1.32 {q11},[r1,:128]! - - ldmia r0,{r4-r11} - sub r1,r1,#64 - ldr r2,[sp,#0] - eor r12,r12,r12 - eor r3,r5,r6 - b .L_00_48 - -.align 4 -.L_00_48: - vext.8 q8,q0,q1,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q2,q3,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q0,q0,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#4] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d7,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d7,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d7,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q0,q0,q9 - add r10,r10,r2 - ldr r2,[sp,#8] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d7,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d7,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d0,d0,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d0,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d0,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d0,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#12] - and r3,r3,r12 - vshr.u32 d24,d0,#19 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 {q8},[r14,:128]! - add r8,r8,r2 - vsli.32 d24,d0,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d1,d1,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q0 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! 
- add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q1,q2,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q3,q0,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q1,q1,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#20] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d1,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d1,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d1,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q1,q1,q9 - add r6,r6,r2 - ldr r2,[sp,#24] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d1,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d1,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d2,d2,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d2,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d2,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d2,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#28] - and r3,r3,r12 - vshr.u32 d24,d2,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! - add r4,r4,r2 - vsli.32 d24,d2,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d3,d3,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q1 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vext.8 q8,q2,q3,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q0,q1,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q2,q2,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#36] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d3,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d3,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d3,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q2,q2,q9 - add r10,r10,r2 - ldr r2,[sp,#40] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d3,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d3,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d4,d4,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d4,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d4,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d4,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#44] - and r3,r3,r12 - vshr.u32 d24,d4,#19 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 {q8},[r14,:128]! 
- add r8,r8,r2 - vsli.32 d24,d4,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d5,d5,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q2 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q3,q0,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q1,q2,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q3,q3,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#52] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d5,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d5,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d5,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q3,q3,q9 - add r6,r6,r2 - ldr r2,[sp,#56] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d5,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d5,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d6,d6,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d6,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d6,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d6,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#60] - and r3,r3,r12 - vshr.u32 d24,d6,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! - add r4,r4,r2 - vsli.32 d24,d6,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d7,d7,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q3 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[r14] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - teq r2,#0 @ check for K256 terminator - ldr r2,[sp,#0] - sub r1,r1,#64 - bne .L_00_48 - - ldr r1,[sp,#68] - ldr r0,[sp,#72] - sub r14,r14,#256 @ rewind r14 - teq r1,r0 - subeq r1,r1,#64 @ avoid SEGV - vld1.8 {q0},[r1]! @ load next input block - vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - strne r1,[sp,#68] - mov r1,sp - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q0,q0 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q0 - ldr r2,[sp,#4] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#8] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#12] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q1,q1 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q1 - ldr r2,[sp,#20] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#24] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#28] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q2,q2 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q2 - ldr r2,[sp,#36] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#40] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#44] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q3,q3 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q3 - ldr r2,[sp,#52] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#56] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#60] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#64] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! - ldr r0,[r2,#0] - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldr r12,[r2,#4] - ldr r3,[r2,#8] - ldr r1,[r2,#12] - add r4,r4,r0 @ accumulate - ldr r0,[r2,#16] - add r5,r5,r12 - ldr r12,[r2,#20] - add r6,r6,r3 - ldr r3,[r2,#24] - add r7,r7,r1 - ldr r1,[r2,#28] - add r8,r8,r0 - str r4,[r2],#4 - add r9,r9,r12 - str r5,[r2],#4 - add r10,r10,r3 - str r6,[r2],#4 - add r11,r11,r1 - str r7,[r2],#4 - stmia r2,{r8-r11} - - movne r1,sp - ldrne r2,[sp,#0] - eorne r12,r12,r12 - ldreq sp,[sp,#76] @ restore original sp - eorne r3,r5,r6 - bne .L_00_48 - - ldmia sp!,{r4-r12,pc} -.size sha256_block_data_order_neon,.-sha256_block_data_order_neon -#endif -#if __ARM_ARCH__>=7 -.type sha256_block_data_order_armv8,%function -.align 5 -sha256_block_data_order_armv8: -.LARMv8: - vld1.32 {q0,q1},[r0] - sub r3,r3,#sha256_block_data_order-K256 - -.Loop_v8: - vld1.8 {q8-q9},[r1]! - vld1.8 {q10-q11},[r1]! - vld1.32 {q12},[r3]! 
- vrev32.8 q8,q8 - vrev32.8 q9,q9 - vrev32.8 q10,q10 - vrev32.8 q11,q11 - vmov q14,q0 @ offload - vmov q15,q1 - teq r1,r2 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9 - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11 - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9 - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11 - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9 - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11 - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - - vld1.32 {q12},[r3]! 
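(Reader context, not part of the patch: the .byte sequences above are hand-assembled encodings of the ARMv8 crypto-extension instructions sha256su0/sha256h/sha256h2/sha256su1; assemblers of the time could not emit these mnemonics from 32-bit mode. Each group performs one message-schedule update plus four rounds on the packed state in q0/q1. A hedged ACLE-intrinsics sketch of one such group; quad_round is a name invented here, and a compiler targeting armv8-a+crypto is assumed:

    #include <arm_neon.h>

    /* One su0/h/h2/su1 group: abcd/efgh hold the packed hash state,
     * w0..w12 are four-word message windows, k the round constants. */
    static void quad_round(uint32x4_t *abcd, uint32x4_t *efgh,
                           uint32x4_t *w0, uint32x4_t w4,
                           uint32x4_t w8, uint32x4_t w12, uint32x4_t k)
    {
        uint32x4_t wk = vaddq_u32(*w0, k);        /* vadd.i32 q12,q12,q8  */
        uint32x4_t t  = *abcd;                    /* vmov    q2,q0        */
        *w0   = vsha256su0q_u32(*w0, w4);         /* sha256su0 q8,q9      */
        *abcd = vsha256hq_u32(*abcd, *efgh, wk);  /* sha256h   q0,q1,q12  */
        *efgh = vsha256h2q_u32(*efgh, t, wk);     /* sha256h2  q1,q2,q12  */
        *w0   = vsha256su1q_u32(*w0, w8, w12);    /* sha256su1 q8,q10,q11 */
    }

    int main(void)
    {
        uint32x4_t abcd = vdupq_n_u32(0), efgh = vdupq_n_u32(0);
        uint32x4_t w0 = vdupq_n_u32(1), w4 = vdupq_n_u32(2);
        quad_round(&abcd, &efgh, &w0, w4, vdupq_n_u32(3),
                   vdupq_n_u32(4), vdupq_n_u32(5));
        return 0;
    }
)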
- vadd.i32 q13,q13,q9 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - - vld1.32 {q13},[r3] - vadd.i32 q12,q12,q10 - sub r3,r3,#256-16 @ rewind - vmov q2,q0 - .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12 - .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12 - - vadd.i32 q13,q13,q11 - vmov q2,q0 - .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13 - .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13 - - vadd.i32 q0,q0,q14 - vadd.i32 q1,q1,q15 - bne .Loop_v8 - - vst1.32 {q0,q1},[r0] - - bx lr @ bx lr -.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8 -#endif -.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by " +.size sha256_block_data_order,.-sha256_block_data_order +.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by " .align 2 -.comm OPENSSL_armcap_P,4,4 diff --git a/app/openssl/crypto/sha/asm/sha256-armv8.S b/app/openssl/crypto/sha/asm/sha256-armv8.S deleted file mode 100644 index bd43b1fe..00000000 --- a/app/openssl/crypto/sha/asm/sha256-armv8.S +++ /dev/null @@ -1,1141 +0,0 @@ -#include "arm_arch.h" - -.text - -.globl sha256_block_data_order -.type sha256_block_data_order,%function -.align 6 -sha256_block_data_order: - ldr x16,.LOPENSSL_armcap_P - adr x17,.LOPENSSL_armcap_P - add x16,x16,x17 - ldr w16,[x16] - tst w16,#ARMV8_SHA256 - b.ne .Lv8_entry - stp x29,x30,[sp,#-128]! - add x29,sp,#0 - - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#4*4 - - ldp w20,w21,[x0] // load context - ldp w22,w23,[x0,#2*4] - ldp w24,w25,[x0,#4*4] - add x2,x1,x2,lsl#6 // end of input - ldp w26,w27,[x0,#6*4] - adr x30,K256 - stp x0,x2,[x29,#96] - -.Loop: - ldp w3,w4,[x1],#2*4 - ldr w19,[x30],#4 // *K++ - eor w28,w21,w22 // magic seed - str x1,[x29,#112] -#ifndef __ARMEB__ - rev w3,w3 // 0 -#endif - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - eor w6,w24,w24,ror#14 - and w17,w25,w24 - bic w19,w26,w24 - add w27,w27,w3 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w6,ror#11 // Sigma1(e) - ror w6,w20,#2 - add w27,w27,w17 // h+=Ch(e,f,g) - eor w17,w20,w20,ror#9 - add w27,w27,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w23,w23,w27 // d+=h - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w6,w17,ror#13 // Sigma0(a) - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w4,w4 // 1 -#endif - ldp w5,w6,[x1],#2*4 - add w27,w27,w17 // h+=Sigma0(a) - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - eor w7,w23,w23,ror#14 - and w17,w24,w23 - bic w28,w25,w23 - add w26,w26,w4 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w7,ror#11 // Sigma1(e) - ror w7,w27,#2 - add w26,w26,w17 // h+=Ch(e,f,g) - eor w17,w27,w27,ror#9 - add w26,w26,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w22,w22,w26 // d+=h - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w7,w17,ror#13 // Sigma0(a) - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w5,w5 // 2 -#endif - add w26,w26,w17 // h+=Sigma0(a) - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - eor w8,w22,w22,ror#14 - and w17,w23,w22 - bic w19,w24,w22 - add w25,w25,w5 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w8,ror#11 // Sigma1(e) - ror w8,w26,#2 - add w25,w25,w17 // h+=Ch(e,f,g) - eor 
w17,w26,w26,ror#9 - add w25,w25,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w21,w21,w25 // d+=h - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w8,w17,ror#13 // Sigma0(a) - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w6,w6 // 3 -#endif - ldp w7,w8,[x1],#2*4 - add w25,w25,w17 // h+=Sigma0(a) - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - eor w9,w21,w21,ror#14 - and w17,w22,w21 - bic w28,w23,w21 - add w24,w24,w6 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w9,ror#11 // Sigma1(e) - ror w9,w25,#2 - add w24,w24,w17 // h+=Ch(e,f,g) - eor w17,w25,w25,ror#9 - add w24,w24,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w20,w20,w24 // d+=h - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w9,w17,ror#13 // Sigma0(a) - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w7,w7 // 4 -#endif - add w24,w24,w17 // h+=Sigma0(a) - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - eor w10,w20,w20,ror#14 - and w17,w21,w20 - bic w19,w22,w20 - add w23,w23,w7 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w10,ror#11 // Sigma1(e) - ror w10,w24,#2 - add w23,w23,w17 // h+=Ch(e,f,g) - eor w17,w24,w24,ror#9 - add w23,w23,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w27,w27,w23 // d+=h - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w10,w17,ror#13 // Sigma0(a) - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w8,w8 // 5 -#endif - ldp w9,w10,[x1],#2*4 - add w23,w23,w17 // h+=Sigma0(a) - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - eor w11,w27,w27,ror#14 - and w17,w20,w27 - bic w28,w21,w27 - add w22,w22,w8 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w11,ror#11 // Sigma1(e) - ror w11,w23,#2 - add w22,w22,w17 // h+=Ch(e,f,g) - eor w17,w23,w23,ror#9 - add w22,w22,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w26,w26,w22 // d+=h - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w11,w17,ror#13 // Sigma0(a) - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w9,w9 // 6 -#endif - add w22,w22,w17 // h+=Sigma0(a) - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - eor w12,w26,w26,ror#14 - and w17,w27,w26 - bic w19,w20,w26 - add w21,w21,w9 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w12,ror#11 // Sigma1(e) - ror w12,w22,#2 - add w21,w21,w17 // h+=Ch(e,f,g) - eor w17,w22,w22,ror#9 - add w21,w21,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w25,w25,w21 // d+=h - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w12,w17,ror#13 // Sigma0(a) - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w10,w10 // 7 -#endif - ldp w11,w12,[x1],#2*4 - add w21,w21,w17 // h+=Sigma0(a) - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - eor w13,w25,w25,ror#14 - and w17,w26,w25 - bic w28,w27,w25 - add w20,w20,w10 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w13,ror#11 // Sigma1(e) - ror w13,w21,#2 - add w20,w20,w17 // h+=Ch(e,f,g) - eor w17,w21,w21,ror#9 - add w20,w20,w16 // h+=Sigma1(e) - and w19,w19,w28 // 
(b^c)&=(a^b) - add w24,w24,w20 // d+=h - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w13,w17,ror#13 // Sigma0(a) - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w20,w20,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w11,w11 // 8 -#endif - add w20,w20,w17 // h+=Sigma0(a) - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - eor w14,w24,w24,ror#14 - and w17,w25,w24 - bic w19,w26,w24 - add w27,w27,w11 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w14,ror#11 // Sigma1(e) - ror w14,w20,#2 - add w27,w27,w17 // h+=Ch(e,f,g) - eor w17,w20,w20,ror#9 - add w27,w27,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w23,w23,w27 // d+=h - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w14,w17,ror#13 // Sigma0(a) - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w12,w12 // 9 -#endif - ldp w13,w14,[x1],#2*4 - add w27,w27,w17 // h+=Sigma0(a) - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - eor w15,w23,w23,ror#14 - and w17,w24,w23 - bic w28,w25,w23 - add w26,w26,w12 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w15,ror#11 // Sigma1(e) - ror w15,w27,#2 - add w26,w26,w17 // h+=Ch(e,f,g) - eor w17,w27,w27,ror#9 - add w26,w26,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w22,w22,w26 // d+=h - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w15,w17,ror#13 // Sigma0(a) - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w13,w13 // 10 -#endif - add w26,w26,w17 // h+=Sigma0(a) - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - eor w0,w22,w22,ror#14 - and w17,w23,w22 - bic w19,w24,w22 - add w25,w25,w13 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w0,ror#11 // Sigma1(e) - ror w0,w26,#2 - add w25,w25,w17 // h+=Ch(e,f,g) - eor w17,w26,w26,ror#9 - add w25,w25,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w21,w21,w25 // d+=h - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w0,w17,ror#13 // Sigma0(a) - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w14,w14 // 11 -#endif - ldp w15,w0,[x1],#2*4 - add w25,w25,w17 // h+=Sigma0(a) - str w6,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - eor w6,w21,w21,ror#14 - and w17,w22,w21 - bic w28,w23,w21 - add w24,w24,w14 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w6,ror#11 // Sigma1(e) - ror w6,w25,#2 - add w24,w24,w17 // h+=Ch(e,f,g) - eor w17,w25,w25,ror#9 - add w24,w24,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w20,w20,w24 // d+=h - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w6,w17,ror#13 // Sigma0(a) - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w15,w15 // 12 -#endif - add w24,w24,w17 // h+=Sigma0(a) - str w7,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - eor w7,w20,w20,ror#14 - and w17,w21,w20 - bic w19,w22,w20 - add w23,w23,w15 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w7,ror#11 // Sigma1(e) - ror w7,w24,#2 - add w23,w23,w17 // h+=Ch(e,f,g) - eor w17,w24,w24,ror#9 - add w23,w23,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w27,w27,w23 // d+=h - eor 
w28,w28,w25 // Maj(a,b,c) - eor w17,w7,w17,ror#13 // Sigma0(a) - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w0,w0 // 13 -#endif - ldp w1,w2,[x1] - add w23,w23,w17 // h+=Sigma0(a) - str w8,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - eor w8,w27,w27,ror#14 - and w17,w20,w27 - bic w28,w21,w27 - add w22,w22,w0 // h+=X[i] - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w8,ror#11 // Sigma1(e) - ror w8,w23,#2 - add w22,w22,w17 // h+=Ch(e,f,g) - eor w17,w23,w23,ror#9 - add w22,w22,w16 // h+=Sigma1(e) - and w19,w19,w28 // (b^c)&=(a^b) - add w26,w26,w22 // d+=h - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w8,w17,ror#13 // Sigma0(a) - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w1,w1 // 14 -#endif - ldr w6,[sp,#12] - add w22,w22,w17 // h+=Sigma0(a) - str w9,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - eor w9,w26,w26,ror#14 - and w17,w27,w26 - bic w19,w20,w26 - add w21,w21,w1 // h+=X[i] - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w9,ror#11 // Sigma1(e) - ror w9,w22,#2 - add w21,w21,w17 // h+=Ch(e,f,g) - eor w17,w22,w22,ror#9 - add w21,w21,w16 // h+=Sigma1(e) - and w28,w28,w19 // (b^c)&=(a^b) - add w25,w25,w21 // d+=h - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w9,w17,ror#13 // Sigma0(a) - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev w2,w2 // 15 -#endif - ldr w7,[sp,#0] - add w21,w21,w17 // h+=Sigma0(a) - str w10,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w9,w4,#7 - and w17,w26,w25 - ror w8,w1,#17 - bic w28,w27,w25 - ror w10,w21,#2 - add w20,w20,w2 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w9,w9,w4,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w10,w10,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w8,w8,w1,ror#19 - eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w10,w21,ror#22 // Sigma0(a) - eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) - add w3,w3,w12 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w3,w3,w9 - add w20,w20,w17 // h+=Sigma0(a) - add w3,w3,w8 -.Loop_16_xx: - ldr w8,[sp,#4] - str w11,[sp,#0] - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - ror w10,w5,#7 - and w17,w25,w24 - ror w9,w2,#17 - bic w19,w26,w24 - ror w11,w20,#2 - add w27,w27,w3 // h+=X[i] - eor w16,w16,w24,ror#11 - eor w10,w10,w5,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w24,ror#25 // Sigma1(e) - eor w11,w11,w20,ror#13 - add w27,w27,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w9,w9,w2,ror#19 - eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) - add w27,w27,w16 // h+=Sigma1(e) - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w11,w20,ror#22 // Sigma0(a) - eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) - add w4,w4,w13 - add w23,w23,w27 // d+=h - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w4,w4,w10 - add w27,w27,w17 // h+=Sigma0(a) - add w4,w4,w9 - ldr w9,[sp,#8] - str w12,[sp,#4] - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - ror w11,w6,#7 - and w17,w24,w23 - ror w10,w3,#17 - bic w28,w25,w23 - ror w12,w27,#2 - 
add w26,w26,w4 // h+=X[i] - eor w16,w16,w23,ror#11 - eor w11,w11,w6,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w23,ror#25 // Sigma1(e) - eor w12,w12,w27,ror#13 - add w26,w26,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w10,w10,w3,ror#19 - eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) - add w26,w26,w16 // h+=Sigma1(e) - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w12,w27,ror#22 // Sigma0(a) - eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) - add w5,w5,w14 - add w22,w22,w26 // d+=h - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w5,w5,w11 - add w26,w26,w17 // h+=Sigma0(a) - add w5,w5,w10 - ldr w10,[sp,#12] - str w13,[sp,#8] - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - ror w12,w7,#7 - and w17,w23,w22 - ror w11,w4,#17 - bic w19,w24,w22 - ror w13,w26,#2 - add w25,w25,w5 // h+=X[i] - eor w16,w16,w22,ror#11 - eor w12,w12,w7,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w22,ror#25 // Sigma1(e) - eor w13,w13,w26,ror#13 - add w25,w25,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w11,w11,w4,ror#19 - eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) - add w25,w25,w16 // h+=Sigma1(e) - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w13,w26,ror#22 // Sigma0(a) - eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) - add w6,w6,w15 - add w21,w21,w25 // d+=h - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w6,w6,w12 - add w25,w25,w17 // h+=Sigma0(a) - add w6,w6,w11 - ldr w11,[sp,#0] - str w14,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - ror w13,w8,#7 - and w17,w22,w21 - ror w12,w5,#17 - bic w28,w23,w21 - ror w14,w25,#2 - add w24,w24,w6 // h+=X[i] - eor w16,w16,w21,ror#11 - eor w13,w13,w8,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w21,ror#25 // Sigma1(e) - eor w14,w14,w25,ror#13 - add w24,w24,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w12,w12,w5,ror#19 - eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) - add w24,w24,w16 // h+=Sigma1(e) - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w14,w25,ror#22 // Sigma0(a) - eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) - add w7,w7,w0 - add w20,w20,w24 // d+=h - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w7,w7,w13 - add w24,w24,w17 // h+=Sigma0(a) - add w7,w7,w12 - ldr w12,[sp,#4] - str w15,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - ror w14,w9,#7 - and w17,w21,w20 - ror w13,w6,#17 - bic w19,w22,w20 - ror w15,w24,#2 - add w23,w23,w7 // h+=X[i] - eor w16,w16,w20,ror#11 - eor w14,w14,w9,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w20,ror#25 // Sigma1(e) - eor w15,w15,w24,ror#13 - add w23,w23,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w13,w13,w6,ror#19 - eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) - add w23,w23,w16 // h+=Sigma1(e) - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w15,w24,ror#22 // Sigma0(a) - eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) - add w8,w8,w1 - add w27,w27,w23 // d+=h - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w8,w8,w14 - add w23,w23,w17 // h+=Sigma0(a) - add w8,w8,w13 - ldr w13,[sp,#8] - str w0,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - ror w15,w10,#7 - and w17,w20,w27 - ror w14,w7,#17 - bic w28,w21,w27 - ror w0,w23,#2 - add w22,w22,w8 // h+=X[i] - eor w16,w16,w27,ror#11 - eor w15,w15,w10,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next 
round - eor w16,w16,w27,ror#25 // Sigma1(e) - eor w0,w0,w23,ror#13 - add w22,w22,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w14,w14,w7,ror#19 - eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) - add w22,w22,w16 // h+=Sigma1(e) - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w0,w23,ror#22 // Sigma0(a) - eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) - add w9,w9,w2 - add w26,w26,w22 // d+=h - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w9,w9,w15 - add w22,w22,w17 // h+=Sigma0(a) - add w9,w9,w14 - ldr w14,[sp,#12] - str w1,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - ror w0,w11,#7 - and w17,w27,w26 - ror w15,w8,#17 - bic w19,w20,w26 - ror w1,w22,#2 - add w21,w21,w9 // h+=X[i] - eor w16,w16,w26,ror#11 - eor w0,w0,w11,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w26,ror#25 // Sigma1(e) - eor w1,w1,w22,ror#13 - add w21,w21,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w15,w15,w8,ror#19 - eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) - add w21,w21,w16 // h+=Sigma1(e) - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w1,w22,ror#22 // Sigma0(a) - eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) - add w10,w10,w3 - add w25,w25,w21 // d+=h - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w10,w10,w0 - add w21,w21,w17 // h+=Sigma0(a) - add w10,w10,w15 - ldr w15,[sp,#0] - str w2,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w1,w12,#7 - and w17,w26,w25 - ror w0,w9,#17 - bic w28,w27,w25 - ror w2,w21,#2 - add w20,w20,w10 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w1,w1,w12,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w2,w2,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w0,w0,w9,ror#19 - eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w2,w21,ror#22 // Sigma0(a) - eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) - add w11,w11,w4 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w11,w11,w1 - add w20,w20,w17 // h+=Sigma0(a) - add w11,w11,w0 - ldr w0,[sp,#4] - str w3,[sp,#0] - ror w16,w24,#6 - add w27,w27,w19 // h+=K[i] - ror w2,w13,#7 - and w17,w25,w24 - ror w1,w10,#17 - bic w19,w26,w24 - ror w3,w20,#2 - add w27,w27,w11 // h+=X[i] - eor w16,w16,w24,ror#11 - eor w2,w2,w13,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w20,w21 // a^b, b^c in next round - eor w16,w16,w24,ror#25 // Sigma1(e) - eor w3,w3,w20,ror#13 - add w27,w27,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w1,w1,w10,ror#19 - eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) - add w27,w27,w16 // h+=Sigma1(e) - eor w28,w28,w21 // Maj(a,b,c) - eor w17,w3,w20,ror#22 // Sigma0(a) - eor w1,w1,w10,lsr#10 // sigma1(X[i+14]) - add w12,w12,w5 - add w23,w23,w27 // d+=h - add w27,w27,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w12,w12,w2 - add w27,w27,w17 // h+=Sigma0(a) - add w12,w12,w1 - ldr w1,[sp,#8] - str w4,[sp,#4] - ror w16,w23,#6 - add w26,w26,w28 // h+=K[i] - ror w3,w14,#7 - and w17,w24,w23 - ror w2,w11,#17 - bic w28,w25,w23 - ror w4,w27,#2 - add w26,w26,w12 // h+=X[i] - eor w16,w16,w23,ror#11 - eor w3,w3,w14,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w27,w20 // a^b, b^c in next round - eor w16,w16,w23,ror#25 // Sigma1(e) - eor w4,w4,w27,ror#13 - add w26,w26,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w2,w2,w11,ror#19 - eor 
w3,w3,w14,lsr#3 // sigma0(X[i+1]) - add w26,w26,w16 // h+=Sigma1(e) - eor w19,w19,w20 // Maj(a,b,c) - eor w17,w4,w27,ror#22 // Sigma0(a) - eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) - add w13,w13,w6 - add w22,w22,w26 // d+=h - add w26,w26,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w13,w13,w3 - add w26,w26,w17 // h+=Sigma0(a) - add w13,w13,w2 - ldr w2,[sp,#12] - str w5,[sp,#8] - ror w16,w22,#6 - add w25,w25,w19 // h+=K[i] - ror w4,w15,#7 - and w17,w23,w22 - ror w3,w12,#17 - bic w19,w24,w22 - ror w5,w26,#2 - add w25,w25,w13 // h+=X[i] - eor w16,w16,w22,ror#11 - eor w4,w4,w15,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w26,w27 // a^b, b^c in next round - eor w16,w16,w22,ror#25 // Sigma1(e) - eor w5,w5,w26,ror#13 - add w25,w25,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w3,w3,w12,ror#19 - eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) - add w25,w25,w16 // h+=Sigma1(e) - eor w28,w28,w27 // Maj(a,b,c) - eor w17,w5,w26,ror#22 // Sigma0(a) - eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) - add w14,w14,w7 - add w21,w21,w25 // d+=h - add w25,w25,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w14,w14,w4 - add w25,w25,w17 // h+=Sigma0(a) - add w14,w14,w3 - ldr w3,[sp,#0] - str w6,[sp,#12] - ror w16,w21,#6 - add w24,w24,w28 // h+=K[i] - ror w5,w0,#7 - and w17,w22,w21 - ror w4,w13,#17 - bic w28,w23,w21 - ror w6,w25,#2 - add w24,w24,w14 // h+=X[i] - eor w16,w16,w21,ror#11 - eor w5,w5,w0,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w25,w26 // a^b, b^c in next round - eor w16,w16,w21,ror#25 // Sigma1(e) - eor w6,w6,w25,ror#13 - add w24,w24,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w4,w4,w13,ror#19 - eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) - add w24,w24,w16 // h+=Sigma1(e) - eor w19,w19,w26 // Maj(a,b,c) - eor w17,w6,w25,ror#22 // Sigma0(a) - eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) - add w15,w15,w8 - add w20,w20,w24 // d+=h - add w24,w24,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w15,w15,w5 - add w24,w24,w17 // h+=Sigma0(a) - add w15,w15,w4 - ldr w4,[sp,#4] - str w7,[sp,#0] - ror w16,w20,#6 - add w23,w23,w19 // h+=K[i] - ror w6,w1,#7 - and w17,w21,w20 - ror w5,w14,#17 - bic w19,w22,w20 - ror w7,w24,#2 - add w23,w23,w15 // h+=X[i] - eor w16,w16,w20,ror#11 - eor w6,w6,w1,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w24,w25 // a^b, b^c in next round - eor w16,w16,w20,ror#25 // Sigma1(e) - eor w7,w7,w24,ror#13 - add w23,w23,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w5,w5,w14,ror#19 - eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) - add w23,w23,w16 // h+=Sigma1(e) - eor w28,w28,w25 // Maj(a,b,c) - eor w17,w7,w24,ror#22 // Sigma0(a) - eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) - add w0,w0,w9 - add w27,w27,w23 // d+=h - add w23,w23,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w0,w0,w6 - add w23,w23,w17 // h+=Sigma0(a) - add w0,w0,w5 - ldr w5,[sp,#8] - str w8,[sp,#4] - ror w16,w27,#6 - add w22,w22,w28 // h+=K[i] - ror w7,w2,#7 - and w17,w20,w27 - ror w6,w15,#17 - bic w28,w21,w27 - ror w8,w23,#2 - add w22,w22,w0 // h+=X[i] - eor w16,w16,w27,ror#11 - eor w7,w7,w2,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w23,w24 // a^b, b^c in next round - eor w16,w16,w27,ror#25 // Sigma1(e) - eor w8,w8,w23,ror#13 - add w22,w22,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w6,w6,w15,ror#19 - eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) - add w22,w22,w16 // h+=Sigma1(e) - eor w19,w19,w24 // Maj(a,b,c) - eor w17,w8,w23,ror#22 // Sigma0(a) - eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) - add 
w1,w1,w10 - add w26,w26,w22 // d+=h - add w22,w22,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w1,w1,w7 - add w22,w22,w17 // h+=Sigma0(a) - add w1,w1,w6 - ldr w6,[sp,#12] - str w9,[sp,#8] - ror w16,w26,#6 - add w21,w21,w19 // h+=K[i] - ror w8,w3,#7 - and w17,w27,w26 - ror w7,w0,#17 - bic w19,w20,w26 - ror w9,w22,#2 - add w21,w21,w1 // h+=X[i] - eor w16,w16,w26,ror#11 - eor w8,w8,w3,ror#18 - orr w17,w17,w19 // Ch(e,f,g) - eor w19,w22,w23 // a^b, b^c in next round - eor w16,w16,w26,ror#25 // Sigma1(e) - eor w9,w9,w22,ror#13 - add w21,w21,w17 // h+=Ch(e,f,g) - and w28,w28,w19 // (b^c)&=(a^b) - eor w7,w7,w0,ror#19 - eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) - add w21,w21,w16 // h+=Sigma1(e) - eor w28,w28,w23 // Maj(a,b,c) - eor w17,w9,w22,ror#22 // Sigma0(a) - eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) - add w2,w2,w11 - add w25,w25,w21 // d+=h - add w21,w21,w28 // h+=Maj(a,b,c) - ldr w28,[x30],#4 // *K++, w19 in next round - add w2,w2,w8 - add w21,w21,w17 // h+=Sigma0(a) - add w2,w2,w7 - ldr w7,[sp,#0] - str w10,[sp,#12] - ror w16,w25,#6 - add w20,w20,w28 // h+=K[i] - ror w9,w4,#7 - and w17,w26,w25 - ror w8,w1,#17 - bic w28,w27,w25 - ror w10,w21,#2 - add w20,w20,w2 // h+=X[i] - eor w16,w16,w25,ror#11 - eor w9,w9,w4,ror#18 - orr w17,w17,w28 // Ch(e,f,g) - eor w28,w21,w22 // a^b, b^c in next round - eor w16,w16,w25,ror#25 // Sigma1(e) - eor w10,w10,w21,ror#13 - add w20,w20,w17 // h+=Ch(e,f,g) - and w19,w19,w28 // (b^c)&=(a^b) - eor w8,w8,w1,ror#19 - eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) - add w20,w20,w16 // h+=Sigma1(e) - eor w19,w19,w22 // Maj(a,b,c) - eor w17,w10,w21,ror#22 // Sigma0(a) - eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) - add w3,w3,w12 - add w24,w24,w20 // d+=h - add w20,w20,w19 // h+=Maj(a,b,c) - ldr w19,[x30],#4 // *K++, w28 in next round - add w3,w3,w9 - add w20,w20,w17 // h+=Sigma0(a) - add w3,w3,w8 - cbnz w19,.Loop_16_xx - - ldp x0,x2,[x29,#96] - ldr x1,[x29,#112] - sub x30,x30,#260 // rewind - - ldp w3,w4,[x0] - ldp w5,w6,[x0,#2*4] - add x1,x1,#14*4 // advance input pointer - ldp w7,w8,[x0,#4*4] - add w20,w20,w3 - ldp w9,w10,[x0,#6*4] - add w21,w21,w4 - add w22,w22,w5 - add w23,w23,w6 - stp w20,w21,[x0] - add w24,w24,w7 - add w25,w25,w8 - stp w22,w23,[x0,#2*4] - add w26,w26,w9 - add w27,w27,w10 - cmp x1,x2 - stp w24,w25,[x0,#4*4] - stp w26,w27,[x0,#6*4] - b.ne .Loop - - ldp x19,x20,[x29,#16] - add sp,sp,#4*4 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#128 - ret -.size sha256_block_data_order,.-sha256_block_data_order - -.align 6 -.type K256,%object -K256: - .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - .long 0 //terminator -.size K256,.-K256 -.align 3 -.LOPENSSL_armcap_P: - .quad OPENSSL_armcap_P-. 
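(Reader context, not part of the patch: the K256 table above is not opaque data; it holds the first 32 bits of the fractional parts of the cube roots of the first 64 primes, as FIPS 180-4 specifies. A small C sketch that regenerates the table, handy when verifying hard-coded constants like these; double precision happens to suffice for all 64 values:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        int found = 0;
        for (unsigned n = 2; found < 64; n++) {
            int prime = 1;
            for (unsigned d = 2; d * d <= n; d++)
                if (n % d == 0) { prime = 0; break; }
            if (!prime) continue;
            double r = cbrt((double)n);                /* link with -lm */
            unsigned k = (unsigned)((r - floor(r)) * 4294967296.0);
            /* first row: 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 */
            printf("0x%08x%c", k, (++found % 4) ? ',' : '\n');
        }
        return 0;
    }
)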
-.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by " -.align 2 -.type sha256_block_armv8,%function -.align 6 -sha256_block_armv8: -.Lv8_entry: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - ld1 {v0.4s,v1.4s},[x0] - adr x3,K256 - -.Loop_hw: - ld1 {v4.16b-v7.16b},[x1],#64 - sub x2,x2,#1 - ld1 {v16.4s},[x3],#16 - rev32 v4.16b,v4.16b - rev32 v5.16b,v5.16b - rev32 v6.16b,v6.16b - rev32 v7.16b,v7.16b - orr v18.16b,v0.16b,v0.16b // offload - orr v19.16b,v1.16b,v1.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s - .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s - .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s - .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s - .inst 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s - .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s - .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s - .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s - .inst 0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s - .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s - .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v6.4s - .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v7.4s - .inst 
0x5e282887 //sha256su0 v7.16b,v4.16b - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b - ld1 {v17.4s},[x3],#16 - add v16.4s,v16.4s,v4.4s - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - - ld1 {v16.4s},[x3],#16 - add v17.4s,v17.4s,v5.4s - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - - ld1 {v17.4s},[x3] - add v16.4s,v16.4s,v6.4s - sub x3,x3,#64*4-16 // rewind - orr v2.16b,v0.16b,v0.16b - .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s - .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s - - add v17.4s,v17.4s,v7.4s - orr v2.16b,v0.16b,v0.16b - .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s - .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s - - add v0.4s,v0.4s,v18.4s - add v1.4s,v1.4s,v19.4s - - cbnz x2,.Loop_hw - - st1 {v0.4s,v1.4s},[x0] - - ldr x29,[sp],#16 - ret -.size sha256_block_armv8,.-sha256_block_armv8 -.comm OPENSSL_armcap_P,4,4 diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.pl b/app/openssl/crypto/sha/asm/sha512-armv4.pl index 71aa9356..7faf37b1 100644 --- a/app/openssl/crypto/sha/asm/sha512-armv4.pl +++ b/app/openssl/crypto/sha/asm/sha512-armv4.pl @@ -565,7 +565,7 @@ $code.=<<___; bne .Loop_neon vldmia sp!,{d8-d15} @ epilogue - ret @ bx lr + bx lr #endif ___ } @@ -578,6 +578,5 @@ ___ $code =~ s/\`([^\`]*)\`/eval $1/gem; $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 -$code =~ s/\bret\b/bx lr/gm; print $code; close STDOUT; # enforce flush diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.s b/app/openssl/crypto/sha/asm/sha512-armv4.s index fd462771..57301922 100644 --- a/app/openssl/crypto/sha/asm/sha512-armv4.s +++ b/app/openssl/crypto/sha/asm/sha512-armv4.s @@ -1775,7 +1775,7 @@ sha512_block_data_order: bne .Loop_neon vldmia sp!,{d8-d15} @ epilogue - bx lr @ .word 0xe12fff1e + .word 0xe12fff1e #endif .size sha512_block_data_order,.-sha512_block_data_order .asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by " diff --git a/app/openssl/crypto/sha/asm/sha512-armv8.S b/app/openssl/crypto/sha/asm/sha512-armv8.S deleted file mode 100644 index 6b0d1940..00000000 --- a/app/openssl/crypto/sha/asm/sha512-armv8.S +++ /dev/null @@ -1,1021 +0,0 @@ -#include "arm_arch.h" - -.text - -.globl sha512_block_data_order -.type sha512_block_data_order,%function -.align 6 -sha512_block_data_order: - stp x29,x30,[sp,#-128]! 
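(Reader context on the sha512-armv4 hunks above, not part of the patch: the revert restores `.word 0xe12fff1e` in the generated .s file because 0xe12fff1e is the fixed ARM encoding of `bx lr`; emitting it as a literal word keeps the file assemblable with -march=armv4, where the assembler rejects the `bx` mnemonic, and the .pl script's `s/\bbx\s+lr\b/.word\t0xe12fff1e/gm` performs that rewrite at generation time. A small C sketch that unpacks the encoding:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t insn = 0xe12fff1e;
        printf("cond  = 0x%x   (0xe = always)\n", insn >> 28);
        printf("fixed = 0x%06x (0x12fff1 = BX register form)\n",
               (insn >> 4) & 0xffffff);
        printf("Rm    = r%u     (r14 = lr)\n", insn & 0xf);
        return 0;
    }
)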
- add x29,sp,#0 - - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#4*8 - - ldp x20,x21,[x0] // load context - ldp x22,x23,[x0,#2*8] - ldp x24,x25,[x0,#4*8] - add x2,x1,x2,lsl#7 // end of input - ldp x26,x27,[x0,#6*8] - adr x30,K512 - stp x0,x2,[x29,#96] - -.Loop: - ldp x3,x4,[x1],#2*8 - ldr x19,[x30],#8 // *K++ - eor x28,x21,x22 // magic seed - str x1,[x29,#112] -#ifndef __ARMEB__ - rev x3,x3 // 0 -#endif - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - eor x6,x24,x24,ror#23 - and x17,x25,x24 - bic x19,x26,x24 - add x27,x27,x3 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x6,ror#18 // Sigma1(e) - ror x6,x20,#28 - add x27,x27,x17 // h+=Ch(e,f,g) - eor x17,x20,x20,ror#5 - add x27,x27,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x23,x23,x27 // d+=h - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x6,x17,ror#34 // Sigma0(a) - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x4,x4 // 1 -#endif - ldp x5,x6,[x1],#2*8 - add x27,x27,x17 // h+=Sigma0(a) - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - eor x7,x23,x23,ror#23 - and x17,x24,x23 - bic x28,x25,x23 - add x26,x26,x4 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x7,ror#18 // Sigma1(e) - ror x7,x27,#28 - add x26,x26,x17 // h+=Ch(e,f,g) - eor x17,x27,x27,ror#5 - add x26,x26,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x22,x22,x26 // d+=h - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x7,x17,ror#34 // Sigma0(a) - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x5,x5 // 2 -#endif - add x26,x26,x17 // h+=Sigma0(a) - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - eor x8,x22,x22,ror#23 - and x17,x23,x22 - bic x19,x24,x22 - add x25,x25,x5 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x8,ror#18 // Sigma1(e) - ror x8,x26,#28 - add x25,x25,x17 // h+=Ch(e,f,g) - eor x17,x26,x26,ror#5 - add x25,x25,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x21,x21,x25 // d+=h - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x8,x17,ror#34 // Sigma0(a) - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x6,x6 // 3 -#endif - ldp x7,x8,[x1],#2*8 - add x25,x25,x17 // h+=Sigma0(a) - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - eor x9,x21,x21,ror#23 - and x17,x22,x21 - bic x28,x23,x21 - add x24,x24,x6 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x9,ror#18 // Sigma1(e) - ror x9,x25,#28 - add x24,x24,x17 // h+=Ch(e,f,g) - eor x17,x25,x25,ror#5 - add x24,x24,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x20,x20,x24 // d+=h - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x9,x17,ror#34 // Sigma0(a) - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x7,x7 // 4 -#endif - add x24,x24,x17 // h+=Sigma0(a) - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - eor x10,x20,x20,ror#23 - and x17,x21,x20 - bic x19,x22,x20 - add x23,x23,x7 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x10,ror#18 // Sigma1(e) - ror x10,x24,#28 - add x23,x23,x17 // h+=Ch(e,f,g) - 
eor x17,x24,x24,ror#5 - add x23,x23,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x27,x27,x23 // d+=h - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x10,x17,ror#34 // Sigma0(a) - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x8,x8 // 5 -#endif - ldp x9,x10,[x1],#2*8 - add x23,x23,x17 // h+=Sigma0(a) - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - eor x11,x27,x27,ror#23 - and x17,x20,x27 - bic x28,x21,x27 - add x22,x22,x8 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x11,ror#18 // Sigma1(e) - ror x11,x23,#28 - add x22,x22,x17 // h+=Ch(e,f,g) - eor x17,x23,x23,ror#5 - add x22,x22,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x26,x26,x22 // d+=h - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x11,x17,ror#34 // Sigma0(a) - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x9,x9 // 6 -#endif - add x22,x22,x17 // h+=Sigma0(a) - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - eor x12,x26,x26,ror#23 - and x17,x27,x26 - bic x19,x20,x26 - add x21,x21,x9 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x12,ror#18 // Sigma1(e) - ror x12,x22,#28 - add x21,x21,x17 // h+=Ch(e,f,g) - eor x17,x22,x22,ror#5 - add x21,x21,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x25,x25,x21 // d+=h - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x12,x17,ror#34 // Sigma0(a) - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x10,x10 // 7 -#endif - ldp x11,x12,[x1],#2*8 - add x21,x21,x17 // h+=Sigma0(a) - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - eor x13,x25,x25,ror#23 - and x17,x26,x25 - bic x28,x27,x25 - add x20,x20,x10 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x13,ror#18 // Sigma1(e) - ror x13,x21,#28 - add x20,x20,x17 // h+=Ch(e,f,g) - eor x17,x21,x21,ror#5 - add x20,x20,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x24,x24,x20 // d+=h - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x13,x17,ror#34 // Sigma0(a) - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x20,x20,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x11,x11 // 8 -#endif - add x20,x20,x17 // h+=Sigma0(a) - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - eor x14,x24,x24,ror#23 - and x17,x25,x24 - bic x19,x26,x24 - add x27,x27,x11 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x14,ror#18 // Sigma1(e) - ror x14,x20,#28 - add x27,x27,x17 // h+=Ch(e,f,g) - eor x17,x20,x20,ror#5 - add x27,x27,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x23,x23,x27 // d+=h - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x14,x17,ror#34 // Sigma0(a) - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x12,x12 // 9 -#endif - ldp x13,x14,[x1],#2*8 - add x27,x27,x17 // h+=Sigma0(a) - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - eor x15,x23,x23,ror#23 - and x17,x24,x23 - bic x28,x25,x23 - add x26,x26,x12 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x15,ror#18 // Sigma1(e) - ror x15,x27,#28 - add x26,x26,x17 // h+=Ch(e,f,g) - eor x17,x27,x27,ror#5 - add x26,x26,x16 // 
h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x22,x22,x26 // d+=h - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x15,x17,ror#34 // Sigma0(a) - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x13,x13 // 10 -#endif - add x26,x26,x17 // h+=Sigma0(a) - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - eor x0,x22,x22,ror#23 - and x17,x23,x22 - bic x19,x24,x22 - add x25,x25,x13 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x0,ror#18 // Sigma1(e) - ror x0,x26,#28 - add x25,x25,x17 // h+=Ch(e,f,g) - eor x17,x26,x26,ror#5 - add x25,x25,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x21,x21,x25 // d+=h - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x0,x17,ror#34 // Sigma0(a) - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x14,x14 // 11 -#endif - ldp x15,x0,[x1],#2*8 - add x25,x25,x17 // h+=Sigma0(a) - str x6,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - eor x6,x21,x21,ror#23 - and x17,x22,x21 - bic x28,x23,x21 - add x24,x24,x14 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x6,ror#18 // Sigma1(e) - ror x6,x25,#28 - add x24,x24,x17 // h+=Ch(e,f,g) - eor x17,x25,x25,ror#5 - add x24,x24,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x20,x20,x24 // d+=h - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x6,x17,ror#34 // Sigma0(a) - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x15,x15 // 12 -#endif - add x24,x24,x17 // h+=Sigma0(a) - str x7,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - eor x7,x20,x20,ror#23 - and x17,x21,x20 - bic x19,x22,x20 - add x23,x23,x15 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x7,ror#18 // Sigma1(e) - ror x7,x24,#28 - add x23,x23,x17 // h+=Ch(e,f,g) - eor x17,x24,x24,ror#5 - add x23,x23,x16 // h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x27,x27,x23 // d+=h - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x7,x17,ror#34 // Sigma0(a) - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x0,x0 // 13 -#endif - ldp x1,x2,[x1] - add x23,x23,x17 // h+=Sigma0(a) - str x8,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - eor x8,x27,x27,ror#23 - and x17,x20,x27 - bic x28,x21,x27 - add x22,x22,x0 // h+=X[i] - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x8,ror#18 // Sigma1(e) - ror x8,x23,#28 - add x22,x22,x17 // h+=Ch(e,f,g) - eor x17,x23,x23,ror#5 - add x22,x22,x16 // h+=Sigma1(e) - and x19,x19,x28 // (b^c)&=(a^b) - add x26,x26,x22 // d+=h - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x8,x17,ror#34 // Sigma0(a) - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x1,x1 // 14 -#endif - ldr x6,[sp,#24] - add x22,x22,x17 // h+=Sigma0(a) - str x9,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - eor x9,x26,x26,ror#23 - and x17,x27,x26 - bic x19,x20,x26 - add x21,x21,x1 // h+=X[i] - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x9,ror#18 // Sigma1(e) - ror x9,x22,#28 - add x21,x21,x17 // h+=Ch(e,f,g) - eor x17,x22,x22,ror#5 - add x21,x21,x16 // 
h+=Sigma1(e) - and x28,x28,x19 // (b^c)&=(a^b) - add x25,x25,x21 // d+=h - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x9,x17,ror#34 // Sigma0(a) - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ - rev x2,x2 // 15 -#endif - ldr x7,[sp,#0] - add x21,x21,x17 // h+=Sigma0(a) - str x10,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x9,x4,#1 - and x17,x26,x25 - ror x8,x1,#19 - bic x28,x27,x25 - ror x10,x21,#28 - add x20,x20,x2 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x9,x9,x4,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x10,x10,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x8,x8,x1,ror#61 - eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x10,x21,ror#39 // Sigma0(a) - eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) - add x3,x3,x12 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x3,x3,x9 - add x20,x20,x17 // h+=Sigma0(a) - add x3,x3,x8 -.Loop_16_xx: - ldr x8,[sp,#8] - str x11,[sp,#0] - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - ror x10,x5,#1 - and x17,x25,x24 - ror x9,x2,#19 - bic x19,x26,x24 - ror x11,x20,#28 - add x27,x27,x3 // h+=X[i] - eor x16,x16,x24,ror#18 - eor x10,x10,x5,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x24,ror#41 // Sigma1(e) - eor x11,x11,x20,ror#34 - add x27,x27,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x9,x9,x2,ror#61 - eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) - add x27,x27,x16 // h+=Sigma1(e) - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x11,x20,ror#39 // Sigma0(a) - eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) - add x4,x4,x13 - add x23,x23,x27 // d+=h - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x4,x4,x10 - add x27,x27,x17 // h+=Sigma0(a) - add x4,x4,x9 - ldr x9,[sp,#16] - str x12,[sp,#8] - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - ror x11,x6,#1 - and x17,x24,x23 - ror x10,x3,#19 - bic x28,x25,x23 - ror x12,x27,#28 - add x26,x26,x4 // h+=X[i] - eor x16,x16,x23,ror#18 - eor x11,x11,x6,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x23,ror#41 // Sigma1(e) - eor x12,x12,x27,ror#34 - add x26,x26,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x10,x10,x3,ror#61 - eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) - add x26,x26,x16 // h+=Sigma1(e) - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x12,x27,ror#39 // Sigma0(a) - eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) - add x5,x5,x14 - add x22,x22,x26 // d+=h - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x5,x5,x11 - add x26,x26,x17 // h+=Sigma0(a) - add x5,x5,x10 - ldr x10,[sp,#24] - str x13,[sp,#16] - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - ror x12,x7,#1 - and x17,x23,x22 - ror x11,x4,#19 - bic x19,x24,x22 - ror x13,x26,#28 - add x25,x25,x5 // h+=X[i] - eor x16,x16,x22,ror#18 - eor x12,x12,x7,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x22,ror#41 // Sigma1(e) - eor x13,x13,x26,ror#34 - add x25,x25,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x11,x11,x4,ror#61 - eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) - add x25,x25,x16 // h+=Sigma1(e) - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x13,x26,ror#39 // Sigma0(a) - eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) 
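(Reader context, not part of the patch: the ror#1/ror#8/lsr#7 and ror#19/ror#61/lsr#6 clusters in the rounds above compute the SHA-512 message-schedule functions sigma0 and sigma1, and the surrounding adds implement the expansion W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]. A minimal C sketch:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    { return (x >> n) | (x << (64 - n)); }

    /* FIPS 180-4 small-sigma functions for SHA-512 */
    static inline uint64_t sigma0(uint64_t x)
    { return ror64(x,1) ^ ror64(x,8) ^ (x >> 7); }
    static inline uint64_t sigma1(uint64_t x)
    { return ror64(x,19) ^ ror64(x,61) ^ (x >> 6); }

    int main(void)
    {
        uint64_t W[80] = {0};
        W[0] = 0x8000000000000000ULL;   /* padded empty message, word 0 */
        for (int i = 16; i < 80; i++)   /* schedule expansion */
            W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16];
        printf("W[16] = 0x%016llx\n", (unsigned long long)W[16]);
        return 0;
    }
)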
- add x6,x6,x15 - add x21,x21,x25 // d+=h - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x6,x6,x12 - add x25,x25,x17 // h+=Sigma0(a) - add x6,x6,x11 - ldr x11,[sp,#0] - str x14,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - ror x13,x8,#1 - and x17,x22,x21 - ror x12,x5,#19 - bic x28,x23,x21 - ror x14,x25,#28 - add x24,x24,x6 // h+=X[i] - eor x16,x16,x21,ror#18 - eor x13,x13,x8,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x21,ror#41 // Sigma1(e) - eor x14,x14,x25,ror#34 - add x24,x24,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x12,x12,x5,ror#61 - eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) - add x24,x24,x16 // h+=Sigma1(e) - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x14,x25,ror#39 // Sigma0(a) - eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) - add x7,x7,x0 - add x20,x20,x24 // d+=h - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x7,x7,x13 - add x24,x24,x17 // h+=Sigma0(a) - add x7,x7,x12 - ldr x12,[sp,#8] - str x15,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - ror x14,x9,#1 - and x17,x21,x20 - ror x13,x6,#19 - bic x19,x22,x20 - ror x15,x24,#28 - add x23,x23,x7 // h+=X[i] - eor x16,x16,x20,ror#18 - eor x14,x14,x9,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x20,ror#41 // Sigma1(e) - eor x15,x15,x24,ror#34 - add x23,x23,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x13,x13,x6,ror#61 - eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) - add x23,x23,x16 // h+=Sigma1(e) - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x15,x24,ror#39 // Sigma0(a) - eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) - add x8,x8,x1 - add x27,x27,x23 // d+=h - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x8,x8,x14 - add x23,x23,x17 // h+=Sigma0(a) - add x8,x8,x13 - ldr x13,[sp,#16] - str x0,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - ror x15,x10,#1 - and x17,x20,x27 - ror x14,x7,#19 - bic x28,x21,x27 - ror x0,x23,#28 - add x22,x22,x8 // h+=X[i] - eor x16,x16,x27,ror#18 - eor x15,x15,x10,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x27,ror#41 // Sigma1(e) - eor x0,x0,x23,ror#34 - add x22,x22,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x14,x14,x7,ror#61 - eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) - add x22,x22,x16 // h+=Sigma1(e) - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x0,x23,ror#39 // Sigma0(a) - eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) - add x9,x9,x2 - add x26,x26,x22 // d+=h - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x9,x9,x15 - add x22,x22,x17 // h+=Sigma0(a) - add x9,x9,x14 - ldr x14,[sp,#24] - str x1,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - ror x0,x11,#1 - and x17,x27,x26 - ror x15,x8,#19 - bic x19,x20,x26 - ror x1,x22,#28 - add x21,x21,x9 // h+=X[i] - eor x16,x16,x26,ror#18 - eor x0,x0,x11,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x26,ror#41 // Sigma1(e) - eor x1,x1,x22,ror#34 - add x21,x21,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x15,x15,x8,ror#61 - eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) - add x21,x21,x16 // h+=Sigma1(e) - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x1,x22,ror#39 // Sigma0(a) - eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) - add x10,x10,x3 - add x25,x25,x21 // d+=h - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x10,x10,x0 - add 
x21,x21,x17 // h+=Sigma0(a) - add x10,x10,x15 - ldr x15,[sp,#0] - str x2,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x1,x12,#1 - and x17,x26,x25 - ror x0,x9,#19 - bic x28,x27,x25 - ror x2,x21,#28 - add x20,x20,x10 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x1,x1,x12,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - eor x16,x16,x25,ror#41 // Sigma1(e) - eor x2,x2,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x0,x0,x9,ror#61 - eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x2,x21,ror#39 // Sigma0(a) - eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) - add x11,x11,x4 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x11,x11,x1 - add x20,x20,x17 // h+=Sigma0(a) - add x11,x11,x0 - ldr x0,[sp,#8] - str x3,[sp,#0] - ror x16,x24,#14 - add x27,x27,x19 // h+=K[i] - ror x2,x13,#1 - and x17,x25,x24 - ror x1,x10,#19 - bic x19,x26,x24 - ror x3,x20,#28 - add x27,x27,x11 // h+=X[i] - eor x16,x16,x24,ror#18 - eor x2,x2,x13,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x20,x21 // a^b, b^c in next round - eor x16,x16,x24,ror#41 // Sigma1(e) - eor x3,x3,x20,ror#34 - add x27,x27,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x1,x1,x10,ror#61 - eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) - add x27,x27,x16 // h+=Sigma1(e) - eor x28,x28,x21 // Maj(a,b,c) - eor x17,x3,x20,ror#39 // Sigma0(a) - eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) - add x12,x12,x5 - add x23,x23,x27 // d+=h - add x27,x27,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x12,x12,x2 - add x27,x27,x17 // h+=Sigma0(a) - add x12,x12,x1 - ldr x1,[sp,#16] - str x4,[sp,#8] - ror x16,x23,#14 - add x26,x26,x28 // h+=K[i] - ror x3,x14,#1 - and x17,x24,x23 - ror x2,x11,#19 - bic x28,x25,x23 - ror x4,x27,#28 - add x26,x26,x12 // h+=X[i] - eor x16,x16,x23,ror#18 - eor x3,x3,x14,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x27,x20 // a^b, b^c in next round - eor x16,x16,x23,ror#41 // Sigma1(e) - eor x4,x4,x27,ror#34 - add x26,x26,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x2,x2,x11,ror#61 - eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) - add x26,x26,x16 // h+=Sigma1(e) - eor x19,x19,x20 // Maj(a,b,c) - eor x17,x4,x27,ror#39 // Sigma0(a) - eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) - add x13,x13,x6 - add x22,x22,x26 // d+=h - add x26,x26,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x13,x13,x3 - add x26,x26,x17 // h+=Sigma0(a) - add x13,x13,x2 - ldr x2,[sp,#24] - str x5,[sp,#16] - ror x16,x22,#14 - add x25,x25,x19 // h+=K[i] - ror x4,x15,#1 - and x17,x23,x22 - ror x3,x12,#19 - bic x19,x24,x22 - ror x5,x26,#28 - add x25,x25,x13 // h+=X[i] - eor x16,x16,x22,ror#18 - eor x4,x4,x15,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x26,x27 // a^b, b^c in next round - eor x16,x16,x22,ror#41 // Sigma1(e) - eor x5,x5,x26,ror#34 - add x25,x25,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x3,x3,x12,ror#61 - eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) - add x25,x25,x16 // h+=Sigma1(e) - eor x28,x28,x27 // Maj(a,b,c) - eor x17,x5,x26,ror#39 // Sigma0(a) - eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) - add x14,x14,x7 - add x21,x21,x25 // d+=h - add x25,x25,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x14,x14,x4 - add x25,x25,x17 // h+=Sigma0(a) - add x14,x14,x3 - ldr x3,[sp,#0] - str x6,[sp,#24] - ror x16,x21,#14 - add x24,x24,x28 // h+=K[i] - ror x5,x0,#1 - and x17,x22,x21 - ror 
x4,x13,#19 - bic x28,x23,x21 - ror x6,x25,#28 - add x24,x24,x14 // h+=X[i] - eor x16,x16,x21,ror#18 - eor x5,x5,x0,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x25,x26 // a^b, b^c in next round - eor x16,x16,x21,ror#41 // Sigma1(e) - eor x6,x6,x25,ror#34 - add x24,x24,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x4,x4,x13,ror#61 - eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) - add x24,x24,x16 // h+=Sigma1(e) - eor x19,x19,x26 // Maj(a,b,c) - eor x17,x6,x25,ror#39 // Sigma0(a) - eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) - add x15,x15,x8 - add x20,x20,x24 // d+=h - add x24,x24,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x15,x15,x5 - add x24,x24,x17 // h+=Sigma0(a) - add x15,x15,x4 - ldr x4,[sp,#8] - str x7,[sp,#0] - ror x16,x20,#14 - add x23,x23,x19 // h+=K[i] - ror x6,x1,#1 - and x17,x21,x20 - ror x5,x14,#19 - bic x19,x22,x20 - ror x7,x24,#28 - add x23,x23,x15 // h+=X[i] - eor x16,x16,x20,ror#18 - eor x6,x6,x1,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x24,x25 // a^b, b^c in next round - eor x16,x16,x20,ror#41 // Sigma1(e) - eor x7,x7,x24,ror#34 - add x23,x23,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x5,x5,x14,ror#61 - eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) - add x23,x23,x16 // h+=Sigma1(e) - eor x28,x28,x25 // Maj(a,b,c) - eor x17,x7,x24,ror#39 // Sigma0(a) - eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) - add x0,x0,x9 - add x27,x27,x23 // d+=h - add x23,x23,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x0,x0,x6 - add x23,x23,x17 // h+=Sigma0(a) - add x0,x0,x5 - ldr x5,[sp,#16] - str x8,[sp,#8] - ror x16,x27,#14 - add x22,x22,x28 // h+=K[i] - ror x7,x2,#1 - and x17,x20,x27 - ror x6,x15,#19 - bic x28,x21,x27 - ror x8,x23,#28 - add x22,x22,x0 // h+=X[i] - eor x16,x16,x27,ror#18 - eor x7,x7,x2,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x23,x24 // a^b, b^c in next round - eor x16,x16,x27,ror#41 // Sigma1(e) - eor x8,x8,x23,ror#34 - add x22,x22,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x6,x6,x15,ror#61 - eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) - add x22,x22,x16 // h+=Sigma1(e) - eor x19,x19,x24 // Maj(a,b,c) - eor x17,x8,x23,ror#39 // Sigma0(a) - eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) - add x1,x1,x10 - add x26,x26,x22 // d+=h - add x22,x22,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x1,x1,x7 - add x22,x22,x17 // h+=Sigma0(a) - add x1,x1,x6 - ldr x6,[sp,#24] - str x9,[sp,#16] - ror x16,x26,#14 - add x21,x21,x19 // h+=K[i] - ror x8,x3,#1 - and x17,x27,x26 - ror x7,x0,#19 - bic x19,x20,x26 - ror x9,x22,#28 - add x21,x21,x1 // h+=X[i] - eor x16,x16,x26,ror#18 - eor x8,x8,x3,ror#8 - orr x17,x17,x19 // Ch(e,f,g) - eor x19,x22,x23 // a^b, b^c in next round - eor x16,x16,x26,ror#41 // Sigma1(e) - eor x9,x9,x22,ror#34 - add x21,x21,x17 // h+=Ch(e,f,g) - and x28,x28,x19 // (b^c)&=(a^b) - eor x7,x7,x0,ror#61 - eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) - add x21,x21,x16 // h+=Sigma1(e) - eor x28,x28,x23 // Maj(a,b,c) - eor x17,x9,x22,ror#39 // Sigma0(a) - eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) - add x2,x2,x11 - add x25,x25,x21 // d+=h - add x21,x21,x28 // h+=Maj(a,b,c) - ldr x28,[x30],#8 // *K++, x19 in next round - add x2,x2,x8 - add x21,x21,x17 // h+=Sigma0(a) - add x2,x2,x7 - ldr x7,[sp,#0] - str x10,[sp,#24] - ror x16,x25,#14 - add x20,x20,x28 // h+=K[i] - ror x9,x4,#1 - and x17,x26,x25 - ror x8,x1,#19 - bic x28,x27,x25 - ror x10,x21,#28 - add x20,x20,x2 // h+=X[i] - eor x16,x16,x25,ror#18 - eor x9,x9,x4,ror#8 - orr x17,x17,x28 // Ch(e,f,g) - eor x28,x21,x22 // a^b, b^c in next round - 
eor x16,x16,x25,ror#41 // Sigma1(e) - eor x10,x10,x21,ror#34 - add x20,x20,x17 // h+=Ch(e,f,g) - and x19,x19,x28 // (b^c)&=(a^b) - eor x8,x8,x1,ror#61 - eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) - add x20,x20,x16 // h+=Sigma1(e) - eor x19,x19,x22 // Maj(a,b,c) - eor x17,x10,x21,ror#39 // Sigma0(a) - eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) - add x3,x3,x12 - add x24,x24,x20 // d+=h - add x20,x20,x19 // h+=Maj(a,b,c) - ldr x19,[x30],#8 // *K++, x28 in next round - add x3,x3,x9 - add x20,x20,x17 // h+=Sigma0(a) - add x3,x3,x8 - cbnz x19,.Loop_16_xx - - ldp x0,x2,[x29,#96] - ldr x1,[x29,#112] - sub x30,x30,#648 // rewind - - ldp x3,x4,[x0] - ldp x5,x6,[x0,#2*8] - add x1,x1,#14*8 // advance input pointer - ldp x7,x8,[x0,#4*8] - add x20,x20,x3 - ldp x9,x10,[x0,#6*8] - add x21,x21,x4 - add x22,x22,x5 - add x23,x23,x6 - stp x20,x21,[x0] - add x24,x24,x7 - add x25,x25,x8 - stp x22,x23,[x0,#2*8] - add x26,x26,x9 - add x27,x27,x10 - cmp x1,x2 - stp x24,x25,[x0,#4*8] - stp x26,x27,[x0,#6*8] - b.ne .Loop - - ldp x19,x20,[x29,#16] - add sp,sp,#4*8 - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#128 - ret -.size sha512_block_data_order,.-sha512_block_data_order - -.align 6 -.type K512,%object -K512: - .quad 0x428a2f98d728ae22,0x7137449123ef65cd - .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc - .quad 0x3956c25bf348b538,0x59f111f1b605d019 - .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 - .quad 0xd807aa98a3030242,0x12835b0145706fbe - .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 - .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 - .quad 0x9bdc06a725c71235,0xc19bf174cf692694 - .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 - .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 - .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 - .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 - .quad 0x983e5152ee66dfab,0xa831c66d2db43210 - .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 - .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 - .quad 0x06ca6351e003826f,0x142929670a0e6e70 - .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 - .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df - .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 - .quad 0x81c2c92e47edaee6,0x92722c851482353b - .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 - .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 - .quad 0xd192e819d6ef5218,0xd69906245565a910 - .quad 0xf40e35855771202a,0x106aa07032bbd1b8 - .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 - .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 - .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb - .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 - .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 - .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec - .quad 0x90befffa23631e28,0xa4506cebde82bde9 - .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b - .quad 0xca273eceea26619c,0xd186b8c721c0c207 - .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 - .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 - .quad 0x113f9804bef90dae,0x1b710b35131c471b - .quad 0x28db77f523047d84,0x32caab7b40c72493 - .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c - .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a - .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 - .quad 0 // terminator -.size K512,.-K512 -.align 3 -.LOPENSSL_armcap_P: - .quad OPENSSL_armcap_P-. 
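[For reference: the Sigma/sigma primitives that the unrolled rounds above compute with ror/eor/lsr chains (the "// Sigma1(e)" and "// sigma0(X[i+1])" sequences) are the standard SHA-512 functions. A minimal self-contained Perl sketch, assuming a perl built with 64-bit integers so that shifts act on unsigned 64-bit values; the rotation counts are taken verbatim from the listing above:

    #!/usr/bin/env perl
    # Reference sketch, not part of the patch: SHA-512 Sigma/sigma
    # primitives matching the ror/eor/lsr chains in the deleted code.
    use strict; use warnings;

    # rotate right within 64 bits (shift counts here are always 1..63)
    sub ror64 { my ($x, $n) = @_; ($x >> $n) | ($x << (64 - $n)) }

    sub Sigma0 { my $x = shift; ror64($x,28) ^ ror64($x,34) ^ ror64($x,39) }
    sub Sigma1 { my $x = shift; ror64($x,14) ^ ror64($x,18) ^ ror64($x,41) }
    sub sigma0 { my $x = shift; ror64($x, 1) ^ ror64($x, 8) ^ ($x >> 7) }
    sub sigma1 { my $x = shift; ror64($x,19) ^ ror64($x,61) ^ ($x >> 6) }

    # e.g. applied to the SHA-512 initial value of the e-register
    printf "%016x\n", Sigma1(0x510e527fade682d1);

Each round of the code above folds these into h += K[i] + X[i] + Sigma1(e) + Ch(e,f,g), then d += h and h += Sigma0(a) + Maj(a,b,c); as the "a^b, b^c in next round" comments note, Maj is built incrementally from an a^b value carried over from the previous round.]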
-.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by " -.align 2 -.comm OPENSSL_armcap_P,4,4 diff --git a/app/openssl/crypto/sha/asm/sha512-armv8.pl b/app/openssl/crypto/sha/asm/sha512-armv8.pl deleted file mode 100644 index 6935ed65..00000000 --- a/app/openssl/crypto/sha/asm/sha512-armv8.pl +++ /dev/null @@ -1,414 +0,0 @@ -#!/usr/bin/env perl -# -# ==================================================================== -# Written by Andy Polyakov for the OpenSSL -# project. The module is, however, dual licensed under OpenSSL and -# CRYPTOGAMS licenses depending on where you obtain it. For further -# details see http://www.openssl.org/~appro/cryptogams/. -# ==================================================================== -# -# SHA256/512 for ARMv8. -# -# Performance in cycles per processed byte and improvement coefficient -# over code generated with "default" compiler: -# -# SHA256-hw SHA256(*) SHA512 -# Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) -# Cortex-A5x n/a n/a n/a -# -# (*) Software SHA256 results are of lesser relevance, presented -# mostly for informational purposes. -# (**) The result is a trade-off: it's possible to improve it by -# 10%, but at the cost of 20% loss on Cortex-A5x. - -$flavour=shift; -$output=shift; -open STDOUT,">$output"; - -if ($output =~ /512/) { - $BITS=512; - $SZ=8; - @Sigma0=(28,34,39); - @Sigma1=(14,18,41); - @sigma0=(1, 8, 7); - @sigma1=(19,61, 6); - $rounds=80; - $reg_t="x"; -} else { - $BITS=256; - $SZ=4; - @Sigma0=( 2,13,22); - @Sigma1=( 6,11,25); - @sigma0=( 7,18, 3); - @sigma1=(17,19,10); - $rounds=64; - $reg_t="w"; -} - -$func="sha${BITS}_block_data_order"; - -($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30)); - -@X=map("$reg_t$_",(3..15,0..2)); -@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27)); -($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28)); - -sub BODY_00_xx { -my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_; -my $j=($i+1)&15; -my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]); - $T0=@X[$i+3] if ($i<11); - -$code.=<<___ if ($i<16); -#ifndef __ARMEB__ - rev @X[$i],@X[$i] // $i -#endif -___ -$code.=<<___ if ($i<13 && ($i&1)); - ldp @X[$i+1],@X[$i+2],[$inp],#2*$SZ -___ -$code.=<<___ if ($i==13); - ldp @X[14],@X[15],[$inp] -___ -$code.=<<___ if ($i>=14); - ldr @X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`] -___ -$code.=<<___ if ($i>0 && $i<16); - add $a,$a,$t1 // h+=Sigma0(a) -___ -$code.=<<___ if ($i>=11); - str @X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`] -___ -# While ARMv8 specifies merged rotate-n-logical operation such as -# 'eor x,y,z,ror#n', it was found to negatively affect performance -# on Apple A7. The reason seems to be that it requires even 'y' to -# be available earlier. This means that such merged instruction is -# not necessarily best choice on critical path... On the other hand -# Cortex-A5x handles merged instructions much better than disjoint -# rotate and logical... See (**) footnote above. 
-$code.=<<___ if ($i<15); - ror $t0,$e,#$Sigma1[0] - add $h,$h,$t2 // h+=K[i] - eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]` - and $t1,$f,$e - bic $t2,$g,$e - add $h,$h,@X[$i&15] // h+=X[i] - orr $t1,$t1,$t2 // Ch(e,f,g) - eor $t2,$a,$b // a^b, b^c in next round - eor $t0,$t0,$T0,ror#$Sigma1[1] // Sigma1(e) - ror $T0,$a,#$Sigma0[0] - add $h,$h,$t1 // h+=Ch(e,f,g) - eor $t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]` - add $h,$h,$t0 // h+=Sigma1(e) - and $t3,$t3,$t2 // (b^c)&=(a^b) - add $d,$d,$h // d+=h - eor $t3,$t3,$b // Maj(a,b,c) - eor $t1,$T0,$t1,ror#$Sigma0[1] // Sigma0(a) - add $h,$h,$t3 // h+=Maj(a,b,c) - ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round - //add $h,$h,$t1 // h+=Sigma0(a) -___ -$code.=<<___ if ($i>=15); - ror $t0,$e,#$Sigma1[0] - add $h,$h,$t2 // h+=K[i] - ror $T1,@X[($j+1)&15],#$sigma0[0] - and $t1,$f,$e - ror $T2,@X[($j+14)&15],#$sigma1[0] - bic $t2,$g,$e - ror $T0,$a,#$Sigma0[0] - add $h,$h,@X[$i&15] // h+=X[i] - eor $t0,$t0,$e,ror#$Sigma1[1] - eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1] - orr $t1,$t1,$t2 // Ch(e,f,g) - eor $t2,$a,$b // a^b, b^c in next round - eor $t0,$t0,$e,ror#$Sigma1[2] // Sigma1(e) - eor $T0,$T0,$a,ror#$Sigma0[1] - add $h,$h,$t1 // h+=Ch(e,f,g) - and $t3,$t3,$t2 // (b^c)&=(a^b) - eor $T2,$T2,@X[($j+14)&15],ror#$sigma1[1] - eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1]) - add $h,$h,$t0 // h+=Sigma1(e) - eor $t3,$t3,$b // Maj(a,b,c) - eor $t1,$T0,$a,ror#$Sigma0[2] // Sigma0(a) - eor $T2,$T2,@X[($j+14)&15],lsr#$sigma1[2] // sigma1(X[i+14]) - add @X[$j],@X[$j],@X[($j+9)&15] - add $d,$d,$h // d+=h - add $h,$h,$t3 // h+=Maj(a,b,c) - ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round - add @X[$j],@X[$j],$T1 - add $h,$h,$t1 // h+=Sigma0(a) - add @X[$j],@X[$j],$T2 -___ - ($t2,$t3)=($t3,$t2); -} - -$code.=<<___; -#include "arm_arch.h" - -.text - -.globl $func -.type $func,%function -.align 6 -$func: -___ -$code.=<<___ if ($SZ==4); - ldr x16,.LOPENSSL_armcap_P - adr x17,.LOPENSSL_armcap_P - add x16,x16,x17 - ldr w16,[x16] - tst w16,#ARMV8_SHA256 - b.ne .Lv8_entry -___ -$code.=<<___; - stp x29,x30,[sp,#-128]! 
- add x29,sp,#0 - - stp x19,x20,[sp,#16] - stp x21,x22,[sp,#32] - stp x23,x24,[sp,#48] - stp x25,x26,[sp,#64] - stp x27,x28,[sp,#80] - sub sp,sp,#4*$SZ - - ldp $A,$B,[$ctx] // load context - ldp $C,$D,[$ctx,#2*$SZ] - ldp $E,$F,[$ctx,#4*$SZ] - add $num,$inp,$num,lsl#`log(16*$SZ)/log(2)` // end of input - ldp $G,$H,[$ctx,#6*$SZ] - adr $Ktbl,K$BITS - stp $ctx,$num,[x29,#96] - -.Loop: - ldp @X[0],@X[1],[$inp],#2*$SZ - ldr $t2,[$Ktbl],#$SZ // *K++ - eor $t3,$B,$C // magic seed - str $inp,[x29,#112] -___ -for ($i=0;$i<16;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); } -$code.=".Loop_16_xx:\n"; -for (;$i<32;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); } -$code.=<<___; - cbnz $t2,.Loop_16_xx - - ldp $ctx,$num,[x29,#96] - ldr $inp,[x29,#112] - sub $Ktbl,$Ktbl,#`$SZ*($rounds+1)` // rewind - - ldp @X[0],@X[1],[$ctx] - ldp @X[2],@X[3],[$ctx,#2*$SZ] - add $inp,$inp,#14*$SZ // advance input pointer - ldp @X[4],@X[5],[$ctx,#4*$SZ] - add $A,$A,@X[0] - ldp @X[6],@X[7],[$ctx,#6*$SZ] - add $B,$B,@X[1] - add $C,$C,@X[2] - add $D,$D,@X[3] - stp $A,$B,[$ctx] - add $E,$E,@X[4] - add $F,$F,@X[5] - stp $C,$D,[$ctx,#2*$SZ] - add $G,$G,@X[6] - add $H,$H,@X[7] - cmp $inp,$num - stp $E,$F,[$ctx,#4*$SZ] - stp $G,$H,[$ctx,#6*$SZ] - b.ne .Loop - - ldp x19,x20,[x29,#16] - add sp,sp,#4*$SZ - ldp x21,x22,[x29,#32] - ldp x23,x24,[x29,#48] - ldp x25,x26,[x29,#64] - ldp x27,x28,[x29,#80] - ldp x29,x30,[sp],#128 - ret -.size $func,.-$func - -.align 6 -.type K$BITS,%object -K$BITS: -___ -$code.=<<___ if ($SZ==8); - .quad 0x428a2f98d728ae22,0x7137449123ef65cd - .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc - .quad 0x3956c25bf348b538,0x59f111f1b605d019 - .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 - .quad 0xd807aa98a3030242,0x12835b0145706fbe - .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 - .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 - .quad 0x9bdc06a725c71235,0xc19bf174cf692694 - .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 - .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 - .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 - .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 - .quad 0x983e5152ee66dfab,0xa831c66d2db43210 - .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 - .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 - .quad 0x06ca6351e003826f,0x142929670a0e6e70 - .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 - .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df - .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 - .quad 0x81c2c92e47edaee6,0x92722c851482353b - .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 - .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 - .quad 0xd192e819d6ef5218,0xd69906245565a910 - .quad 0xf40e35855771202a,0x106aa07032bbd1b8 - .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 - .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 - .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb - .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 - .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 - .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec - .quad 0x90befffa23631e28,0xa4506cebde82bde9 - .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b - .quad 0xca273eceea26619c,0xd186b8c721c0c207 - .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 - .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 - .quad 0x113f9804bef90dae,0x1b710b35131c471b - .quad 0x28db77f523047d84,0x32caab7b40c72493 - .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c - .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a - .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 - .quad 0 // terminator -___ -$code.=<<___ if ($SZ==4); - .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - .long 
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - .long 0 //terminator -___ -$code.=<<___; -.size K$BITS,.-K$BITS -.align 3 -.LOPENSSL_armcap_P: - .quad OPENSSL_armcap_P-. -.asciz "SHA$BITS block transform for ARMv8, CRYPTOGAMS by " -.align 2 -___ - -if ($SZ==4) { -my $Ktbl="x3"; - -my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2)); -my @MSG=map("v$_.16b",(4..7)); -my ($W0,$W1)=("v16.4s","v17.4s"); -my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b"); - -$code.=<<___; -.type sha256_block_armv8,%function -.align 6 -sha256_block_armv8: -.Lv8_entry: - stp x29,x30,[sp,#-16]! - add x29,sp,#0 - - ld1.32 {$ABCD,$EFGH},[$ctx] - adr $Ktbl,K256 - -.Loop_hw: - ld1 {@MSG[0]-@MSG[3]},[$inp],#64 - sub $num,$num,#1 - ld1.32 {$W0},[$Ktbl],#16 - rev32 @MSG[0],@MSG[0] - rev32 @MSG[1],@MSG[1] - rev32 @MSG[2],@MSG[2] - rev32 @MSG[3],@MSG[3] - orr $ABCD_SAVE,$ABCD,$ABCD // offload - orr $EFGH_SAVE,$EFGH,$EFGH -___ -for($i=0;$i<12;$i++) { -$code.=<<___; - ld1.32 {$W1},[$Ktbl],#16 - add.i32 $W0,$W0,@MSG[0] - sha256su0 @MSG[0],@MSG[1] - orr $abcd,$ABCD,$ABCD - sha256h $ABCD,$EFGH,$W0 - sha256h2 $EFGH,$abcd,$W0 - sha256su1 @MSG[0],@MSG[2],@MSG[3] -___ - ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG)); -} -$code.=<<___; - ld1.32 {$W1},[$Ktbl],#16 - add.i32 $W0,$W0,@MSG[0] - orr $abcd,$ABCD,$ABCD - sha256h $ABCD,$EFGH,$W0 - sha256h2 $EFGH,$abcd,$W0 - - ld1.32 {$W0},[$Ktbl],#16 - add.i32 $W1,$W1,@MSG[1] - orr $abcd,$ABCD,$ABCD - sha256h $ABCD,$EFGH,$W1 - sha256h2 $EFGH,$abcd,$W1 - - ld1.32 {$W1},[$Ktbl] - add.i32 $W0,$W0,@MSG[2] - sub $Ktbl,$Ktbl,#$rounds*$SZ-16 // rewind - orr $abcd,$ABCD,$ABCD - sha256h $ABCD,$EFGH,$W0 - sha256h2 $EFGH,$abcd,$W0 - - add.i32 $W1,$W1,@MSG[3] - orr $abcd,$ABCD,$ABCD - sha256h $ABCD,$EFGH,$W1 - sha256h2 $EFGH,$abcd,$W1 - - add.i32 $ABCD,$ABCD,$ABCD_SAVE - add.i32 $EFGH,$EFGH,$EFGH_SAVE - - cbnz $num,.Loop_hw - - st1.32 {$ABCD,$EFGH},[$ctx] - - ldr x29,[sp],#16 - ret -.size sha256_block_armv8,.-sha256_block_armv8 -___ -} - -$code.=<<___; -.comm OPENSSL_armcap_P,4,4 -___ - -{ my %opcode = ( - "sha256h" => 0x5e004000, "sha256h2" => 0x5e005000, - "sha256su0" => 0x5e282800, "sha256su1" => 0x5e006000 ); - - sub unsha256 { - my ($mnemonic,$arg)=@_; - - $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o - && - sprintf ".inst\t0x%08x\t//%s %s", - $opcode{$mnemonic}|$1|($2<<5)|($3<<16), - $mnemonic,$arg; - } -} - -foreach(split("\n",$code)) { - - s/\`([^\`]*)\`/eval($1)/geo; - - s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/geo; - - s/\.\w?32\b//o and s/\.16b/\.4s/go; - m/(ld|st)1[^\[]+\[0\]/o and s/\.4s/\.s/go; - - print $_,"\n"; -} - -close STDOUT; -- cgit v1.2.3
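[The unsha256() helper above exists for assemblers that predate the ARMv8 Cryptography Extension mnemonics: it rebuilds each instruction word by OR-ing the captured register numbers into a fixed opcode (Rd in bits 0-4, Rn shifted to bit 5, Rm to bit 16, per the sprintf in the sub) and emits it as a raw .inst. A minimal standalone sketch of that encoding step, using the sha256h opcode value from the deleted table:

    #!/usr/bin/env perl
    # Standalone sketch of the manual encoding done by unsha256() above.
    use strict; use warnings;

    my %opcode = ( "sha256h" => 0x5e004000 );   # base opcode from the table

    sub encode {
        my ($mnemonic, $rd, $rn, $rm) = @_;
        # pack register fields into the fixed opcode and emit a raw word
        sprintf ".inst\t0x%08x\t//%s\tv%d,v%d,v%d",
            $opcode{$mnemonic} | $rd | ($rn << 5) | ($rm << 16),
            $mnemonic, $rd, $rn, $rm;
    }

    print encode("sha256h", 0, 1, 2), "\n";
    # -> .inst 0x5e024020 //sha256h v0,v1,v2

The closing foreach loop applies the same idea textually: it eval-expands the backtick-ed expressions, rewrites sha256* mnemonics through unsha256(), drops the .32/.i32 type suffixes, and maps .16b arrangements to .4s for older assemblers.]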