Diffstat (limited to 'app/openssl/crypto/sha/asm')
-rw-r--r-- [l---------]  app/openssl/crypto/sha/asm/sha1-armv4-large.S  | 1451
-rw-r--r--               app/openssl/crypto/sha/asm/sha1-armv4-large.pl |  446
-rw-r--r--               app/openssl/crypto/sha/asm/sha1-armv4-large.s  |  452
-rw-r--r--               app/openssl/crypto/sha/asm/sha1-armv8.S        | 1211
-rw-r--r--               app/openssl/crypto/sha/asm/sha1-armv8.pl       |  333
-rw-r--r-- [l---------]  app/openssl/crypto/sha/asm/sha256-armv4.S      | 2691
-rw-r--r--               app/openssl/crypto/sha/asm/sha256-armv4.pl     |  585
-rw-r--r--               app/openssl/crypto/sha/asm/sha256-armv4.s      | 1517
-rw-r--r--               app/openssl/crypto/sha/asm/sha256-armv8.S      | 1141
-rw-r--r-- [l---------]  app/openssl/crypto/sha/asm/sha512-armv4.S      | 1784
-rw-r--r--               app/openssl/crypto/sha/asm/sha512-armv4.pl     |    3
-rw-r--r--               app/openssl/crypto/sha/asm/sha512-armv4.s      | 1783
-rw-r--r--               app/openssl/crypto/sha/asm/sha512-armv8.S      | 1021
-rw-r--r--               app/openssl/crypto/sha/asm/sha512-armv8.pl     |  414
14 files changed, 10998 insertions, 3834 deletions
diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.S b/app/openssl/crypto/sha/asm/sha1-armv4-large.S
index 6523cbdd..a1562883 120000..100644
--- a/app/openssl/crypto/sha/asm/sha1-armv4-large.S
+++ b/app/openssl/crypto/sha/asm/sha1-armv4-large.S
@@ -1 +1,1450 @@
-sha1-armv4-large.s
\ No newline at end of file
+#include "arm_arch.h"
+
+.text
+.code 32
+
+.global sha1_block_data_order
+.type sha1_block_data_order,%function
+
+.align 5
+sha1_block_data_order:
+#if __ARM_ARCH__>=7
+ sub r3,pc,#8 @ sha1_block_data_order
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA1
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
+ stmdb sp!,{r4-r12,lr}
+ add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
+ ldmia r0,{r3,r4,r5,r6,r7}
+.Lloop:
+ ldr r8,.LK_00_19
+ mov r14,sp
+ sub sp,sp,#15*4
+ mov r5,r5,ror#30
+ mov r6,r6,ror#30
+ mov r7,r7,ror#30 @ [6]
+.L_00_15:
+#if __ARM_ARCH__<7
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r5,r6 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ eor r10,r5,r6 @ F_xx_xx
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r4,r10,ror#2
+ add r7,r7,r9 @ E+=X[i]
+ eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r7,r7,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r6,r8,r6,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r4,r5 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r6,r8,r6,ror#2 @ E+=K_00_19
+ eor r10,r4,r5 @ F_xx_xx
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r3,r10,ror#2
+ add r6,r6,r9 @ E+=X[i]
+ eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r6,r6,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r5,r8,r5,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r3,r4 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r5,r8,r5,ror#2 @ E+=K_00_19
+ eor r10,r3,r4 @ F_xx_xx
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r7,r10,ror#2
+ add r5,r5,r9 @ E+=X[i]
+ eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r5,r5,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r4,r8,r4,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r7,r3 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r4,r8,r4,ror#2 @ E+=K_00_19
+ eor r10,r7,r3 @ F_xx_xx
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r6,r10,ror#2
+ add r4,r4,r9 @ E+=X[i]
+ eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r4,r4,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r3,r8,r3,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r6,r7 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r3,r8,r3,ror#2 @ E+=K_00_19
+ eor r10,r6,r7 @ F_xx_xx
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r5,r10,ror#2
+ add r3,r3,r9 @ E+=X[i]
+ eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r3,r3,r10 @ E+=F_00_19(B,C,D)
+ teq r14,sp
+ bne .L_00_15 @ [((11+4)*5+2)*3]
+ sub sp,sp,#25*4
+#if __ARM_ARCH__<7
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r5,r6 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ eor r10,r5,r6 @ F_xx_xx
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r4,r10,ror#2
+ add r7,r7,r9 @ E+=X[i]
+ eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r7,r7,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r4,r5 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r3,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r6,r6,r9 @ E+=X[i]
+ eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
+ add r6,r6,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r3,r4 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r7,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r5,r5,r9 @ E+=X[i]
+ eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
+ add r5,r5,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r7,r3 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r6,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r4,r4,r9 @ E+=X[i]
+ eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
+ add r4,r4,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r6,r7 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r5,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r3,r3,r9 @ E+=X[i]
+ eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
+ add r3,r3,r10 @ E+=F_00_19(B,C,D)
+
+ ldr r8,.LK_20_39 @ [+15+16*4]
+ cmn sp,#0 @ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r7,r8,r7,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r5,r6 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r4,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r7,r7,r9 @ E+=X[i]
+ add r7,r7,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r4,r5 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r3,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r6,r6,r9 @ E+=X[i]
+ add r6,r6,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r3,r4 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r7,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r5,r5,r9 @ E+=X[i]
+ add r5,r5,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r7,r3 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r6,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r4,r4,r9 @ E+=X[i]
+ add r4,r4,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r6,r7 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r5,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r3,r3,r9 @ E+=X[i]
+ add r3,r3,r10 @ E+=F_20_39(B,C,D)
+ teq r14,sp @ preserve carry
+ bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
+ bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
+
+ ldr r8,.LK_40_59
+ sub sp,sp,#20*4 @ [+2]
+.L_40_59:
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r7,r8,r7,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r5,r6 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r4,r10,ror#2 @ F_xx_xx
+ and r11,r5,r6 @ F_xx_xx
+ add r7,r7,r9 @ E+=X[i]
+ add r7,r7,r10 @ E+=F_40_59(B,C,D)
+ add r7,r7,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r4,r5 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r3,r10,ror#2 @ F_xx_xx
+ and r11,r4,r5 @ F_xx_xx
+ add r6,r6,r9 @ E+=X[i]
+ add r6,r6,r10 @ E+=F_40_59(B,C,D)
+ add r6,r6,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r3,r4 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r7,r10,ror#2 @ F_xx_xx
+ and r11,r3,r4 @ F_xx_xx
+ add r5,r5,r9 @ E+=X[i]
+ add r5,r5,r10 @ E+=F_40_59(B,C,D)
+ add r5,r5,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r7,r3 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r6,r10,ror#2 @ F_xx_xx
+ and r11,r7,r3 @ F_xx_xx
+ add r4,r4,r9 @ E+=X[i]
+ add r4,r4,r10 @ E+=F_40_59(B,C,D)
+ add r4,r4,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r6,r7 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r5,r10,ror#2 @ F_xx_xx
+ and r11,r6,r7 @ F_xx_xx
+ add r3,r3,r9 @ E+=X[i]
+ add r3,r3,r10 @ E+=F_40_59(B,C,D)
+ add r3,r3,r11,ror#2
+ teq r14,sp
+ bne .L_40_59 @ [+((12+5)*5+2)*4]
+
+ ldr r8,.LK_60_79
+ sub sp,sp,#20*4
+ cmp sp,#0 @ set carry to denote 60_79
+ b .L_20_39_or_60_79 @ [+4], spare 300 bytes
+.L_done:
+ add sp,sp,#80*4 @ "deallocate" stack frame
+ ldmia r0,{r8,r9,r10,r11,r12}
+ add r3,r8,r3
+ add r4,r9,r4
+ add r5,r10,r5,ror#2
+ add r6,r11,r6,ror#2
+ add r7,r12,r7,ror#2
+ stmia r0,{r3,r4,r5,r6,r7}
+ teq r1,r2
+ bne .Lloop @ [+18], total 1307
+
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size sha1_block_data_order,.-sha1_block_data_order
+
+.align 5
+.LK_00_19: .word 0x5a827999
+.LK_20_39: .word 0x6ed9eba1
+.LK_40_59: .word 0x8f1bbcdc
+.LK_60_79: .word 0xca62c1d6
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha1_block_data_order
+.asciz "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 5
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type sha1_block_data_order_neon,%function
+.align 4
+sha1_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+ add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
+ @ dmb @ errata #451034 on early Cortex A8
+ @ vstmdb sp!,{d8-d15} @ ABI specification says so
+ mov r14,sp
+ sub sp,sp,#64 @ alloca
+ adr r8,.LK_00_19
+ bic sp,sp,#15 @ align for 128-bit stores
+
+ ldmia r0,{r3,r4,r5,r6,r7} @ load context
+ mov r12,sp
+
+ vld1.8 {q0-q1},[r1]! @ handles unaligned
+ veor q15,q15,q15
+ vld1.8 {q2-q3},[r1]!
+ vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19
+ vrev32.8 q0,q0 @ yes, even on
+ vrev32.8 q1,q1 @ big-endian...
+ vrev32.8 q2,q2
+ vadd.i32 q8,q0,q14
+ vrev32.8 q3,q3
+ vadd.i32 q9,q1,q14
+ vst1.32 {q8},[r12,:128]!
+ vadd.i32 q10,q2,q14
+ vst1.32 {q9},[r12,:128]!
+ vst1.32 {q10},[r12,:128]!
+ ldr r9,[sp] @ big RAW stall
+
+.Loop_neon:
+ vext.8 q8,q0,q1,#8
+ bic r10,r6,r4
+ add r7,r7,r9
+ and r11,r5,r4
+ vadd.i32 q13,q3,q14
+ ldr r9,[sp,#4]
+ add r7,r7,r3,ror#27
+ vext.8 q12,q3,q15,#4
+ eor r11,r11,r10
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ veor q8,q8,q0
+ bic r10,r5,r3
+ add r6,r6,r9
+ veor q12,q12,q2
+ and r11,r4,r3
+ ldr r9,[sp,#8]
+ veor q12,q12,q8
+ add r6,r6,r7,ror#27
+ eor r11,r11,r10
+ vst1.32 {q13},[r12,:128]!
+ sub r12,r12,#64
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ vext.8 q13,q15,q12,#4
+ bic r10,r4,r7
+ add r5,r5,r9
+ vadd.i32 q8,q12,q12
+ and r11,r3,r7
+ ldr r9,[sp,#12]
+ vsri.32 q8,q12,#31
+ add r5,r5,r6,ror#27
+ eor r11,r11,r10
+ mov r7,r7,ror#2
+ vshr.u32 q12,q13,#30
+ add r5,r5,r11
+ bic r10,r3,r6
+ vshl.u32 q13,q13,#2
+ add r4,r4,r9
+ and r11,r7,r6
+ veor q8,q8,q12
+ ldr r9,[sp,#16]
+ add r4,r4,r5,ror#27
+ veor q8,q8,q13
+ eor r11,r11,r10
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ vext.8 q9,q1,q2,#8
+ bic r10,r7,r5
+ add r3,r3,r9
+ and r11,r6,r5
+ vadd.i32 q13,q8,q14
+ ldr r9,[sp,#20]
+ vld1.32 {d28[],d29[]},[r8,:32]!
+ add r3,r3,r4,ror#27
+ vext.8 q12,q8,q15,#4
+ eor r11,r11,r10
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ veor q9,q9,q1
+ bic r10,r6,r4
+ add r7,r7,r9
+ veor q12,q12,q3
+ and r11,r5,r4
+ ldr r9,[sp,#24]
+ veor q12,q12,q9
+ add r7,r7,r3,ror#27
+ eor r11,r11,r10
+ vst1.32 {q13},[r12,:128]!
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ vext.8 q13,q15,q12,#4
+ bic r10,r5,r3
+ add r6,r6,r9
+ vadd.i32 q9,q12,q12
+ and r11,r4,r3
+ ldr r9,[sp,#28]
+ vsri.32 q9,q12,#31
+ add r6,r6,r7,ror#27
+ eor r11,r11,r10
+ mov r3,r3,ror#2
+ vshr.u32 q12,q13,#30
+ add r6,r6,r11
+ bic r10,r4,r7
+ vshl.u32 q13,q13,#2
+ add r5,r5,r9
+ and r11,r3,r7
+ veor q9,q9,q12
+ ldr r9,[sp,#32]
+ add r5,r5,r6,ror#27
+ veor q9,q9,q13
+ eor r11,r11,r10
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ vext.8 q10,q2,q3,#8
+ bic r10,r3,r6
+ add r4,r4,r9
+ and r11,r7,r6
+ vadd.i32 q13,q9,q14
+ ldr r9,[sp,#36]
+ add r4,r4,r5,ror#27
+ vext.8 q12,q9,q15,#4
+ eor r11,r11,r10
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ veor q10,q10,q2
+ bic r10,r7,r5
+ add r3,r3,r9
+ veor q12,q12,q8
+ and r11,r6,r5
+ ldr r9,[sp,#40]
+ veor q12,q12,q10
+ add r3,r3,r4,ror#27
+ eor r11,r11,r10
+ vst1.32 {q13},[r12,:128]!
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ vext.8 q13,q15,q12,#4
+ bic r10,r6,r4
+ add r7,r7,r9
+ vadd.i32 q10,q12,q12
+ and r11,r5,r4
+ ldr r9,[sp,#44]
+ vsri.32 q10,q12,#31
+ add r7,r7,r3,ror#27
+ eor r11,r11,r10
+ mov r4,r4,ror#2
+ vshr.u32 q12,q13,#30
+ add r7,r7,r11
+ bic r10,r5,r3
+ vshl.u32 q13,q13,#2
+ add r6,r6,r9
+ and r11,r4,r3
+ veor q10,q10,q12
+ ldr r9,[sp,#48]
+ add r6,r6,r7,ror#27
+ veor q10,q10,q13
+ eor r11,r11,r10
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ vext.8 q11,q3,q8,#8
+ bic r10,r4,r7
+ add r5,r5,r9
+ and r11,r3,r7
+ vadd.i32 q13,q10,q14
+ ldr r9,[sp,#52]
+ add r5,r5,r6,ror#27
+ vext.8 q12,q10,q15,#4
+ eor r11,r11,r10
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ veor q11,q11,q3
+ bic r10,r3,r6
+ add r4,r4,r9
+ veor q12,q12,q9
+ and r11,r7,r6
+ ldr r9,[sp,#56]
+ veor q12,q12,q11
+ add r4,r4,r5,ror#27
+ eor r11,r11,r10
+ vst1.32 {q13},[r12,:128]!
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ vext.8 q13,q15,q12,#4
+ bic r10,r7,r5
+ add r3,r3,r9
+ vadd.i32 q11,q12,q12
+ and r11,r6,r5
+ ldr r9,[sp,#60]
+ vsri.32 q11,q12,#31
+ add r3,r3,r4,ror#27
+ eor r11,r11,r10
+ mov r5,r5,ror#2
+ vshr.u32 q12,q13,#30
+ add r3,r3,r11
+ bic r10,r6,r4
+ vshl.u32 q13,q13,#2
+ add r7,r7,r9
+ and r11,r5,r4
+ veor q11,q11,q12
+ ldr r9,[sp,#0]
+ add r7,r7,r3,ror#27
+ veor q11,q11,q13
+ eor r11,r11,r10
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ vext.8 q12,q10,q11,#8
+ bic r10,r5,r3
+ add r6,r6,r9
+ and r11,r4,r3
+ veor q0,q0,q8
+ ldr r9,[sp,#4]
+ add r6,r6,r7,ror#27
+ veor q0,q0,q1
+ eor r11,r11,r10
+ mov r3,r3,ror#2
+ vadd.i32 q13,q11,q14
+ add r6,r6,r11
+ bic r10,r4,r7
+ veor q12,q12,q0
+ add r5,r5,r9
+ and r11,r3,r7
+ vshr.u32 q0,q12,#30
+ ldr r9,[sp,#8]
+ add r5,r5,r6,ror#27
+ vst1.32 {q13},[r12,:128]!
+ sub r12,r12,#64
+ eor r11,r11,r10
+ mov r7,r7,ror#2
+ vsli.32 q0,q12,#2
+ add r5,r5,r11
+ bic r10,r3,r6
+ add r4,r4,r9
+ and r11,r7,r6
+ ldr r9,[sp,#12]
+ add r4,r4,r5,ror#27
+ eor r11,r11,r10
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ bic r10,r7,r5
+ add r3,r3,r9
+ and r11,r6,r5
+ ldr r9,[sp,#16]
+ add r3,r3,r4,ror#27
+ eor r11,r11,r10
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ vext.8 q12,q11,q0,#8
+ eor r10,r4,r6
+ add r7,r7,r9
+ ldr r9,[sp,#20]
+ veor q1,q1,q9
+ eor r11,r10,r5
+ add r7,r7,r3,ror#27
+ veor q1,q1,q2
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ vadd.i32 q13,q0,q14
+ eor r10,r3,r5
+ add r6,r6,r9
+ veor q12,q12,q1
+ ldr r9,[sp,#24]
+ eor r11,r10,r4
+ vshr.u32 q1,q12,#30
+ add r6,r6,r7,ror#27
+ mov r3,r3,ror#2
+ vst1.32 {q13},[r12,:128]!
+ add r6,r6,r11
+ eor r10,r7,r4
+ vsli.32 q1,q12,#2
+ add r5,r5,r9
+ ldr r9,[sp,#28]
+ eor r11,r10,r3
+ add r5,r5,r6,ror#27
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ eor r10,r6,r3
+ add r4,r4,r9
+ ldr r9,[sp,#32]
+ eor r11,r10,r7
+ add r4,r4,r5,ror#27
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ vext.8 q12,q0,q1,#8
+ eor r10,r5,r7
+ add r3,r3,r9
+ ldr r9,[sp,#36]
+ veor q2,q2,q10
+ eor r11,r10,r6
+ add r3,r3,r4,ror#27
+ veor q2,q2,q3
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ vadd.i32 q13,q1,q14
+ eor r10,r4,r6
+ vld1.32 {d28[],d29[]},[r8,:32]!
+ add r7,r7,r9
+ veor q12,q12,q2
+ ldr r9,[sp,#40]
+ eor r11,r10,r5
+ vshr.u32 q2,q12,#30
+ add r7,r7,r3,ror#27
+ mov r4,r4,ror#2
+ vst1.32 {q13},[r12,:128]!
+ add r7,r7,r11
+ eor r10,r3,r5
+ vsli.32 q2,q12,#2
+ add r6,r6,r9
+ ldr r9,[sp,#44]
+ eor r11,r10,r4
+ add r6,r6,r7,ror#27
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ eor r10,r7,r4
+ add r5,r5,r9
+ ldr r9,[sp,#48]
+ eor r11,r10,r3
+ add r5,r5,r6,ror#27
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ vext.8 q12,q1,q2,#8
+ eor r10,r6,r3
+ add r4,r4,r9
+ ldr r9,[sp,#52]
+ veor q3,q3,q11
+ eor r11,r10,r7
+ add r4,r4,r5,ror#27
+ veor q3,q3,q8
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ vadd.i32 q13,q2,q14
+ eor r10,r5,r7
+ add r3,r3,r9
+ veor q12,q12,q3
+ ldr r9,[sp,#56]
+ eor r11,r10,r6
+ vshr.u32 q3,q12,#30
+ add r3,r3,r4,ror#27
+ mov r5,r5,ror#2
+ vst1.32 {q13},[r12,:128]!
+ add r3,r3,r11
+ eor r10,r4,r6
+ vsli.32 q3,q12,#2
+ add r7,r7,r9
+ ldr r9,[sp,#60]
+ eor r11,r10,r5
+ add r7,r7,r3,ror#27
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ eor r10,r3,r5
+ add r6,r6,r9
+ ldr r9,[sp,#0]
+ eor r11,r10,r4
+ add r6,r6,r7,ror#27
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ vext.8 q12,q2,q3,#8
+ eor r10,r7,r4
+ add r5,r5,r9
+ ldr r9,[sp,#4]
+ veor q8,q8,q0
+ eor r11,r10,r3
+ add r5,r5,r6,ror#27
+ veor q8,q8,q9
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ vadd.i32 q13,q3,q14
+ eor r10,r6,r3
+ add r4,r4,r9
+ veor q12,q12,q8
+ ldr r9,[sp,#8]
+ eor r11,r10,r7
+ vshr.u32 q8,q12,#30
+ add r4,r4,r5,ror#27
+ mov r6,r6,ror#2
+ vst1.32 {q13},[r12,:128]!
+ sub r12,r12,#64
+ add r4,r4,r11
+ eor r10,r5,r7
+ vsli.32 q8,q12,#2
+ add r3,r3,r9
+ ldr r9,[sp,#12]
+ eor r11,r10,r6
+ add r3,r3,r4,ror#27
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ eor r10,r4,r6
+ add r7,r7,r9
+ ldr r9,[sp,#16]
+ eor r11,r10,r5
+ add r7,r7,r3,ror#27
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ vext.8 q12,q3,q8,#8
+ eor r10,r3,r5
+ add r6,r6,r9
+ ldr r9,[sp,#20]
+ veor q9,q9,q1
+ eor r11,r10,r4
+ add r6,r6,r7,ror#27
+ veor q9,q9,q10
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ vadd.i32 q13,q8,q14
+ eor r10,r7,r4
+ add r5,r5,r9
+ veor q12,q12,q9
+ ldr r9,[sp,#24]
+ eor r11,r10,r3
+ vshr.u32 q9,q12,#30
+ add r5,r5,r6,ror#27
+ mov r7,r7,ror#2
+ vst1.32 {q13},[r12,:128]!
+ add r5,r5,r11
+ eor r10,r6,r3
+ vsli.32 q9,q12,#2
+ add r4,r4,r9
+ ldr r9,[sp,#28]
+ eor r11,r10,r7
+ add r4,r4,r5,ror#27
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ eor r10,r5,r7
+ add r3,r3,r9
+ ldr r9,[sp,#32]
+ eor r11,r10,r6
+ add r3,r3,r4,ror#27
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ vext.8 q12,q8,q9,#8
+ add r7,r7,r9
+ and r10,r5,r6
+ ldr r9,[sp,#36]
+ veor q10,q10,q2
+ add r7,r7,r3,ror#27
+ eor r11,r5,r6
+ veor q10,q10,q11
+ add r7,r7,r10
+ and r11,r11,r4
+ vadd.i32 q13,q9,q14
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ veor q12,q12,q10
+ add r6,r6,r9
+ and r10,r4,r5
+ vshr.u32 q10,q12,#30
+ ldr r9,[sp,#40]
+ add r6,r6,r7,ror#27
+ vst1.32 {q13},[r12,:128]!
+ eor r11,r4,r5
+ add r6,r6,r10
+ vsli.32 q10,q12,#2
+ and r11,r11,r3
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ add r5,r5,r9
+ and r10,r3,r4
+ ldr r9,[sp,#44]
+ add r5,r5,r6,ror#27
+ eor r11,r3,r4
+ add r5,r5,r10
+ and r11,r11,r7
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ add r4,r4,r9
+ and r10,r7,r3
+ ldr r9,[sp,#48]
+ add r4,r4,r5,ror#27
+ eor r11,r7,r3
+ add r4,r4,r10
+ and r11,r11,r6
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ vext.8 q12,q9,q10,#8
+ add r3,r3,r9
+ and r10,r6,r7
+ ldr r9,[sp,#52]
+ veor q11,q11,q3
+ add r3,r3,r4,ror#27
+ eor r11,r6,r7
+ veor q11,q11,q0
+ add r3,r3,r10
+ and r11,r11,r5
+ vadd.i32 q13,q10,q14
+ mov r5,r5,ror#2
+ vld1.32 {d28[],d29[]},[r8,:32]!
+ add r3,r3,r11
+ veor q12,q12,q11
+ add r7,r7,r9
+ and r10,r5,r6
+ vshr.u32 q11,q12,#30
+ ldr r9,[sp,#56]
+ add r7,r7,r3,ror#27
+ vst1.32 {q13},[r12,:128]!
+ eor r11,r5,r6
+ add r7,r7,r10
+ vsli.32 q11,q12,#2
+ and r11,r11,r4
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ add r6,r6,r9
+ and r10,r4,r5
+ ldr r9,[sp,#60]
+ add r6,r6,r7,ror#27
+ eor r11,r4,r5
+ add r6,r6,r10
+ and r11,r11,r3
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ add r5,r5,r9
+ and r10,r3,r4
+ ldr r9,[sp,#0]
+ add r5,r5,r6,ror#27
+ eor r11,r3,r4
+ add r5,r5,r10
+ and r11,r11,r7
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ vext.8 q12,q10,q11,#8
+ add r4,r4,r9
+ and r10,r7,r3
+ ldr r9,[sp,#4]
+ veor q0,q0,q8
+ add r4,r4,r5,ror#27
+ eor r11,r7,r3
+ veor q0,q0,q1
+ add r4,r4,r10
+ and r11,r11,r6
+ vadd.i32 q13,q11,q14
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ veor q12,q12,q0
+ add r3,r3,r9
+ and r10,r6,r7
+ vshr.u32 q0,q12,#30
+ ldr r9,[sp,#8]
+ add r3,r3,r4,ror#27
+ vst1.32 {q13},[r12,:128]!
+ sub r12,r12,#64
+ eor r11,r6,r7
+ add r3,r3,r10
+ vsli.32 q0,q12,#2
+ and r11,r11,r5
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ add r7,r7,r9
+ and r10,r5,r6
+ ldr r9,[sp,#12]
+ add r7,r7,r3,ror#27
+ eor r11,r5,r6
+ add r7,r7,r10
+ and r11,r11,r4
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ add r6,r6,r9
+ and r10,r4,r5
+ ldr r9,[sp,#16]
+ add r6,r6,r7,ror#27
+ eor r11,r4,r5
+ add r6,r6,r10
+ and r11,r11,r3
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ vext.8 q12,q11,q0,#8
+ add r5,r5,r9
+ and r10,r3,r4
+ ldr r9,[sp,#20]
+ veor q1,q1,q9
+ add r5,r5,r6,ror#27
+ eor r11,r3,r4
+ veor q1,q1,q2
+ add r5,r5,r10
+ and r11,r11,r7
+ vadd.i32 q13,q0,q14
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ veor q12,q12,q1
+ add r4,r4,r9
+ and r10,r7,r3
+ vshr.u32 q1,q12,#30
+ ldr r9,[sp,#24]
+ add r4,r4,r5,ror#27
+ vst1.32 {q13},[r12,:128]!
+ eor r11,r7,r3
+ add r4,r4,r10
+ vsli.32 q1,q12,#2
+ and r11,r11,r6
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ add r3,r3,r9
+ and r10,r6,r7
+ ldr r9,[sp,#28]
+ add r3,r3,r4,ror#27
+ eor r11,r6,r7
+ add r3,r3,r10
+ and r11,r11,r5
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ add r7,r7,r9
+ and r10,r5,r6
+ ldr r9,[sp,#32]
+ add r7,r7,r3,ror#27
+ eor r11,r5,r6
+ add r7,r7,r10
+ and r11,r11,r4
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ vext.8 q12,q0,q1,#8
+ add r6,r6,r9
+ and r10,r4,r5
+ ldr r9,[sp,#36]
+ veor q2,q2,q10
+ add r6,r6,r7,ror#27
+ eor r11,r4,r5
+ veor q2,q2,q3
+ add r6,r6,r10
+ and r11,r11,r3
+ vadd.i32 q13,q1,q14
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ veor q12,q12,q2
+ add r5,r5,r9
+ and r10,r3,r4
+ vshr.u32 q2,q12,#30
+ ldr r9,[sp,#40]
+ add r5,r5,r6,ror#27
+ vst1.32 {q13},[r12,:128]!
+ eor r11,r3,r4
+ add r5,r5,r10
+ vsli.32 q2,q12,#2
+ and r11,r11,r7
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ add r4,r4,r9
+ and r10,r7,r3
+ ldr r9,[sp,#44]
+ add r4,r4,r5,ror#27
+ eor r11,r7,r3
+ add r4,r4,r10
+ and r11,r11,r6
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ add r3,r3,r9
+ and r10,r6,r7
+ ldr r9,[sp,#48]
+ add r3,r3,r4,ror#27
+ eor r11,r6,r7
+ add r3,r3,r10
+ and r11,r11,r5
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ vext.8 q12,q1,q2,#8
+ eor r10,r4,r6
+ add r7,r7,r9
+ ldr r9,[sp,#52]
+ veor q3,q3,q11
+ eor r11,r10,r5
+ add r7,r7,r3,ror#27
+ veor q3,q3,q8
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ vadd.i32 q13,q2,q14
+ eor r10,r3,r5
+ add r6,r6,r9
+ veor q12,q12,q3
+ ldr r9,[sp,#56]
+ eor r11,r10,r4
+ vshr.u32 q3,q12,#30
+ add r6,r6,r7,ror#27
+ mov r3,r3,ror#2
+ vst1.32 {q13},[r12,:128]!
+ add r6,r6,r11
+ eor r10,r7,r4
+ vsli.32 q3,q12,#2
+ add r5,r5,r9
+ ldr r9,[sp,#60]
+ eor r11,r10,r3
+ add r5,r5,r6,ror#27
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ eor r10,r6,r3
+ add r4,r4,r9
+ ldr r9,[sp,#0]
+ eor r11,r10,r7
+ add r4,r4,r5,ror#27
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ vadd.i32 q13,q3,q14
+ eor r10,r5,r7
+ add r3,r3,r9
+ vst1.32 {q13},[r12,:128]!
+ sub r12,r12,#64
+ teq r1,r2
+ sub r8,r8,#16
+ subeq r1,r1,#64
+ vld1.8 {q0-q1},[r1]!
+ ldr r9,[sp,#4]
+ eor r11,r10,r6
+ vld1.8 {q2-q3},[r1]!
+ add r3,r3,r4,ror#27
+ mov r5,r5,ror#2
+ vld1.32 {d28[],d29[]},[r8,:32]!
+ add r3,r3,r11
+ eor r10,r4,r6
+ vrev32.8 q0,q0
+ add r7,r7,r9
+ ldr r9,[sp,#8]
+ eor r11,r10,r5
+ add r7,r7,r3,ror#27
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ eor r10,r3,r5
+ add r6,r6,r9
+ ldr r9,[sp,#12]
+ eor r11,r10,r4
+ add r6,r6,r7,ror#27
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ eor r10,r7,r4
+ add r5,r5,r9
+ ldr r9,[sp,#16]
+ eor r11,r10,r3
+ add r5,r5,r6,ror#27
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ vrev32.8 q1,q1
+ eor r10,r6,r3
+ add r4,r4,r9
+ vadd.i32 q8,q0,q14
+ ldr r9,[sp,#20]
+ eor r11,r10,r7
+ vst1.32 {q8},[r12,:128]!
+ add r4,r4,r5,ror#27
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ eor r10,r5,r7
+ add r3,r3,r9
+ ldr r9,[sp,#24]
+ eor r11,r10,r6
+ add r3,r3,r4,ror#27
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ eor r10,r4,r6
+ add r7,r7,r9
+ ldr r9,[sp,#28]
+ eor r11,r10,r5
+ add r7,r7,r3,ror#27
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ eor r10,r3,r5
+ add r6,r6,r9
+ ldr r9,[sp,#32]
+ eor r11,r10,r4
+ add r6,r6,r7,ror#27
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ vrev32.8 q2,q2
+ eor r10,r7,r4
+ add r5,r5,r9
+ vadd.i32 q9,q1,q14
+ ldr r9,[sp,#36]
+ eor r11,r10,r3
+ vst1.32 {q9},[r12,:128]!
+ add r5,r5,r6,ror#27
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ eor r10,r6,r3
+ add r4,r4,r9
+ ldr r9,[sp,#40]
+ eor r11,r10,r7
+ add r4,r4,r5,ror#27
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ eor r10,r5,r7
+ add r3,r3,r9
+ ldr r9,[sp,#44]
+ eor r11,r10,r6
+ add r3,r3,r4,ror#27
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ eor r10,r4,r6
+ add r7,r7,r9
+ ldr r9,[sp,#48]
+ eor r11,r10,r5
+ add r7,r7,r3,ror#27
+ mov r4,r4,ror#2
+ add r7,r7,r11
+ vrev32.8 q3,q3
+ eor r10,r3,r5
+ add r6,r6,r9
+ vadd.i32 q10,q2,q14
+ ldr r9,[sp,#52]
+ eor r11,r10,r4
+ vst1.32 {q10},[r12,:128]!
+ add r6,r6,r7,ror#27
+ mov r3,r3,ror#2
+ add r6,r6,r11
+ eor r10,r7,r4
+ add r5,r5,r9
+ ldr r9,[sp,#56]
+ eor r11,r10,r3
+ add r5,r5,r6,ror#27
+ mov r7,r7,ror#2
+ add r5,r5,r11
+ eor r10,r6,r3
+ add r4,r4,r9
+ ldr r9,[sp,#60]
+ eor r11,r10,r7
+ add r4,r4,r5,ror#27
+ mov r6,r6,ror#2
+ add r4,r4,r11
+ eor r10,r5,r7
+ add r3,r3,r9
+ eor r11,r10,r6
+ add r3,r3,r4,ror#27
+ mov r5,r5,ror#2
+ add r3,r3,r11
+ ldmia r0,{r9,r10,r11,r12} @ accumulate context
+ add r3,r3,r9
+ ldr r9,[r0,#16]
+ add r4,r4,r10
+ add r5,r5,r11
+ add r6,r6,r12
+ moveq sp,r14
+ add r7,r7,r9
+ ldrne r9,[sp]
+ stmia r0,{r3,r4,r5,r6,r7}
+ addne r12,sp,#3*16
+ bne .Loop_neon
+
+ @ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r12,pc}
+.size sha1_block_data_order_neon,.-sha1_block_data_order_neon
+#endif
+#if __ARM_ARCH__>=7
+.type sha1_block_data_order_armv8,%function
+.align 5
+sha1_block_data_order_armv8:
+.LARMv8:
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ veor q1,q1,q1
+ adr r3,.LK_00_19
+ vld1.32 {q0},[r0]!
+ vld1.32 {d2[0]},[r0]
+ sub r0,r0,#16
+ vld1.32 {d16[],d17[]},[r3,:32]!
+ vld1.32 {d18[],d19[]},[r3,:32]!
+ vld1.32 {d20[],d21[]},[r3,:32]!
+ vld1.32 {d22[],d23[]},[r3,:32]
+
+.Loop_v8:
+ vld1.8 {q4-q5},[r1]!
+ vld1.8 {q6-q7},[r1]!
+ vrev32.8 q4,q4
+ vrev32.8 q5,q5
+
+ vadd.i32 q12,q8,q4
+ vrev32.8 q6,q6
+ vmov q14,q0 @ offload
+ subs r2,r2,#1
+
+ vadd.i32 q13,q8,q5
+ vrev32.8 q7,q7
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 0
+ .byte 0x68,0x0c,0x02,0xf2 @ sha1c q0,q1,q12
+ vadd.i32 q12,q8,q6
+ .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 1
+ .byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
+ vadd.i32 q13,q8,q7
+ .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+ .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 2
+ .byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
+ vadd.i32 q12,q8,q4
+ .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+ .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 3
+ .byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
+ vadd.i32 q13,q9,q5
+ .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+ .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 4
+ .byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
+ vadd.i32 q12,q9,q6
+ .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+ .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 5
+ .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+ vadd.i32 q13,q9,q7
+ .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+ .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 6
+ .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+ vadd.i32 q12,q9,q4
+ .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+ .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 7
+ .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+ vadd.i32 q13,q9,q5
+ .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+ .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 8
+ .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+ vadd.i32 q12,q10,q6
+ .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+ .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 9
+ .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+ vadd.i32 q13,q10,q7
+ .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+ .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 10
+ .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
+ vadd.i32 q12,q10,q4
+ .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+ .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 11
+ .byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
+ vadd.i32 q13,q10,q5
+ .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+ .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 12
+ .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
+ vadd.i32 q12,q10,q6
+ .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+ .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 13
+ .byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
+ vadd.i32 q13,q11,q7
+ .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+ .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 14
+ .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
+ vadd.i32 q12,q11,q4
+ .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+ .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 15
+ .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+ vadd.i32 q13,q11,q5
+ .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+ .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 16
+ .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+ vadd.i32 q12,q11,q6
+ .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 17
+ .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+ vadd.i32 q13,q11,q7
+
+ .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 18
+ .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+
+ .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 19
+ .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+
+ vadd.i32 q1,q1,q2
+ vadd.i32 q0,q0,q14
+ bne .Loop_v8
+
+ vst1.32 {q0},[r0]!
+ vst1.32 {d2[0]},[r0]
+
+ vldmia sp!,{d8-d15}
+ bx lr @ bx lr
+.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
+#endif
+.comm OPENSSL_armcap_P,4,4
diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.pl b/app/openssl/crypto/sha/asm/sha1-armv4-large.pl
index 33da3e0e..50bd07b3 100644
--- a/app/openssl/crypto/sha/asm/sha1-armv4-large.pl
+++ b/app/openssl/crypto/sha/asm/sha1-armv4-large.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -52,6 +52,20 @@
# Profiler-assisted and platform-specific optimization resulted in 10%
# improvement on Cortex A8 core and 12.2 cycles per byte.
+# September 2013.
+#
+# Add NEON implementation (see sha1-586.pl for background info). On
+# Cortex A8 it was measured to process one byte in 6.7 cycles or >80%
+# faster than integer-only code. Because [fully unrolled] NEON code
+# is ~2.5x larger and there are some redundant instructions executed
+# when processing last block, improvement is not as big for smallest
+# blocks, only ~30%. Snapdragon S4 is a tad faster, 6.4 cycles per
+# byte, which is also >80% faster than integer-only code.
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.35 cpb on Apple A7.
+
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
@@ -153,12 +167,22 @@ $code=<<___;
#include "arm_arch.h"
.text
+.code 32
.global sha1_block_data_order
.type sha1_block_data_order,%function
-.align 2
+.align 5
sha1_block_data_order:
+#if __ARM_ARCH__>=7
+ sub r3,pc,#8 @ sha1_block_data_order
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA1
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
stmdb sp!,{r4-r12,lr}
add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp
ldmia $ctx,{$a,$b,$c,$d,$e}
@@ -233,16 +257,422 @@ $code.=<<___;
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
#endif
-.align 2
+.size sha1_block_data_order,.-sha1_block_data_order
+
+.align 5
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
-.size sha1_block_data_order,.-sha1_block_data_order
-.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
-.align 2
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha1_block_data_order
+.asciz "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 5
+___
+#####################################################################
+# NEON stuff
+#
+{{{
+my @V=($a,$b,$c,$d,$e);
+my ($K_XX_XX,$Ki,$t0,$t1,$Xfer,$saved_sp)=map("r$_",(8..12,14));
+my $Xi=4;
+my @X=map("q$_",(8..11,0..3));
+my @Tx=("q12","q13");
+my ($K,$zero)=("q14","q15");
+my $j=0;
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+sub body_00_19 () {
+ (
+ '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'.
+ '&bic ($t0,$d,$b)',
+ '&add ($e,$e,$Ki)', # e+=X[i]+K
+ '&and ($t1,$c,$b)',
+ '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
+ '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27)
+ '&eor ($t1,$t1,$t0)', # F_00_19
+ '&mov ($b,$b,"ror#2")', # b=ROR(b,2)
+ '&add ($e,$e,$t1);'. # e+=F_00_19
+ '$j++; unshift(@V,pop(@V));'
+ )
+}
+sub body_20_39 () {
+ (
+ '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'.
+ '&eor ($t0,$b,$d)',
+ '&add ($e,$e,$Ki)', # e+=X[i]+K
+ '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15)) if ($j<79)',
+ '&eor ($t1,$t0,$c)', # F_20_39
+ '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27)
+ '&mov ($b,$b,"ror#2")', # b=ROR(b,2)
+ '&add ($e,$e,$t1);'. # e+=F_20_39
+ '$j++; unshift(@V,pop(@V));'
+ )
+}
+sub body_40_59 () {
+ (
+ '($a,$b,$c,$d,$e)=@V;'. # '$code.="@ $j\n";'.
+ '&add ($e,$e,$Ki)', # e+=X[i]+K
+ '&and ($t0,$c,$d)',
+ '&ldr ($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
+ '&add ($e,$e,$a,"ror#27")', # e+=ROR(A,27)
+ '&eor ($t1,$c,$d)',
+ '&add ($e,$e,$t0)',
+ '&and ($t1,$t1,$b)',
+ '&mov ($b,$b,"ror#2")', # b=ROR(b,2)
+ '&add ($e,$e,$t1);'. # e+=F_40_59
+ '$j++; unshift(@V,pop(@V));'
+ )
+}
+
+sub Xupdate_16_31 ()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vext_8 (@X[0],@X[-4&7],@X[-3&7],8); # compose "X[-14]" in "X[0]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@Tx[1],@X[-1&7],$K);
+ eval(shift(@insns));
+ &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!") if ($Xi%5==0);
+ eval(shift(@insns));
+ &vext_8 (@Tx[0],@X[-1&7],$zero,4); # "X[-3]", 3 words
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@Tx[0],@Tx[0],@X[0]); # "X[0]"^="X[-3]"^"X[-8]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!"); # X[]+K xfer
+ &sub ($Xfer,$Xfer,64) if ($Xi%4==0);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vext_8 (@Tx[1],$zero,@Tx[0],4); # "X[0]"<<96, extract one dword
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@Tx[0],@Tx[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsri_32 (@X[0],@Tx[0],31); # "X[0]"<<<=1
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 (@Tx[0],@Tx[1],30);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshl_u32 (@Tx[1],@Tx[1],2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@Tx[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2
+
+ foreach (@insns) { eval; } # remaining instructions [if any]
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xupdate_32_79 ()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vext_8 (@Tx[0],@X[-2&7],@X[-1&7],8); # compose "X[-6]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@Tx[1],@X[-1&7],$K);
+ eval(shift(@insns));
+ &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!") if ($Xi%5==0);
+ eval(shift(@insns));
+ &veor (@Tx[0],@Tx[0],@X[0]); # "X[-6]"^="X[0]"
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 (@X[0],@Tx[0],30);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!"); # X[]+K xfer
+ &sub ($Xfer,$Xfer,64) if ($Xi%4==0);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 (@X[0],@Tx[0],2); # "X[0]"="X[-6]"<<<2
+
+ foreach (@insns) { eval; } # remaining instructions [if any]
+
+ $Xi++; push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xuplast_80 ()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vadd_i32 (@Tx[1],@X[-1&7],$K);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@Tx[1]}","[$Xfer,:128]!");
+ &sub ($Xfer,$Xfer,64);
+
+ &teq ($inp,$len);
+ &sub ($K_XX_XX,$K_XX_XX,16); # rewind $K_XX_XX
+ &subeq ($inp,$inp,64); # reload last block to avoid SEGV
+ &vld1_8 ("{@X[-4&7]-@X[-3&7]}","[$inp]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_8 ("{@X[-2&7]-@X[-1&7]}","[$inp]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$K\[]}","[$K_XX_XX,:32]!"); # load K_00_19
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vrev32_8 (@X[-4&7],@X[-4&7]);
+
+ foreach (@insns) { eval; } # remaining instructions
+
+ $Xi=0;
+}
+
+sub Xloop()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e);
+
+ &vrev32_8 (@X[($Xi-3)&7],@X[($Xi-3)&7]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[$Xi&7],@X[($Xi-4)&7],$K);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vst1_32 ("{@X[$Xi&7]}","[$Xfer,:128]!");# X[]+K xfer to IALU
+
+ foreach (@insns) { eval; }
+
+ $Xi++;
+}
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type sha1_block_data_order_neon,%function
+.align 4
+sha1_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+ add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp
+ @ dmb @ errata #451034 on early Cortex A8
+ @ vstmdb sp!,{d8-d15} @ ABI specification says so
+ mov $saved_sp,sp
+ sub sp,sp,#64 @ alloca
+ adr $K_XX_XX,.LK_00_19
+ bic sp,sp,#15 @ align for 128-bit stores
+
+ ldmia $ctx,{$a,$b,$c,$d,$e} @ load context
+ mov $Xfer,sp
+
+ vld1.8 {@X[-4&7]-@X[-3&7]},[$inp]! @ handles unaligned
+ veor $zero,$zero,$zero
+ vld1.8 {@X[-2&7]-@X[-1&7]},[$inp]!
+ vld1.32 {${K}\[]},[$K_XX_XX,:32]! @ load K_00_19
+ vrev32.8 @X[-4&7],@X[-4&7] @ yes, even on
+ vrev32.8 @X[-3&7],@X[-3&7] @ big-endian...
+ vrev32.8 @X[-2&7],@X[-2&7]
+ vadd.i32 @X[0],@X[-4&7],$K
+ vrev32.8 @X[-1&7],@X[-1&7]
+ vadd.i32 @X[1],@X[-3&7],$K
+ vst1.32 {@X[0]},[$Xfer,:128]!
+ vadd.i32 @X[2],@X[-2&7],$K
+ vst1.32 {@X[1]},[$Xfer,:128]!
+ vst1.32 {@X[2]},[$Xfer,:128]!
+ ldr $Ki,[sp] @ big RAW stall
+
+.Loop_neon:
+___
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_16_31(\&body_00_19);
+ &Xupdate_32_79(\&body_00_19);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_20_39);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_40_59);
+ &Xupdate_32_79(\&body_20_39);
+ &Xuplast_80(\&body_20_39);
+ &Xloop(\&body_20_39);
+ &Xloop(\&body_20_39);
+ &Xloop(\&body_20_39);
+$code.=<<___;
+ ldmia $ctx,{$Ki,$t0,$t1,$Xfer} @ accumulate context
+ add $a,$a,$Ki
+ ldr $Ki,[$ctx,#16]
+ add $b,$b,$t0
+ add $c,$c,$t1
+ add $d,$d,$Xfer
+ moveq sp,$saved_sp
+ add $e,$e,$Ki
+ ldrne $Ki,[sp]
+ stmia $ctx,{$a,$b,$c,$d,$e}
+ addne $Xfer,sp,#3*16
+ bne .Loop_neon
+
+ @ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r12,pc}
+.size sha1_block_data_order_neon,.-sha1_block_data_order_neon
+#endif
+___
+}}}
+#####################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$E,$E0,$E1)=map("q$_",(0..3));
+my @MSG=map("q$_",(4..7));
+my @Kxx=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE)=map("q$_",(12..14));
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.type sha1_block_data_order_armv8,%function
+.align 5
+sha1_block_data_order_armv8:
+.LARMv8:
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ veor $E,$E,$E
+ adr r3,.LK_00_19
+ vld1.32 {$ABCD},[$ctx]!
+ vld1.32 {$E\[0]},[$ctx]
+ sub $ctx,$ctx,#16
+ vld1.32 {@Kxx[0]\[]},[r3,:32]!
+ vld1.32 {@Kxx[1]\[]},[r3,:32]!
+ vld1.32 {@Kxx[2]\[]},[r3,:32]!
+ vld1.32 {@Kxx[3]\[]},[r3,:32]
+
+.Loop_v8:
+ vld1.8 {@MSG[0]-@MSG[1]},[$inp]!
+ vld1.8 {@MSG[2]-@MSG[3]},[$inp]!
+ vrev32.8 @MSG[0],@MSG[0]
+ vrev32.8 @MSG[1],@MSG[1]
+
+ vadd.i32 $W0,@Kxx[0],@MSG[0]
+ vrev32.8 @MSG[2],@MSG[2]
+ vmov $ABCD_SAVE,$ABCD @ offload
+ subs $len,$len,#1
+
+ vadd.i32 $W1,@Kxx[0],@MSG[1]
+ vrev32.8 @MSG[3],@MSG[3]
+ sha1h $E1,$ABCD @ 0
+ sha1c $ABCD,$E,$W0
+ vadd.i32 $W0,@Kxx[$j],@MSG[2]
+ sha1su0 @MSG[0],@MSG[1],@MSG[2]
+___
+for ($j=0,$i=1;$i<20-3;$i++) {
+my $f=("c","p","m","p")[$i/5];
+$code.=<<___;
+ sha1h $E0,$ABCD @ $i
+ sha1$f $ABCD,$E1,$W1
+ vadd.i32 $W1,@Kxx[$j],@MSG[3]
+ sha1su1 @MSG[0],@MSG[3]
+___
+$code.=<<___ if ($i<20-4);
+ sha1su0 @MSG[1],@MSG[2],@MSG[3]
___
+ ($E0,$E1)=($E1,$E0); ($W0,$W1)=($W1,$W0);
+ push(@MSG,shift(@MSG)); $j++ if ((($i+3)%5)==0);
+}
+$code.=<<___;
+ sha1h $E0,$ABCD @ $i
+ sha1p $ABCD,$E1,$W1
+ vadd.i32 $W1,@Kxx[$j],@MSG[3]
+
+ sha1h $E1,$ABCD @ 18
+ sha1p $ABCD,$E0,$W0
+
+ sha1h $E0,$ABCD @ 19
+ sha1p $ABCD,$E1,$W1
+
+ vadd.i32 $E,$E,$E0
+ vadd.i32 $ABCD,$ABCD,$ABCD_SAVE
+ bne .Loop_v8
+
+ vst1.32 {$ABCD},[$ctx]!
+ vst1.32 {$E\[0]},[$ctx]
+
+ vldmia sp!,{d8-d15}
+ ret @ bx lr
+.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+.comm OPENSSL_armcap_P,4,4
+___
+
+{ my %opcode = (
+ "sha1c" => 0xf2000c40, "sha1p" => 0xf2100c40,
+ "sha1m" => 0xf2200c40, "sha1su0" => 0xf2300c40,
+ "sha1h" => 0xf3b902c0, "sha1su1" => 0xf3ba0380 );
+
+ sub unsha1 {
+ my ($mnemonic,$arg)=@_;
+
+ if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+ my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+ |(($2&7)<<17)|(($2&8)<<4)
+ |(($3&7)<<1) |(($3&8)<<2);
+ # since ARMv7 instructions are always encoded little-endian.
+ # correct solution is to use .inst directive, but older
+ # assemblers don't implement it:-(
+ sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
+ $word&0xff,($word>>8)&0xff,
+ ($word>>16)&0xff,($word>>24)&0xff,
+ $mnemonic,$arg;
+ }
+ }
+}
+
+foreach (split($/,$code)) {
+ s/{q([0-9]+)\[\]}/sprintf "{d%d[],d%d[]}",2*$1,2*$1+1/eo or
+ s/{q([0-9]+)\[0\]}/sprintf "{d%d[0]}",2*$1/eo;
+
+ s/\b(sha1\w+)\s+(q.*)/unsha1($1,$2)/geo;
+
+ s/\bret\b/bx lr/o or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/o; # make it possible to compile with -march=armv4
+
+ print $_,$/;
+}
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
-print $code;
close STDOUT; # enforce flush
diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.s b/app/openssl/crypto/sha/asm/sha1-armv4-large.s
deleted file mode 100644
index 639ae78a..00000000
--- a/app/openssl/crypto/sha/asm/sha1-armv4-large.s
+++ /dev/null
@@ -1,452 +0,0 @@
-#include "arm_arch.h"
-
-.text
-
-.global sha1_block_data_order
-.type sha1_block_data_order,%function
-
-.align 2
-sha1_block_data_order:
- stmdb sp!,{r4-r12,lr}
- add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
- ldmia r0,{r3,r4,r5,r6,r7}
-.Lloop:
- ldr r8,.LK_00_19
- mov r14,sp
- sub sp,sp,#15*4
- mov r5,r5,ror#30
- mov r6,r6,ror#30
- mov r7,r7,ror#30 @ [6]
-.L_00_15:
-#if __ARM_ARCH__<7
- ldrb r10,[r1,#2]
- ldrb r9,[r1,#3]
- ldrb r11,[r1,#1]
- add r7,r8,r7,ror#2 @ E+=K_00_19
- ldrb r12,[r1],#4
- orr r9,r9,r10,lsl#8
- eor r10,r5,r6 @ F_xx_xx
- orr r9,r9,r11,lsl#16
- add r7,r7,r3,ror#27 @ E+=ROR(A,27)
- orr r9,r9,r12,lsl#24
-#else
- ldr r9,[r1],#4 @ handles unaligned
- add r7,r8,r7,ror#2 @ E+=K_00_19
- eor r10,r5,r6 @ F_xx_xx
- add r7,r7,r3,ror#27 @ E+=ROR(A,27)
-#ifdef __ARMEL__
- rev r9,r9 @ byte swap
-#endif
-#endif
- and r10,r4,r10,ror#2
- add r7,r7,r9 @ E+=X[i]
- eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
- str r9,[r14,#-4]!
- add r7,r7,r10 @ E+=F_00_19(B,C,D)
-#if __ARM_ARCH__<7
- ldrb r10,[r1,#2]
- ldrb r9,[r1,#3]
- ldrb r11,[r1,#1]
- add r6,r8,r6,ror#2 @ E+=K_00_19
- ldrb r12,[r1],#4
- orr r9,r9,r10,lsl#8
- eor r10,r4,r5 @ F_xx_xx
- orr r9,r9,r11,lsl#16
- add r6,r6,r7,ror#27 @ E+=ROR(A,27)
- orr r9,r9,r12,lsl#24
-#else
- ldr r9,[r1],#4 @ handles unaligned
- add r6,r8,r6,ror#2 @ E+=K_00_19
- eor r10,r4,r5 @ F_xx_xx
- add r6,r6,r7,ror#27 @ E+=ROR(A,27)
-#ifdef __ARMEL__
- rev r9,r9 @ byte swap
-#endif
-#endif
- and r10,r3,r10,ror#2
- add r6,r6,r9 @ E+=X[i]
- eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
- str r9,[r14,#-4]!
- add r6,r6,r10 @ E+=F_00_19(B,C,D)
-#if __ARM_ARCH__<7
- ldrb r10,[r1,#2]
- ldrb r9,[r1,#3]
- ldrb r11,[r1,#1]
- add r5,r8,r5,ror#2 @ E+=K_00_19
- ldrb r12,[r1],#4
- orr r9,r9,r10,lsl#8
- eor r10,r3,r4 @ F_xx_xx
- orr r9,r9,r11,lsl#16
- add r5,r5,r6,ror#27 @ E+=ROR(A,27)
- orr r9,r9,r12,lsl#24
-#else
- ldr r9,[r1],#4 @ handles unaligned
- add r5,r8,r5,ror#2 @ E+=K_00_19
- eor r10,r3,r4 @ F_xx_xx
- add r5,r5,r6,ror#27 @ E+=ROR(A,27)
-#ifdef __ARMEL__
- rev r9,r9 @ byte swap
-#endif
-#endif
- and r10,r7,r10,ror#2
- add r5,r5,r9 @ E+=X[i]
- eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
- str r9,[r14,#-4]!
- add r5,r5,r10 @ E+=F_00_19(B,C,D)
-#if __ARM_ARCH__<7
- ldrb r10,[r1,#2]
- ldrb r9,[r1,#3]
- ldrb r11,[r1,#1]
- add r4,r8,r4,ror#2 @ E+=K_00_19
- ldrb r12,[r1],#4
- orr r9,r9,r10,lsl#8
- eor r10,r7,r3 @ F_xx_xx
- orr r9,r9,r11,lsl#16
- add r4,r4,r5,ror#27 @ E+=ROR(A,27)
- orr r9,r9,r12,lsl#24
-#else
- ldr r9,[r1],#4 @ handles unaligned
- add r4,r8,r4,ror#2 @ E+=K_00_19
- eor r10,r7,r3 @ F_xx_xx
- add r4,r4,r5,ror#27 @ E+=ROR(A,27)
-#ifdef __ARMEL__
- rev r9,r9 @ byte swap
-#endif
-#endif
- and r10,r6,r10,ror#2
- add r4,r4,r9 @ E+=X[i]
- eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
- str r9,[r14,#-4]!
- add r4,r4,r10 @ E+=F_00_19(B,C,D)
-#if __ARM_ARCH__<7
- ldrb r10,[r1,#2]
- ldrb r9,[r1,#3]
- ldrb r11,[r1,#1]
- add r3,r8,r3,ror#2 @ E+=K_00_19
- ldrb r12,[r1],#4
- orr r9,r9,r10,lsl#8
- eor r10,r6,r7 @ F_xx_xx
- orr r9,r9,r11,lsl#16
- add r3,r3,r4,ror#27 @ E+=ROR(A,27)
- orr r9,r9,r12,lsl#24
-#else
- ldr r9,[r1],#4 @ handles unaligned
- add r3,r8,r3,ror#2 @ E+=K_00_19
- eor r10,r6,r7 @ F_xx_xx
- add r3,r3,r4,ror#27 @ E+=ROR(A,27)
-#ifdef __ARMEL__
- rev r9,r9 @ byte swap
-#endif
-#endif
- and r10,r5,r10,ror#2
- add r3,r3,r9 @ E+=X[i]
- eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
- str r9,[r14,#-4]!
- add r3,r3,r10 @ E+=F_00_19(B,C,D)
- teq r14,sp
- bne .L_00_15 @ [((11+4)*5+2)*3]
- sub sp,sp,#25*4
-#if __ARM_ARCH__<7
- ldrb r10,[r1,#2]
- ldrb r9,[r1,#3]
- ldrb r11,[r1,#1]
- add r7,r8,r7,ror#2 @ E+=K_00_19
- ldrb r12,[r1],#4
- orr r9,r9,r10,lsl#8
- eor r10,r5,r6 @ F_xx_xx
- orr r9,r9,r11,lsl#16
- add r7,r7,r3,ror#27 @ E+=ROR(A,27)
- orr r9,r9,r12,lsl#24
-#else
- ldr r9,[r1],#4 @ handles unaligned
- add r7,r8,r7,ror#2 @ E+=K_00_19
- eor r10,r5,r6 @ F_xx_xx
- add r7,r7,r3,ror#27 @ E+=ROR(A,27)
-#ifdef __ARMEL__
- rev r9,r9 @ byte swap
-#endif
-#endif
- and r10,r4,r10,ror#2
- add r7,r7,r9 @ E+=X[i]
- eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
- str r9,[r14,#-4]!
- add r7,r7,r10 @ E+=F_00_19(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r6,r8,r6,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r4,r5 @ F_xx_xx
- mov r9,r9,ror#31
- add r6,r6,r7,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r3,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r6,r6,r9 @ E+=X[i]
- eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
- add r6,r6,r10 @ E+=F_00_19(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r5,r8,r5,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r3,r4 @ F_xx_xx
- mov r9,r9,ror#31
- add r5,r5,r6,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r7,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r5,r5,r9 @ E+=X[i]
- eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
- add r5,r5,r10 @ E+=F_00_19(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r4,r8,r4,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r7,r3 @ F_xx_xx
- mov r9,r9,ror#31
- add r4,r4,r5,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r6,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r4,r4,r9 @ E+=X[i]
- eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
- add r4,r4,r10 @ E+=F_00_19(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r3,r8,r3,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r6,r7 @ F_xx_xx
- mov r9,r9,ror#31
- add r3,r3,r4,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r5,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r3,r3,r9 @ E+=X[i]
- eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
- add r3,r3,r10 @ E+=F_00_19(B,C,D)
-
- ldr r8,.LK_20_39 @ [+15+16*4]
- cmn sp,#0 @ [+3], clear carry to denote 20_39
-.L_20_39_or_60_79:
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r7,r8,r7,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r5,r6 @ F_xx_xx
- mov r9,r9,ror#31
- add r7,r7,r3,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- eor r10,r4,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r7,r7,r9 @ E+=X[i]
- add r7,r7,r10 @ E+=F_20_39(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r6,r8,r6,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r4,r5 @ F_xx_xx
- mov r9,r9,ror#31
- add r6,r6,r7,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- eor r10,r3,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r6,r6,r9 @ E+=X[i]
- add r6,r6,r10 @ E+=F_20_39(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r5,r8,r5,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r3,r4 @ F_xx_xx
- mov r9,r9,ror#31
- add r5,r5,r6,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- eor r10,r7,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r5,r5,r9 @ E+=X[i]
- add r5,r5,r10 @ E+=F_20_39(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r4,r8,r4,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r7,r3 @ F_xx_xx
- mov r9,r9,ror#31
- add r4,r4,r5,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- eor r10,r6,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r4,r4,r9 @ E+=X[i]
- add r4,r4,r10 @ E+=F_20_39(B,C,D)
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r3,r8,r3,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r6,r7 @ F_xx_xx
- mov r9,r9,ror#31
- add r3,r3,r4,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- eor r10,r5,r10,ror#2 @ F_xx_xx
- @ F_xx_xx
- add r3,r3,r9 @ E+=X[i]
- add r3,r3,r10 @ E+=F_20_39(B,C,D)
- teq r14,sp @ preserve carry
- bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
- bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
-
- ldr r8,.LK_40_59
- sub sp,sp,#20*4 @ [+2]
-.L_40_59:
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r7,r8,r7,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r5,r6 @ F_xx_xx
- mov r9,r9,ror#31
- add r7,r7,r3,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r4,r10,ror#2 @ F_xx_xx
- and r11,r5,r6 @ F_xx_xx
- add r7,r7,r9 @ E+=X[i]
- add r7,r7,r10 @ E+=F_40_59(B,C,D)
- add r7,r7,r11,ror#2
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r6,r8,r6,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r4,r5 @ F_xx_xx
- mov r9,r9,ror#31
- add r6,r6,r7,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r3,r10,ror#2 @ F_xx_xx
- and r11,r4,r5 @ F_xx_xx
- add r6,r6,r9 @ E+=X[i]
- add r6,r6,r10 @ E+=F_40_59(B,C,D)
- add r6,r6,r11,ror#2
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r5,r8,r5,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r3,r4 @ F_xx_xx
- mov r9,r9,ror#31
- add r5,r5,r6,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r7,r10,ror#2 @ F_xx_xx
- and r11,r3,r4 @ F_xx_xx
- add r5,r5,r9 @ E+=X[i]
- add r5,r5,r10 @ E+=F_40_59(B,C,D)
- add r5,r5,r11,ror#2
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r4,r8,r4,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r7,r3 @ F_xx_xx
- mov r9,r9,ror#31
- add r4,r4,r5,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r6,r10,ror#2 @ F_xx_xx
- and r11,r7,r3 @ F_xx_xx
- add r4,r4,r9 @ E+=X[i]
- add r4,r4,r10 @ E+=F_40_59(B,C,D)
- add r4,r4,r11,ror#2
- ldr r9,[r14,#15*4]
- ldr r10,[r14,#13*4]
- ldr r11,[r14,#7*4]
- add r3,r8,r3,ror#2 @ E+=K_xx_xx
- ldr r12,[r14,#2*4]
- eor r9,r9,r10
- eor r11,r11,r12 @ 1 cycle stall
- eor r10,r6,r7 @ F_xx_xx
- mov r9,r9,ror#31
- add r3,r3,r4,ror#27 @ E+=ROR(A,27)
- eor r9,r9,r11,ror#31
- str r9,[r14,#-4]!
- and r10,r5,r10,ror#2 @ F_xx_xx
- and r11,r6,r7 @ F_xx_xx
- add r3,r3,r9 @ E+=X[i]
- add r3,r3,r10 @ E+=F_40_59(B,C,D)
- add r3,r3,r11,ror#2
- teq r14,sp
- bne .L_40_59 @ [+((12+5)*5+2)*4]
-
- ldr r8,.LK_60_79
- sub sp,sp,#20*4
- cmp sp,#0 @ set carry to denote 60_79
- b .L_20_39_or_60_79 @ [+4], spare 300 bytes
-.L_done:
- add sp,sp,#80*4 @ "deallocate" stack frame
- ldmia r0,{r8,r9,r10,r11,r12}
- add r3,r8,r3
- add r4,r9,r4
- add r5,r10,r5,ror#2
- add r6,r11,r6,ror#2
- add r7,r12,r7,ror#2
- stmia r0,{r3,r4,r5,r6,r7}
- teq r1,r2
- bne .Lloop @ [+18], total 1307
-
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.align 2
-.LK_00_19: .word 0x5a827999
-.LK_20_39: .word 0x6ed9eba1
-.LK_40_59: .word 0x8f1bbcdc
-.LK_60_79: .word 0xca62c1d6
-.size sha1_block_data_order,.-sha1_block_data_order
-.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
diff --git a/app/openssl/crypto/sha/asm/sha1-armv8.S b/app/openssl/crypto/sha/asm/sha1-armv8.S
new file mode 100644
index 00000000..f9d12625
--- /dev/null
+++ b/app/openssl/crypto/sha/asm/sha1-armv8.S
@@ -0,0 +1,1211 @@
+#include "arm_arch.h"
+
+.text
+
+.globl sha1_block_data_order
+.type sha1_block_data_order,%function
+.align 6
+sha1_block_data_order:
+ ldr x16,.LOPENSSL_armcap_P
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA1
+ b.ne .Lv8_entry
+
+ stp x29,x30,[sp,#-96]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+
+ ldp w20,w21,[x0]
+ ldp w22,w23,[x0,#8]
+ ldr w24,[x0,#16]
+
+.Loop:
+ ldr x3,[x1],#64
+ movz w28,#0x7999
+ sub x2,x2,#1
+ movk w28,#0x5a82,lsl#16
+#ifdef __ARMEB__
+ ror x3,x3,#32
+#else
+ rev32 x3,x3
+#endif
+ add w24,w24,w28 // warm it up
+ add w24,w24,w3
+ lsr x4,x3,#32
+ ldr x5,[x1,#-56]
+ bic w25,w23,w21
+ and w26,w22,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ orr w25,w25,w26
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ add w23,w23,w4 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+#ifdef __ARMEB__
+ ror x5,x5,#32
+#else
+ rev32 x5,x5
+#endif
+ bic w25,w22,w20
+ and w26,w21,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ orr w25,w25,w26
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ add w22,w22,w5 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ lsr x6,x5,#32
+ ldr x7,[x1,#-48]
+ bic w25,w21,w24
+ and w26,w20,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ orr w25,w25,w26
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ add w21,w21,w6 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+#ifdef __ARMEB__
+ ror x7,x7,#32
+#else
+ rev32 x7,x7
+#endif
+ bic w25,w20,w23
+ and w26,w24,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ orr w25,w25,w26
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ add w20,w20,w7 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ lsr x8,x7,#32
+ ldr x9,[x1,#-40]
+ bic w25,w24,w22
+ and w26,w23,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ orr w25,w25,w26
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ add w24,w24,w8 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+#ifdef __ARMEB__
+ ror x9,x9,#32
+#else
+ rev32 x9,x9
+#endif
+ bic w25,w23,w21
+ and w26,w22,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ orr w25,w25,w26
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ add w23,w23,w9 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ lsr x10,x9,#32
+ ldr x11,[x1,#-32]
+ bic w25,w22,w20
+ and w26,w21,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ orr w25,w25,w26
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ add w22,w22,w10 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+#ifdef __ARMEB__
+ ror x11,x11,#32
+#else
+ rev32 x11,x11
+#endif
+ bic w25,w21,w24
+ and w26,w20,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ orr w25,w25,w26
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ add w21,w21,w11 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ lsr x12,x11,#32
+ ldr x13,[x1,#-24]
+ bic w25,w20,w23
+ and w26,w24,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ orr w25,w25,w26
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ add w20,w20,w12 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+#ifdef __ARMEB__
+ ror x13,x13,#32
+#else
+ rev32 x13,x13
+#endif
+ bic w25,w24,w22
+ and w26,w23,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ orr w25,w25,w26
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ add w24,w24,w13 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ lsr x14,x13,#32
+ ldr x15,[x1,#-16]
+ bic w25,w23,w21
+ and w26,w22,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ orr w25,w25,w26
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ add w23,w23,w14 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+#ifdef __ARMEB__
+ ror x15,x15,#32
+#else
+ rev32 x15,x15
+#endif
+ bic w25,w22,w20
+ and w26,w21,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ orr w25,w25,w26
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ add w22,w22,w15 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ lsr x16,x15,#32
+ ldr x17,[x1,#-8]
+ bic w25,w21,w24
+ and w26,w20,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ orr w25,w25,w26
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ add w21,w21,w16 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+#ifdef __ARMEB__
+ ror x17,x17,#32
+#else
+ rev32 x17,x17
+#endif
+ bic w25,w20,w23
+ and w26,w24,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ orr w25,w25,w26
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ add w20,w20,w17 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ lsr x19,x17,#32
+ eor w3,w3,w5
+ bic w25,w24,w22
+ and w26,w23,w22
+ ror w27,w21,#27
+ eor w3,w3,w11
+ add w24,w24,w28 // future e+=K
+ orr w25,w25,w26
+ add w20,w20,w27 // e+=rot(a,5)
+ eor w3,w3,w16
+ ror w22,w22,#2
+ add w24,w24,w19 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w3,w3,#31
+ eor w4,w4,w6
+ bic w25,w23,w21
+ and w26,w22,w21
+ ror w27,w20,#27
+ eor w4,w4,w12
+ add w23,w23,w28 // future e+=K
+ orr w25,w25,w26
+ add w24,w24,w27 // e+=rot(a,5)
+ eor w4,w4,w17
+ ror w21,w21,#2
+ add w23,w23,w3 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w4,w4,#31
+ eor w5,w5,w7
+ bic w25,w22,w20
+ and w26,w21,w20
+ ror w27,w24,#27
+ eor w5,w5,w13
+ add w22,w22,w28 // future e+=K
+ orr w25,w25,w26
+ add w23,w23,w27 // e+=rot(a,5)
+ eor w5,w5,w19
+ ror w20,w20,#2
+ add w22,w22,w4 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w5,w5,#31
+ eor w6,w6,w8
+ bic w25,w21,w24
+ and w26,w20,w24
+ ror w27,w23,#27
+ eor w6,w6,w14
+ add w21,w21,w28 // future e+=K
+ orr w25,w25,w26
+ add w22,w22,w27 // e+=rot(a,5)
+ eor w6,w6,w3
+ ror w24,w24,#2
+ add w21,w21,w5 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w6,w6,#31
+ eor w7,w7,w9
+ bic w25,w20,w23
+ and w26,w24,w23
+ ror w27,w22,#27
+ eor w7,w7,w15
+ add w20,w20,w28 // future e+=K
+ orr w25,w25,w26
+ add w21,w21,w27 // e+=rot(a,5)
+ eor w7,w7,w4
+ ror w23,w23,#2
+ add w20,w20,w6 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w7,w7,#31
+ movz w28,#0xeba1
+ movk w28,#0x6ed9,lsl#16
+ eor w8,w8,w10
+ bic w25,w24,w22
+ and w26,w23,w22
+ ror w27,w21,#27
+ eor w8,w8,w16
+ add w24,w24,w28 // future e+=K
+ orr w25,w25,w26
+ add w20,w20,w27 // e+=rot(a,5)
+ eor w8,w8,w5
+ ror w22,w22,#2
+ add w24,w24,w7 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w8,w8,#31
+ eor w9,w9,w11
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w9,w9,w17
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w9,w9,w6
+ add w23,w23,w8 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w9,w9,#31
+ eor w10,w10,w12
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w10,w10,w19
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w10,w10,w7
+ add w22,w22,w9 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w10,w10,#31
+ eor w11,w11,w13
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w11,w11,w3
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w11,w11,w8
+ add w21,w21,w10 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w11,w11,#31
+ eor w12,w12,w14
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w12,w12,w4
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ eor w12,w12,w9
+ add w20,w20,w11 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w12,w12,#31
+ eor w13,w13,w15
+ eor w25,w24,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ eor w13,w13,w5
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ eor w13,w13,w10
+ add w24,w24,w12 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w13,w13,#31
+ eor w14,w14,w16
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w14,w14,w6
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w14,w14,w11
+ add w23,w23,w13 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w14,w14,#31
+ eor w15,w15,w17
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w15,w15,w7
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w15,w15,w12
+ add w22,w22,w14 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w15,w15,#31
+ eor w16,w16,w19
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w16,w16,w8
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w16,w16,w13
+ add w21,w21,w15 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w16,w16,#31
+ eor w17,w17,w3
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w17,w17,w9
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ eor w17,w17,w14
+ add w20,w20,w16 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w17,w17,#31
+ eor w19,w19,w4
+ eor w25,w24,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ eor w19,w19,w10
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ eor w19,w19,w15
+ add w24,w24,w17 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w19,w19,#31
+ eor w3,w3,w5
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w3,w3,w11
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w3,w3,w16
+ add w23,w23,w19 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w3,w3,#31
+ eor w4,w4,w6
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w4,w4,w12
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w4,w4,w17
+ add w22,w22,w3 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w4,w4,#31
+ eor w5,w5,w7
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w5,w5,w13
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w5,w5,w19
+ add w21,w21,w4 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w5,w5,#31
+ eor w6,w6,w8
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w6,w6,w14
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ eor w6,w6,w3
+ add w20,w20,w5 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w6,w6,#31
+ eor w7,w7,w9
+ eor w25,w24,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ eor w7,w7,w15
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ eor w7,w7,w4
+ add w24,w24,w6 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w7,w7,#31
+ eor w8,w8,w10
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w8,w8,w16
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w8,w8,w5
+ add w23,w23,w7 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w8,w8,#31
+ eor w9,w9,w11
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w9,w9,w17
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w9,w9,w6
+ add w22,w22,w8 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w9,w9,#31
+ eor w10,w10,w12
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w10,w10,w19
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w10,w10,w7
+ add w21,w21,w9 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w10,w10,#31
+ eor w11,w11,w13
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w11,w11,w3
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ eor w11,w11,w8
+ add w20,w20,w10 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w11,w11,#31
+ movz w28,#0xbcdc
+ movk w28,#0x8f1b,lsl#16
+ eor w12,w12,w14
+ eor w25,w24,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ eor w12,w12,w4
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ eor w12,w12,w9
+ add w24,w24,w11 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w12,w12,#31
+ orr w25,w21,w22
+ and w26,w21,w22
+ eor w13,w13,w15
+ ror w27,w20,#27
+ and w25,w25,w23
+ add w23,w23,w28 // future e+=K
+ eor w13,w13,w5
+ add w24,w24,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w21,w21,#2
+ eor w13,w13,w10
+ add w23,w23,w12 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w13,w13,#31
+ orr w25,w20,w21
+ and w26,w20,w21
+ eor w14,w14,w16
+ ror w27,w24,#27
+ and w25,w25,w22
+ add w22,w22,w28 // future e+=K
+ eor w14,w14,w6
+ add w23,w23,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w20,w20,#2
+ eor w14,w14,w11
+ add w22,w22,w13 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w14,w14,#31
+ orr w25,w24,w20
+ and w26,w24,w20
+ eor w15,w15,w17
+ ror w27,w23,#27
+ and w25,w25,w21
+ add w21,w21,w28 // future e+=K
+ eor w15,w15,w7
+ add w22,w22,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w24,w24,#2
+ eor w15,w15,w12
+ add w21,w21,w14 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w15,w15,#31
+ orr w25,w23,w24
+ and w26,w23,w24
+ eor w16,w16,w19
+ ror w27,w22,#27
+ and w25,w25,w20
+ add w20,w20,w28 // future e+=K
+ eor w16,w16,w8
+ add w21,w21,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w23,w23,#2
+ eor w16,w16,w13
+ add w20,w20,w15 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w16,w16,#31
+ orr w25,w22,w23
+ and w26,w22,w23
+ eor w17,w17,w3
+ ror w27,w21,#27
+ and w25,w25,w24
+ add w24,w24,w28 // future e+=K
+ eor w17,w17,w9
+ add w20,w20,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w22,w22,#2
+ eor w17,w17,w14
+ add w24,w24,w16 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w17,w17,#31
+ orr w25,w21,w22
+ and w26,w21,w22
+ eor w19,w19,w4
+ ror w27,w20,#27
+ and w25,w25,w23
+ add w23,w23,w28 // future e+=K
+ eor w19,w19,w10
+ add w24,w24,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w21,w21,#2
+ eor w19,w19,w15
+ add w23,w23,w17 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w19,w19,#31
+ orr w25,w20,w21
+ and w26,w20,w21
+ eor w3,w3,w5
+ ror w27,w24,#27
+ and w25,w25,w22
+ add w22,w22,w28 // future e+=K
+ eor w3,w3,w11
+ add w23,w23,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w20,w20,#2
+ eor w3,w3,w16
+ add w22,w22,w19 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w3,w3,#31
+ orr w25,w24,w20
+ and w26,w24,w20
+ eor w4,w4,w6
+ ror w27,w23,#27
+ and w25,w25,w21
+ add w21,w21,w28 // future e+=K
+ eor w4,w4,w12
+ add w22,w22,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w24,w24,#2
+ eor w4,w4,w17
+ add w21,w21,w3 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w4,w4,#31
+ orr w25,w23,w24
+ and w26,w23,w24
+ eor w5,w5,w7
+ ror w27,w22,#27
+ and w25,w25,w20
+ add w20,w20,w28 // future e+=K
+ eor w5,w5,w13
+ add w21,w21,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w23,w23,#2
+ eor w5,w5,w19
+ add w20,w20,w4 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w5,w5,#31
+ orr w25,w22,w23
+ and w26,w22,w23
+ eor w6,w6,w8
+ ror w27,w21,#27
+ and w25,w25,w24
+ add w24,w24,w28 // future e+=K
+ eor w6,w6,w14
+ add w20,w20,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w22,w22,#2
+ eor w6,w6,w3
+ add w24,w24,w5 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w6,w6,#31
+ orr w25,w21,w22
+ and w26,w21,w22
+ eor w7,w7,w9
+ ror w27,w20,#27
+ and w25,w25,w23
+ add w23,w23,w28 // future e+=K
+ eor w7,w7,w15
+ add w24,w24,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w21,w21,#2
+ eor w7,w7,w4
+ add w23,w23,w6 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w7,w7,#31
+ orr w25,w20,w21
+ and w26,w20,w21
+ eor w8,w8,w10
+ ror w27,w24,#27
+ and w25,w25,w22
+ add w22,w22,w28 // future e+=K
+ eor w8,w8,w16
+ add w23,w23,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w20,w20,#2
+ eor w8,w8,w5
+ add w22,w22,w7 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w8,w8,#31
+ orr w25,w24,w20
+ and w26,w24,w20
+ eor w9,w9,w11
+ ror w27,w23,#27
+ and w25,w25,w21
+ add w21,w21,w28 // future e+=K
+ eor w9,w9,w17
+ add w22,w22,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w24,w24,#2
+ eor w9,w9,w6
+ add w21,w21,w8 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w9,w9,#31
+ orr w25,w23,w24
+ and w26,w23,w24
+ eor w10,w10,w12
+ ror w27,w22,#27
+ and w25,w25,w20
+ add w20,w20,w28 // future e+=K
+ eor w10,w10,w19
+ add w21,w21,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w23,w23,#2
+ eor w10,w10,w7
+ add w20,w20,w9 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w10,w10,#31
+ orr w25,w22,w23
+ and w26,w22,w23
+ eor w11,w11,w13
+ ror w27,w21,#27
+ and w25,w25,w24
+ add w24,w24,w28 // future e+=K
+ eor w11,w11,w3
+ add w20,w20,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w22,w22,#2
+ eor w11,w11,w8
+ add w24,w24,w10 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w11,w11,#31
+ orr w25,w21,w22
+ and w26,w21,w22
+ eor w12,w12,w14
+ ror w27,w20,#27
+ and w25,w25,w23
+ add w23,w23,w28 // future e+=K
+ eor w12,w12,w4
+ add w24,w24,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w21,w21,#2
+ eor w12,w12,w9
+ add w23,w23,w11 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w12,w12,#31
+ orr w25,w20,w21
+ and w26,w20,w21
+ eor w13,w13,w15
+ ror w27,w24,#27
+ and w25,w25,w22
+ add w22,w22,w28 // future e+=K
+ eor w13,w13,w5
+ add w23,w23,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w20,w20,#2
+ eor w13,w13,w10
+ add w22,w22,w12 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w13,w13,#31
+ orr w25,w24,w20
+ and w26,w24,w20
+ eor w14,w14,w16
+ ror w27,w23,#27
+ and w25,w25,w21
+ add w21,w21,w28 // future e+=K
+ eor w14,w14,w6
+ add w22,w22,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w24,w24,#2
+ eor w14,w14,w11
+ add w21,w21,w13 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w14,w14,#31
+ orr w25,w23,w24
+ and w26,w23,w24
+ eor w15,w15,w17
+ ror w27,w22,#27
+ and w25,w25,w20
+ add w20,w20,w28 // future e+=K
+ eor w15,w15,w7
+ add w21,w21,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w23,w23,#2
+ eor w15,w15,w12
+ add w20,w20,w14 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w15,w15,#31
+ movz w28,#0xc1d6
+ movk w28,#0xca62,lsl#16
+ orr w25,w22,w23
+ and w26,w22,w23
+ eor w16,w16,w19
+ ror w27,w21,#27
+ and w25,w25,w24
+ add w24,w24,w28 // future e+=K
+ eor w16,w16,w8
+ add w20,w20,w27 // e+=rot(a,5)
+ orr w25,w25,w26
+ ror w22,w22,#2
+ eor w16,w16,w13
+ add w24,w24,w15 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w16,w16,#31
+ eor w17,w17,w3
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w17,w17,w9
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w17,w17,w14
+ add w23,w23,w16 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w17,w17,#31
+ eor w19,w19,w4
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w19,w19,w10
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w19,w19,w15
+ add w22,w22,w17 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w19,w19,#31
+ eor w3,w3,w5
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w3,w3,w11
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w3,w3,w16
+ add w21,w21,w19 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w3,w3,#31
+ eor w4,w4,w6
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w4,w4,w12
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ eor w4,w4,w17
+ add w20,w20,w3 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w4,w4,#31
+ eor w5,w5,w7
+ eor w25,w24,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ eor w5,w5,w13
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ eor w5,w5,w19
+ add w24,w24,w4 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w5,w5,#31
+ eor w6,w6,w8
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w6,w6,w14
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w6,w6,w3
+ add w23,w23,w5 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w6,w6,#31
+ eor w7,w7,w9
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w7,w7,w15
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w7,w7,w4
+ add w22,w22,w6 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w7,w7,#31
+ eor w8,w8,w10
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w8,w8,w16
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w8,w8,w5
+ add w21,w21,w7 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w8,w8,#31
+ eor w9,w9,w11
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w9,w9,w17
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ eor w9,w9,w6
+ add w20,w20,w8 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w9,w9,#31
+ eor w10,w10,w12
+ eor w25,w24,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ eor w10,w10,w19
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ eor w10,w10,w7
+ add w24,w24,w9 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w10,w10,#31
+ eor w11,w11,w13
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w11,w11,w3
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w11,w11,w8
+ add w23,w23,w10 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w11,w11,#31
+ eor w12,w12,w14
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w12,w12,w4
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w12,w12,w9
+ add w22,w22,w11 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w12,w12,#31
+ eor w13,w13,w15
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w13,w13,w5
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w13,w13,w10
+ add w21,w21,w12 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w13,w13,#31
+ eor w14,w14,w16
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w14,w14,w6
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ eor w14,w14,w11
+ add w20,w20,w13 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ror w14,w14,#31
+ eor w15,w15,w17
+ eor w25,w24,w22
+ ror w27,w21,#27
+ add w24,w24,w28 // future e+=K
+ eor w15,w15,w7
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ eor w15,w15,w12
+ add w24,w24,w14 // future e+=X[i]
+ add w20,w20,w25 // e+=F(b,c,d)
+ ror w15,w15,#31
+ eor w16,w16,w19
+ eor w25,w23,w21
+ ror w27,w20,#27
+ add w23,w23,w28 // future e+=K
+ eor w16,w16,w8
+ eor w25,w25,w22
+ add w24,w24,w27 // e+=rot(a,5)
+ ror w21,w21,#2
+ eor w16,w16,w13
+ add w23,w23,w15 // future e+=X[i]
+ add w24,w24,w25 // e+=F(b,c,d)
+ ror w16,w16,#31
+ eor w17,w17,w3
+ eor w25,w22,w20
+ ror w27,w24,#27
+ add w22,w22,w28 // future e+=K
+ eor w17,w17,w9
+ eor w25,w25,w21
+ add w23,w23,w27 // e+=rot(a,5)
+ ror w20,w20,#2
+ eor w17,w17,w14
+ add w22,w22,w16 // future e+=X[i]
+ add w23,w23,w25 // e+=F(b,c,d)
+ ror w17,w17,#31
+ eor w19,w19,w4
+ eor w25,w21,w24
+ ror w27,w23,#27
+ add w21,w21,w28 // future e+=K
+ eor w19,w19,w10
+ eor w25,w25,w20
+ add w22,w22,w27 // e+=rot(a,5)
+ ror w24,w24,#2
+ eor w19,w19,w15
+ add w21,w21,w17 // future e+=X[i]
+ add w22,w22,w25 // e+=F(b,c,d)
+ ror w19,w19,#31
+ ldp w4,w5,[x0]
+ eor w25,w20,w23
+ ror w27,w22,#27
+ add w20,w20,w28 // future e+=K
+ eor w25,w25,w24
+ add w21,w21,w27 // e+=rot(a,5)
+ ror w23,w23,#2
+ add w20,w20,w19 // future e+=X[i]
+ add w21,w21,w25 // e+=F(b,c,d)
+ ldp w6,w7,[x0,#8]
+ eor w25,w24,w22
+ ror w27,w21,#27
+ eor w25,w25,w23
+ add w20,w20,w27 // e+=rot(a,5)
+ ror w22,w22,#2
+ ldr w8,[x0,#16]
+ add w20,w20,w25 // e+=F(b,c,d)
+ add w21,w21,w5
+ add w22,w22,w6
+ add w20,w20,w4
+ add w23,w23,w7
+ add w24,w24,w8
+ stp w20,w21,[x0]
+ stp w22,w23,[x0,#8]
+ str w24,[x0,#16]
+ cbnz x2,.Loop
+
+ ldp x19,x20,[sp,#16]
+ ldp x21,x22,[sp,#32]
+ ldp x23,x24,[sp,#48]
+ ldp x25,x26,[sp,#64]
+ ldp x27,x28,[sp,#80]
+ ldr x29,[sp],#96
+ ret
+.size sha1_block_data_order,.-sha1_block_data_order
+.type sha1_block_armv8,%function
+.align 6
+sha1_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ adr x4,.Lconst
+ eor v1.16b,v1.16b,v1.16b
+ ld1 {v0.4s},[x0],#16
+ ld1 {v1.s}[0],[x0]
+ sub x0,x0,#16
+ ld1 {v16.4s-v19.4s},[x4]
+
+.Loop_hw:
+ ld1 {v4.16b-v7.16b},[x1],#64
+ sub x2,x2,#1
+ rev32 v4.16b,v4.16b
+ rev32 v5.16b,v5.16b
+
+ add v20.4s,v16.4s,v4.4s
+ rev32 v6.16b,v6.16b
+ orr v22.16b,v0.16b,v0.16b // offload
+
+ add v21.4s,v16.4s,v5.4s
+ rev32 v7.16b,v7.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b
+ .inst 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0
+ add v20.4s,v16.4s,v6.4s
+ .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 1
+ .inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s
+ add v21.4s,v16.4s,v7.4s
+ .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b
+ .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 2
+ .inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s
+ add v20.4s,v16.4s,v4.4s
+ .inst 0x5e281885 //sha1su1 v5.16b,v4.16b
+ .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 3
+ .inst 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s
+ add v21.4s,v17.4s,v5.4s
+ .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b
+ .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 4
+ .inst 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s
+ add v20.4s,v17.4s,v6.4s
+ .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b
+ .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 5
+ .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
+ add v21.4s,v17.4s,v7.4s
+ .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b
+ .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 6
+ .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
+ add v20.4s,v17.4s,v4.4s
+ .inst 0x5e281885 //sha1su1 v5.16b,v4.16b
+ .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 7
+ .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
+ add v21.4s,v17.4s,v5.4s
+ .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b
+ .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 8
+ .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
+ add v20.4s,v18.4s,v6.4s
+ .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b
+ .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 9
+ .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
+ add v21.4s,v18.4s,v7.4s
+ .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b
+ .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 10
+ .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s
+ add v20.4s,v18.4s,v4.4s
+ .inst 0x5e281885 //sha1su1 v5.16b,v4.16b
+ .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 11
+ .inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s
+ add v21.4s,v18.4s,v5.4s
+ .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b
+ .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 12
+ .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s
+ add v20.4s,v18.4s,v6.4s
+ .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b
+ .inst 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 13
+ .inst 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s
+ add v21.4s,v19.4s,v7.4s
+ .inst 0x5e2818e4 //sha1su1 v4.16b,v7.16b
+ .inst 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 14
+ .inst 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s
+ add v20.4s,v19.4s,v4.4s
+ .inst 0x5e281885 //sha1su1 v5.16b,v4.16b
+ .inst 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 15
+ .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
+ add v21.4s,v19.4s,v5.4s
+ .inst 0x5e2818a6 //sha1su1 v6.16b,v5.16b
+ .inst 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 16
+ .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
+ add v20.4s,v19.4s,v6.4s
+ .inst 0x5e2818c7 //sha1su1 v7.16b,v6.16b
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 17
+ .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
+ add v21.4s,v19.4s,v7.4s
+
+ .inst 0x5e280803 //sha1h v3.16b,v0.16b // 18
+ .inst 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
+
+ .inst 0x5e280802 //sha1h v2.16b,v0.16b // 19
+ .inst 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
+
+ add v1.4s,v1.4s,v2.4s
+ add v0.4s,v0.4s,v22.4s
+
+ cbnz x2,.Loop_hw
+
+ st1 {v0.4s},[x0],#16
+ st1 {v1.s}[0],[x0]
+
+ ldr x29,[sp],#16
+ ret
+.size sha1_block_armv8,.-sha1_block_armv8
+.align 6
+.Lconst:
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79
+.LOPENSSL_armcap_P:
+.quad OPENSSL_armcap_P-.
+.asciz "SHA1 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+.comm OPENSSL_armcap_P,4,4
diff --git a/app/openssl/crypto/sha/asm/sha1-armv8.pl b/app/openssl/crypto/sha/asm/sha1-armv8.pl
new file mode 100644
index 00000000..c1f552b6
--- /dev/null
+++ b/app/openssl/crypto/sha/asm/sha1-armv8.pl
@@ -0,0 +1,333 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA1 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+# hardware-assisted software(*)
+# Apple A7 2.31 4.13 (+14%)
+# Cortex-A5x n/a n/a
+#
+# (*) Software results are presented mostly for reference purposes.
+
+$flavour = shift;
+open STDOUT,">".shift;
+
+($ctx,$inp,$num)=("x0","x1","x2");
+@Xw=map("w$_",(3..17,19));
+@Xx=map("x$_",(3..17,19));
+@V=($A,$B,$C,$D,$E)=map("w$_",(20..24));
+($t0,$t1,$t2,$K)=map("w$_",(25..28));
+
+
+sub BODY_00_19 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i<15 && !($i&1));
+ lsr @Xx[$i+1],@Xx[$i],#32
+___
+$code.=<<___ if ($i<14 && !($i&1));
+ ldr @Xx[$i+2],[$inp,#`($i+2)*4-64`]
+___
+$code.=<<___ if ($i<14 && ($i&1));
+#ifdef __ARMEB__
+ ror @Xx[$i+1],@Xx[$i+1],#32
+#else
+ rev32 @Xx[$i+1],@Xx[$i+1]
+#endif
+___
+$code.=<<___ if ($i<14);
+ bic $t0,$d,$b
+ and $t1,$c,$b
+ ror $t2,$a,#27
+ add $d,$d,$K // future e+=K
+ orr $t0,$t0,$t1
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+___
+$code.=<<___ if ($i==19);
+ movz $K,#0xeba1
+ movk $K,#0x6ed9,lsl#16
+___
+$code.=<<___ if ($i>=14);
+ eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+ bic $t0,$d,$b
+ and $t1,$c,$b
+ ror $t2,$a,#27
+ eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+ add $d,$d,$K // future e+=K
+ orr $t0,$t0,$t1
+ add $e,$e,$t2 // e+=rot(a,5)
+ eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+ ror $b,$b,#2
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+ ror @Xw[$j],@Xw[$j],#31
+___
+}
+
+sub BODY_40_59 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i==59);
+ movz $K,#0xc1d6
+ movk $K,#0xca62,lsl#16
+___
+$code.=<<___;
+ orr $t0,$b,$c
+ and $t1,$b,$c
+ eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+ ror $t2,$a,#27
+ and $t0,$t0,$d
+ add $d,$d,$K // future e+=K
+ eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+ add $e,$e,$t2 // e+=rot(a,5)
+ orr $t0,$t0,$t1
+ ror $b,$b,#2
+ eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+ ror @Xw[$j],@Xw[$j],#31
+___
+}
+
+sub BODY_20_39 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i==39);
+ movz $K,#0xbcdc
+ movk $K,#0x8f1b,lsl#16
+___
+$code.=<<___ if ($i<78);
+ eor @Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+ eor $t0,$d,$b
+ ror $t2,$a,#27
+ add $d,$d,$K // future e+=K
+ eor @Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+ eor $t0,$t0,$c
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ eor @Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+ ror @Xw[$j],@Xw[$j],#31
+___
+$code.=<<___ if ($i==78);
+ ldp @Xw[1],@Xw[2],[$ctx]
+ eor $t0,$d,$b
+ ror $t2,$a,#27
+ add $d,$d,$K // future e+=K
+ eor $t0,$t0,$c
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ add $d,$d,@Xw[($i+1)&15] // future e+=X[i]
+ add $e,$e,$t0 // e+=F(b,c,d)
+___
+$code.=<<___ if ($i==79);
+ ldp @Xw[3],@Xw[4],[$ctx,#8]
+ eor $t0,$d,$b
+ ror $t2,$a,#27
+ eor $t0,$t0,$c
+ add $e,$e,$t2 // e+=rot(a,5)
+ ror $b,$b,#2
+ ldr @Xw[5],[$ctx,#16]
+ add $e,$e,$t0 // e+=F(b,c,d)
+___
+}
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+
+.globl sha1_block_data_order
+.type sha1_block_data_order,%function
+.align 6
+sha1_block_data_order:
+ ldr x16,.LOPENSSL_armcap_P
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA1
+ b.ne .Lv8_entry
+
+ stp x29,x30,[sp,#-96]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+
+ ldp $A,$B,[$ctx]
+ ldp $C,$D,[$ctx,#8]
+ ldr $E,[$ctx,#16]
+
+.Loop:
+ ldr @Xx[0],[$inp],#64
+ movz $K,#0x7999
+ sub $num,$num,#1
+ movk $K,#0x5a82,lsl#16
+#ifdef __ARMEB__
+ ror $Xx[0],@Xx[0],#32
+#else
+ rev32 @Xx[0],@Xx[0]
+#endif
+ add $E,$E,$K // warm it up
+ add $E,$E,@Xw[0]
+___
+for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
+for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ add $B,$B,@Xw[2]
+ add $C,$C,@Xw[3]
+ add $A,$A,@Xw[1]
+ add $D,$D,@Xw[4]
+ add $E,$E,@Xw[5]
+ stp $A,$B,[$ctx]
+ stp $C,$D,[$ctx,#8]
+ str $E,[$ctx,#16]
+ cbnz $num,.Loop
+
+ ldp x19,x20,[sp,#16]
+ ldp x21,x22,[sp,#32]
+ ldp x23,x24,[sp,#48]
+ ldp x25,x26,[sp,#64]
+ ldp x27,x28,[sp,#80]
+ ldr x29,[sp],#96
+ ret
+.size sha1_block_data_order,.-sha1_block_data_order
+___
+{{{
+my ($ABCD,$E,$E0,$E1)=map("v$_.16b",(0..3));
+my @MSG=map("v$_.16b",(4..7));
+my @Kxx=map("v$_.4s",(16..19));
+my ($W0,$W1)=("v20.4s","v21.4s");
+my $ABCD_SAVE="v22.16b";
+
+$code.=<<___;
+.type sha1_block_armv8,%function
+.align 6
+sha1_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ adr x4,.Lconst
+ eor $E,$E,$E
+ ld1.32 {$ABCD},[$ctx],#16
+ ld1.32 {$E}[0],[$ctx]
+ sub $ctx,$ctx,#16
+ ld1.32 {@Kxx[0]-@Kxx[3]},[x4]
+
+.Loop_hw:
+ ld1 {@MSG[0]-@MSG[3]},[$inp],#64
+ sub $num,$num,#1
+ rev32 @MSG[0],@MSG[0]
+ rev32 @MSG[1],@MSG[1]
+
+ add.i32 $W0,@Kxx[0],@MSG[0]
+ rev32 @MSG[2],@MSG[2]
+ orr $ABCD_SAVE,$ABCD,$ABCD // offload
+
+ add.i32 $W1,@Kxx[0],@MSG[1]
+ rev32 @MSG[3],@MSG[3]
+ sha1h $E1,$ABCD
+ sha1c $ABCD,$E,$W0 // 0
+ add.i32 $W0,@Kxx[$j],@MSG[2]
+ sha1su0 @MSG[0],@MSG[1],@MSG[2]
+___
+for ($j=0,$i=1;$i<20-3;$i++) {
+my $f=("c","p","m","p")[$i/5];
+$code.=<<___;
+ sha1h $E0,$ABCD // $i
+ sha1$f $ABCD,$E1,$W1
+ add.i32 $W1,@Kxx[$j],@MSG[3]
+ sha1su1 @MSG[0],@MSG[3]
+___
+$code.=<<___ if ($i<20-4);
+ sha1su0 @MSG[1],@MSG[2],@MSG[3]
+___
+ ($E0,$E1)=($E1,$E0); ($W0,$W1)=($W1,$W0);
+ push(@MSG,shift(@MSG)); $j++ if ((($i+3)%5)==0);
+}
+$code.=<<___;
+ sha1h $E0,$ABCD // $i
+ sha1p $ABCD,$E1,$W1
+ add.i32 $W1,@Kxx[$j],@MSG[3]
+
+ sha1h $E1,$ABCD // 18
+ sha1p $ABCD,$E0,$W0
+
+ sha1h $E0,$ABCD // 19
+ sha1p $ABCD,$E1,$W1
+
+ add.i32 $E,$E,$E0
+ add.i32 $ABCD,$ABCD,$ABCD_SAVE
+
+ cbnz $num,.Loop_hw
+
+ st1.32 {$ABCD},[$ctx],#16
+ st1.32 {$E}[0],[$ctx]
+
+ ldr x29,[sp],#16
+ ret
+.size sha1_block_armv8,.-sha1_block_armv8
+.align 6
+.Lconst:
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79
+.LOPENSSL_armcap_P:
+.quad OPENSSL_armcap_P-.
+.asciz "SHA1 block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+.comm OPENSSL_armcap_P,4,4
+___
+}}}
+
+{ my %opcode = (
+ "sha1c" => 0x5e000000, "sha1p" => 0x5e001000,
+ "sha1m" => 0x5e002000, "sha1su0" => 0x5e003000,
+ "sha1h" => 0x5e280800, "sha1su1" => 0x5e281800 );
+
+ sub unsha1 {
+ my ($mnemonic,$arg)=@_;
+
+ $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+ &&
+ sprintf ".inst\t0x%08x\t//%s %s",
+ $opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+ $mnemonic,$arg;
+ }
+}
+
+foreach(split("\n",$code)) {
+
+ s/\`([^\`]*)\`/eval($1)/geo;
+
+ s/\b(sha1\w+)\s+([qv].*)/unsha1($1,$2)/geo;
+
+ s/\.\w?32\b//o and s/\.16b/\.4s/go;
+ m/(ld|st)1[^\[]+\[0\]/o and s/\.4s/\.s/go;
+
+ print $_,"\n";
+}
+
+close STDOUT;
diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.S b/app/openssl/crypto/sha/asm/sha256-armv4.S
index d4f53c1d..853d7da5 120000..100644
--- a/app/openssl/crypto/sha/asm/sha256-armv4.S
+++ b/app/openssl/crypto/sha/asm/sha256-armv4.S
@@ -1 +1,2690 @@
-sha256-armv4.s \ No newline at end of file
+#include "arm_arch.h"
+
+.text
+.code 32
+
+.type K256,%object
+.align 5
+K256:
+.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size K256,.-K256
+.word 0 @ terminator
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha256_block_data_order
+.align 5
+
+.global sha256_block_data_order
+.type sha256_block_data_order,%function
+sha256_block_data_order:
+ sub r3,pc,#8 @ sha256_block_data_order
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA256
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
+ stmdb sp!,{r0,r1,r2,r4-r11,lr}
+ ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
+ sub r14,r3,#256+32 @ K256
+ sub sp,sp,#16*4 @ alloca(X[16])
+.Loop:
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ magic
+ eor r12,r12,r12
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 0
+# if 0==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 0
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 0==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#0*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 0==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 0<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#2*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#15*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 1
+# if 1==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 1
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 1==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#1*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 1==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 1<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#3*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#0*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 2
+# if 2==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 2
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 2==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#2*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 2==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 2<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#4*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#1*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 3
+# if 3==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 3
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 3==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#3*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 3==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 3<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#5*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#2*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 4
+# if 4==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 4
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 4==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#4*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 4==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 4<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#6*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#3*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 5
+# if 5==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 5
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 5==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#5*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 5==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 5<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#7*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#4*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 6
+# if 6==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 6
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 6==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#6*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 6==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 6<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#8*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#5*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 7
+# if 7==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 7
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 7==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#7*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 7==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 7<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#9*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#6*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 8
+# if 8==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 8
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 8==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r8,r8,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#8*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 8==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 8<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#10*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#7*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 9
+# if 9==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 9
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 9==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r7,r7,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#9*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 9==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 9<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#11*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#8*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 10
+# if 10==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 10
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 10==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r6,r6,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#10*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 10==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 10<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#12*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#9*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 11
+# if 11==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 11
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 11==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r5,r5,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#11*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 11==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 11<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#13*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#10*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 12
+# if 12==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 12
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 12==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r4,r4,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#12*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 12==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 12<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#14*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#11*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 13
+# if 13==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 13
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 13==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r11,r11,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#13*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 13==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 13<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#15*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#12*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 14
+# if 14==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 14
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ ldrb r12,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r12,lsl#8
+ ldrb r12,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 14==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r10,r10,ror#5
+ orr r2,r2,r12,lsl#24
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+#endif
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#14*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 14==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 14<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#0*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#13*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ @ ldr r2,[r1],#4 @ 15
+# if 15==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+ rev r2,r2
+#else
+ @ ldrb r2,[r1,#3] @ 15
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ ldrb r3,[r1,#2]
+ ldrb r0,[r1,#1]
+ orr r2,r2,r3,lsl#8
+ ldrb r3,[r1],#4
+ orr r2,r2,r0,lsl#16
+# if 15==15
+ str r1,[sp,#17*4] @ make room for r1
+# endif
+ eor r0,r9,r9,ror#5
+ orr r2,r2,r3,lsl#24
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+#endif
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#15*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 15==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 15<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#1*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#14*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+.Lrounds_16_xx:
+ @ ldr r2,[sp,#1*4] @ 16
+ @ ldr r1,[sp,#14*4]
+ mov r0,r2,ror#7
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#0*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#9*4]
+
+ add r12,r12,r0
+ eor r0,r8,r8,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#0*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 16==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 16<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#2*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#15*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#2*4] @ 17
+ @ ldr r1,[sp,#15*4]
+ mov r0,r2,ror#7
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#1*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#10*4]
+
+ add r3,r3,r0
+ eor r0,r7,r7,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#1*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 17==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 17<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#3*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#0*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#3*4] @ 18
+ @ ldr r1,[sp,#0*4]
+ mov r0,r2,ror#7
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#2*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#11*4]
+
+ add r12,r12,r0
+ eor r0,r6,r6,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#2*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 18==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 18<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#4*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#1*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#4*4] @ 19
+ @ ldr r1,[sp,#1*4]
+ mov r0,r2,ror#7
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#3*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#12*4]
+
+ add r3,r3,r0
+ eor r0,r5,r5,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#3*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 19==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 19<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#5*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#2*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#5*4] @ 20
+ @ ldr r1,[sp,#2*4]
+ mov r0,r2,ror#7
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#4*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#13*4]
+
+ add r12,r12,r0
+ eor r0,r4,r4,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#4*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 20==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 20<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#6*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#3*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#6*4] @ 21
+ @ ldr r1,[sp,#3*4]
+ mov r0,r2,ror#7
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#5*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#14*4]
+
+ add r3,r3,r0
+ eor r0,r11,r11,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#5*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 21==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 21<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#7*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#4*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#7*4] @ 22
+ @ ldr r1,[sp,#4*4]
+ mov r0,r2,ror#7
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#6*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#15*4]
+
+ add r12,r12,r0
+ eor r0,r10,r10,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#6*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 22==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 22<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#8*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#5*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#8*4] @ 23
+ @ ldr r1,[sp,#5*4]
+ mov r0,r2,ror#7
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#7*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#0*4]
+
+ add r3,r3,r0
+ eor r0,r9,r9,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#7*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 23==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 23<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#9*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#6*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#9*4] @ 24
+ @ ldr r1,[sp,#6*4]
+ mov r0,r2,ror#7
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#8*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#1*4]
+
+ add r12,r12,r0
+ eor r0,r8,r8,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r8,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r11,r11,r2 @ h+=X[i]
+ str r2,[sp,#8*4]
+ eor r2,r9,r10
+ add r11,r11,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r8
+ add r11,r11,r12 @ h+=K256[i]
+ eor r2,r2,r10 @ Ch(e,f,g)
+ eor r0,r4,r4,ror#11
+ add r11,r11,r2 @ h+=Ch(e,f,g)
+#if 24==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 24<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r4,r5 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#10*4] @ from future BODY_16_xx
+ eor r12,r4,r5 @ a^b, b^c in next round
+ ldr r1,[sp,#7*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r4,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r7,r7,r11 @ d+=h
+ eor r3,r3,r5 @ Maj(a,b,c)
+ add r11,r11,r0,ror#2 @ h+=Sigma0(a)
+ @ add r11,r11,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#10*4] @ 25
+ @ ldr r1,[sp,#7*4]
+ mov r0,r2,ror#7
+ add r11,r11,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#9*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#2*4]
+
+ add r3,r3,r0
+ eor r0,r7,r7,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r7,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r10,r10,r2 @ h+=X[i]
+ str r2,[sp,#9*4]
+ eor r2,r8,r9
+ add r10,r10,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r7
+ add r10,r10,r3 @ h+=K256[i]
+ eor r2,r2,r9 @ Ch(e,f,g)
+ eor r0,r11,r11,ror#11
+ add r10,r10,r2 @ h+=Ch(e,f,g)
+#if 25==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 25<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r11,r4 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#11*4] @ from future BODY_16_xx
+ eor r3,r11,r4 @ a^b, b^c in next round
+ ldr r1,[sp,#8*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r11,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r6,r6,r10 @ d+=h
+ eor r12,r12,r4 @ Maj(a,b,c)
+ add r10,r10,r0,ror#2 @ h+=Sigma0(a)
+ @ add r10,r10,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#11*4] @ 26
+ @ ldr r1,[sp,#8*4]
+ mov r0,r2,ror#7
+ add r10,r10,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#10*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#3*4]
+
+ add r12,r12,r0
+ eor r0,r6,r6,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r6,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r9,r9,r2 @ h+=X[i]
+ str r2,[sp,#10*4]
+ eor r2,r7,r8
+ add r9,r9,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r6
+ add r9,r9,r12 @ h+=K256[i]
+ eor r2,r2,r8 @ Ch(e,f,g)
+ eor r0,r10,r10,ror#11
+ add r9,r9,r2 @ h+=Ch(e,f,g)
+#if 26==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 26<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r10,r11 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#12*4] @ from future BODY_16_xx
+ eor r12,r10,r11 @ a^b, b^c in next round
+ ldr r1,[sp,#9*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r10,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r5,r5,r9 @ d+=h
+ eor r3,r3,r11 @ Maj(a,b,c)
+ add r9,r9,r0,ror#2 @ h+=Sigma0(a)
+ @ add r9,r9,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#12*4] @ 27
+ @ ldr r1,[sp,#9*4]
+ mov r0,r2,ror#7
+ add r9,r9,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#11*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#4*4]
+
+ add r3,r3,r0
+ eor r0,r5,r5,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r5,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r8,r8,r2 @ h+=X[i]
+ str r2,[sp,#11*4]
+ eor r2,r6,r7
+ add r8,r8,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r5
+ add r8,r8,r3 @ h+=K256[i]
+ eor r2,r2,r7 @ Ch(e,f,g)
+ eor r0,r9,r9,ror#11
+ add r8,r8,r2 @ h+=Ch(e,f,g)
+#if 27==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 27<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r9,r10 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#13*4] @ from future BODY_16_xx
+ eor r3,r9,r10 @ a^b, b^c in next round
+ ldr r1,[sp,#10*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r9,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r4,r4,r8 @ d+=h
+ eor r12,r12,r10 @ Maj(a,b,c)
+ add r8,r8,r0,ror#2 @ h+=Sigma0(a)
+ @ add r8,r8,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#13*4] @ 28
+ @ ldr r1,[sp,#10*4]
+ mov r0,r2,ror#7
+ add r8,r8,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#12*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#5*4]
+
+ add r12,r12,r0
+ eor r0,r4,r4,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r4,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r7,r7,r2 @ h+=X[i]
+ str r2,[sp,#12*4]
+ eor r2,r5,r6
+ add r7,r7,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r4
+ add r7,r7,r12 @ h+=K256[i]
+ eor r2,r2,r6 @ Ch(e,f,g)
+ eor r0,r8,r8,ror#11
+ add r7,r7,r2 @ h+=Ch(e,f,g)
+#if 28==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 28<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r8,r9 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#14*4] @ from future BODY_16_xx
+ eor r12,r8,r9 @ a^b, b^c in next round
+ ldr r1,[sp,#11*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r8,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r11,r11,r7 @ d+=h
+ eor r3,r3,r9 @ Maj(a,b,c)
+ add r7,r7,r0,ror#2 @ h+=Sigma0(a)
+ @ add r7,r7,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#14*4] @ 29
+ @ ldr r1,[sp,#11*4]
+ mov r0,r2,ror#7
+ add r7,r7,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#13*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#6*4]
+
+ add r3,r3,r0
+ eor r0,r11,r11,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r11,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r6,r6,r2 @ h+=X[i]
+ str r2,[sp,#13*4]
+ eor r2,r4,r5
+ add r6,r6,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r11
+ add r6,r6,r3 @ h+=K256[i]
+ eor r2,r2,r5 @ Ch(e,f,g)
+ eor r0,r7,r7,ror#11
+ add r6,r6,r2 @ h+=Ch(e,f,g)
+#if 29==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 29<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r7,r8 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#15*4] @ from future BODY_16_xx
+ eor r3,r7,r8 @ a^b, b^c in next round
+ ldr r1,[sp,#12*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r7,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r10,r10,r6 @ d+=h
+ eor r12,r12,r8 @ Maj(a,b,c)
+ add r6,r6,r0,ror#2 @ h+=Sigma0(a)
+ @ add r6,r6,r12 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#15*4] @ 30
+ @ ldr r1,[sp,#12*4]
+ mov r0,r2,ror#7
+ add r6,r6,r12 @ h+=Maj(a,b,c) from the past
+ mov r12,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r12,r12,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#14*4]
+ eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#7*4]
+
+ add r12,r12,r0
+ eor r0,r10,r10,ror#5 @ from BODY_00_15
+ add r2,r2,r12
+ eor r0,r0,r10,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r12,[r14],#4 @ *K256++
+ add r5,r5,r2 @ h+=X[i]
+ str r2,[sp,#14*4]
+ eor r2,r11,r4
+ add r5,r5,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r10
+ add r5,r5,r12 @ h+=K256[i]
+ eor r2,r2,r4 @ Ch(e,f,g)
+ eor r0,r6,r6,ror#11
+ add r5,r5,r2 @ h+=Ch(e,f,g)
+#if 30==31
+ and r12,r12,#0xff
+ cmp r12,#0xf2 @ done?
+#endif
+#if 30<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r12,r6,r7 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#0*4] @ from future BODY_16_xx
+ eor r12,r6,r7 @ a^b, b^c in next round
+ ldr r1,[sp,#13*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r6,ror#20 @ Sigma0(a)
+ and r3,r3,r12 @ (b^c)&=(a^b)
+ add r9,r9,r5 @ d+=h
+ eor r3,r3,r7 @ Maj(a,b,c)
+ add r5,r5,r0,ror#2 @ h+=Sigma0(a)
+ @ add r5,r5,r3 @ h+=Maj(a,b,c)
+ @ ldr r2,[sp,#0*4] @ 31
+ @ ldr r1,[sp,#13*4]
+ mov r0,r2,ror#7
+ add r5,r5,r3 @ h+=Maj(a,b,c) from the past
+ mov r3,r1,ror#17
+ eor r0,r0,r2,ror#18
+ eor r3,r3,r1,ror#19
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ ldr r2,[sp,#15*4]
+ eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
+ ldr r1,[sp,#8*4]
+
+ add r3,r3,r0
+ eor r0,r9,r9,ror#5 @ from BODY_00_15
+ add r2,r2,r3
+ eor r0,r0,r9,ror#19 @ Sigma1(e)
+ add r2,r2,r1 @ X[i]
+ ldr r3,[r14],#4 @ *K256++
+ add r4,r4,r2 @ h+=X[i]
+ str r2,[sp,#15*4]
+ eor r2,r10,r11
+ add r4,r4,r0,ror#6 @ h+=Sigma1(e)
+ and r2,r2,r9
+ add r4,r4,r3 @ h+=K256[i]
+ eor r2,r2,r11 @ Ch(e,f,g)
+ eor r0,r5,r5,ror#11
+ add r4,r4,r2 @ h+=Ch(e,f,g)
+#if 31==31
+ and r3,r3,#0xff
+ cmp r3,#0xf2 @ done?
+#endif
+#if 31<15
+# if __ARM_ARCH__>=7
+ ldr r2,[r1],#4 @ prefetch
+# else
+ ldrb r2,[r1,#3]
+# endif
+ eor r3,r5,r6 @ a^b, b^c in next round
+#else
+ ldr r2,[sp,#1*4] @ from future BODY_16_xx
+ eor r3,r5,r6 @ a^b, b^c in next round
+ ldr r1,[sp,#14*4] @ from future BODY_16_xx
+#endif
+ eor r0,r0,r5,ror#20 @ Sigma0(a)
+ and r12,r12,r3 @ (b^c)&=(a^b)
+ add r8,r8,r4 @ d+=h
+ eor r12,r12,r6 @ Maj(a,b,c)
+ add r4,r4,r0,ror#2 @ h+=Sigma0(a)
+ @ add r4,r4,r12 @ h+=Maj(a,b,c)
+ ldreq r3,[sp,#16*4] @ pull ctx
+ bne .Lrounds_16_xx
+
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldr r0,[r3,#0]
+ ldr r2,[r3,#4]
+ ldr r12,[r3,#8]
+ add r4,r4,r0
+ ldr r0,[r3,#12]
+ add r5,r5,r2
+ ldr r2,[r3,#16]
+ add r6,r6,r12
+ ldr r12,[r3,#20]
+ add r7,r7,r0
+ ldr r0,[r3,#24]
+ add r8,r8,r2
+ ldr r2,[r3,#28]
+ add r9,r9,r12
+ ldr r1,[sp,#17*4] @ pull inp
+ ldr r12,[sp,#18*4] @ pull inp+len
+ add r10,r10,r0
+ add r11,r11,r2
+ stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
+ cmp r1,r12
+ sub r14,r14,#256 @ rewind Ktbl
+ bne .Loop
+
+ add sp,sp,#19*4 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r11,pc}
+#else
+ ldmia sp!,{r4-r11,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size sha256_block_data_order,.-sha256_block_data_order
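For reference, here is a minimal C sketch of the single round that the unrolled BODY_00_15 code above computes; the names (ROR32, sha256_round) are illustrative and the sketch is not part of the generated file. The assembly splits Sigma1 into ror#5/ror#19 under an outer ror#6 (and ror#11/ror#20 under ror#2 for Sigma0) and defers the Maj addition to the next round ("from the past"), but the arithmetic is the same.

    #include <stdint.h>

    #define ROR32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))

    /* One SHA-256 round over state s[8] = {a,b,c,d,e,f,g,h};
     * Xi is the schedule word, Ki the round constant from K256. */
    static void sha256_round(uint32_t s[8], uint32_t Xi, uint32_t Ki)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t Sigma1 = ROR32(e, 6) ^ ROR32(e, 11) ^ ROR32(e, 25);
        uint32_t Ch     = ((f ^ g) & e) ^ g;        /* == (e&f) ^ (~e&g) */
        uint32_t Sigma0 = ROR32(a, 2) ^ ROR32(a, 13) ^ ROR32(a, 22);
        uint32_t Maj    = ((a ^ b) & (b ^ c)) ^ b;  /* == (a&b)^(a&c)^(b&c) */

        uint32_t T1 = h + Sigma1 + Ch + Ki + Xi;    /* h += X[i]+K256[i]+Sigma1+Ch */
        uint32_t T2 = Sigma0 + Maj;

        s[7] = g; s[6] = f; s[5] = e; s[4] = d + T1;   /* "d+=h" above */
        s[3] = c; s[2] = b; s[1] = a; s[0] = T1 + T2;  /* "h+=Sigma0(a)" plus Maj */
    }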
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type sha256_block_data_order_neon,%function
+.align 4
+sha256_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+
+ mov r12,sp
+ sub sp,sp,#16*4+16 @ alloca
+ sub r14,r3,#256+32 @ K256
+ bic sp,sp,#15 @ align for 128-bit stores
+
+ vld1.8 {q0},[r1]!
+ vld1.8 {q1},[r1]!
+ vld1.8 {q2},[r1]!
+ vld1.8 {q3},[r1]!
+ vld1.32 {q8},[r14,:128]!
+ vld1.32 {q9},[r14,:128]!
+ vld1.32 {q10},[r14,:128]!
+ vld1.32 {q11},[r14,:128]!
+ vrev32.8 q0,q0 @ yes, even on
+ str r0,[sp,#64]
+ vrev32.8 q1,q1 @ big-endian
+ str r1,[sp,#68]
+ mov r1,sp
+ vrev32.8 q2,q2
+ str r2,[sp,#72]
+ vrev32.8 q3,q3
+ str r12,[sp,#76] @ save original sp
+ vadd.i32 q8,q8,q0
+ vadd.i32 q9,q9,q1
+ vst1.32 {q8},[r1,:128]!
+ vadd.i32 q10,q10,q2
+ vst1.32 {q9},[r1,:128]!
+ vadd.i32 q11,q11,q3
+ vst1.32 {q10},[r1,:128]!
+ vst1.32 {q11},[r1,:128]!
+
+ ldmia r0,{r4-r11}
+ sub r1,r1,#64
+ ldr r2,[sp,#0]
+ eor r12,r12,r12
+ eor r3,r5,r6
+ b .L_00_48
+
+.align 4
+.L_00_48:
+ vext.8 q8,q0,q1,#4
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ vext.8 q9,q2,q3,#4
+ add r4,r4,r12
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vadd.i32 q0,q0,q9
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#4]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ veor q9,q9,q10
+ add r10,r10,r2
+ vsli.32 q11,q8,#14
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ vshr.u32 d24,d7,#17
+ add r11,r11,r3
+ and r2,r2,r7
+ veor q9,q9,q11
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ vsli.32 d24,d7,#15
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ vshr.u32 d25,d7,#10
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ vadd.i32 q0,q0,q9
+ add r10,r10,r2
+ ldr r2,[sp,#8]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r6,r6,r10
+ vshr.u32 d24,d7,#19
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ vsli.32 d24,d7,#13
+ add r9,r9,r2
+ eor r2,r7,r8
+ veor d25,d25,d24
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ vadd.i32 d0,d0,d25
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ vshr.u32 d24,d0,#17
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ vsli.32 d24,d0,#15
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ vshr.u32 d25,d0,#10
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#12]
+ and r3,r3,r12
+ vshr.u32 d24,d0,#19
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ vld1.32 {q8},[r14,:128]!
+ add r8,r8,r2
+ vsli.32 d24,d0,#13
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ veor d25,d25,d24
+ add r9,r9,r3
+ and r2,r2,r5
+ vadd.i32 d1,d1,d25
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ vadd.i32 q8,q8,q0
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#16]
+ and r12,r12,r3
+ add r4,r4,r8
+ vst1.32 {q8},[r1,:128]!
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vext.8 q8,q1,q2,#4
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ vext.8 q9,q3,q0,#4
+ add r8,r8,r12
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vadd.i32 q1,q1,q9
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#20]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ veor q9,q9,q10
+ add r6,r6,r2
+ vsli.32 q11,q8,#14
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ vshr.u32 d24,d1,#17
+ add r7,r7,r3
+ and r2,r2,r11
+ veor q9,q9,q11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ vsli.32 d24,d1,#15
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ vshr.u32 d25,d1,#10
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ vadd.i32 q1,q1,q9
+ add r6,r6,r2
+ ldr r2,[sp,#24]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r10,r10,r6
+ vshr.u32 d24,d1,#19
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ vsli.32 d24,d1,#13
+ add r5,r5,r2
+ eor r2,r11,r4
+ veor d25,d25,d24
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ vadd.i32 d2,d2,d25
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ vshr.u32 d24,d2,#17
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ vsli.32 d24,d2,#15
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ vshr.u32 d25,d2,#10
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#28]
+ and r3,r3,r12
+ vshr.u32 d24,d2,#19
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ vld1.32 {q8},[r14,:128]!
+ add r4,r4,r2
+ vsli.32 d24,d2,#13
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ veor d25,d25,d24
+ add r5,r5,r3
+ and r2,r2,r9
+ vadd.i32 d3,d3,d25
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ vadd.i32 q8,q8,q1
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[sp,#32]
+ and r12,r12,r3
+ add r8,r8,r4
+ vst1.32 {q8},[r1,:128]!
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ vext.8 q8,q2,q3,#4
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ vext.8 q9,q0,q1,#4
+ add r4,r4,r12
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vadd.i32 q2,q2,q9
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#36]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ veor q9,q9,q10
+ add r10,r10,r2
+ vsli.32 q11,q8,#14
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ vshr.u32 d24,d3,#17
+ add r11,r11,r3
+ and r2,r2,r7
+ veor q9,q9,q11
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ vsli.32 d24,d3,#15
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ vshr.u32 d25,d3,#10
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ vadd.i32 q2,q2,q9
+ add r10,r10,r2
+ ldr r2,[sp,#40]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r6,r6,r10
+ vshr.u32 d24,d3,#19
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ vsli.32 d24,d3,#13
+ add r9,r9,r2
+ eor r2,r7,r8
+ veor d25,d25,d24
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ vadd.i32 d4,d4,d25
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ vshr.u32 d24,d4,#17
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ vsli.32 d24,d4,#15
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ vshr.u32 d25,d4,#10
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#44]
+ and r3,r3,r12
+ vshr.u32 d24,d4,#19
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ vld1.32 {q8},[r14,:128]!
+ add r8,r8,r2
+ vsli.32 d24,d4,#13
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ veor d25,d25,d24
+ add r9,r9,r3
+ and r2,r2,r5
+ vadd.i32 d5,d5,d25
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ vadd.i32 q8,q8,q2
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#48]
+ and r12,r12,r3
+ add r4,r4,r8
+ vst1.32 {q8},[r1,:128]!
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vext.8 q8,q3,q0,#4
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ vext.8 q9,q1,q2,#4
+ add r8,r8,r12
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ vshr.u32 q10,q8,#7
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vadd.i32 q3,q3,q9
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ vshr.u32 q9,q8,#3
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vsli.32 q10,q8,#25
+ ldr r2,[sp,#52]
+ and r3,r3,r12
+ vshr.u32 q11,q8,#18
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ veor q9,q9,q10
+ add r6,r6,r2
+ vsli.32 q11,q8,#14
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ vshr.u32 d24,d5,#17
+ add r7,r7,r3
+ and r2,r2,r11
+ veor q9,q9,q11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ vsli.32 d24,d5,#15
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ vshr.u32 d25,d5,#10
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ vadd.i32 q3,q3,q9
+ add r6,r6,r2
+ ldr r2,[sp,#56]
+ veor d25,d25,d24
+ and r12,r12,r3
+ add r10,r10,r6
+ vshr.u32 d24,d5,#19
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ vsli.32 d24,d5,#13
+ add r5,r5,r2
+ eor r2,r11,r4
+ veor d25,d25,d24
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ vadd.i32 d6,d6,d25
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ vshr.u32 d24,d6,#17
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ vsli.32 d24,d6,#15
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ vshr.u32 d25,d6,#10
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ veor d25,d25,d24
+ ldr r2,[sp,#60]
+ and r3,r3,r12
+ vshr.u32 d24,d6,#19
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ vld1.32 {q8},[r14,:128]!
+ add r4,r4,r2
+ vsli.32 d24,d6,#13
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ veor d25,d25,d24
+ add r5,r5,r3
+ and r2,r2,r9
+ vadd.i32 d7,d7,d25
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ vadd.i32 q8,q8,q3
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[r14]
+ and r12,r12,r3
+ add r8,r8,r4
+ vst1.32 {q8},[r1,:128]!
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ teq r2,#0 @ check for K256 terminator
+ ldr r2,[sp,#0]
+ sub r1,r1,#64
+ bne .L_00_48
+
+ ldr r1,[sp,#68]
+ ldr r0,[sp,#72]
+ sub r14,r14,#256 @ rewind r14
+ teq r1,r0
+ subeq r1,r1,#64 @ avoid SEGV
+ vld1.8 {q0},[r1]! @ load next input block
+ vld1.8 {q1},[r1]!
+ vld1.8 {q2},[r1]!
+ vld1.8 {q3},[r1]!
+ strne r1,[sp,#68]
+ mov r1,sp
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vrev32.8 q0,q0
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vadd.i32 q8,q8,q0
+ ldr r2,[sp,#4]
+ and r3,r3,r12
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ add r10,r10,r2
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3
+ and r2,r2,r7
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ add r10,r10,r2
+ ldr r2,[sp,#8]
+ and r12,r12,r3
+ add r6,r6,r10
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ add r9,r9,r2
+ eor r2,r7,r8
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ ldr r2,[sp,#12]
+ and r3,r3,r12
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ add r8,r8,r2
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3
+ and r2,r2,r5
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#16]
+ and r12,r12,r3
+ add r4,r4,r8
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vst1.32 {q8},[r1,:128]!
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vrev32.8 q1,q1
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vadd.i32 q8,q8,q1
+ ldr r2,[sp,#20]
+ and r3,r3,r12
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ add r6,r6,r2
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3
+ and r2,r2,r11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ add r6,r6,r2
+ ldr r2,[sp,#24]
+ and r12,r12,r3
+ add r10,r10,r6
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ add r5,r5,r2
+ eor r2,r11,r4
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ ldr r2,[sp,#28]
+ and r3,r3,r12
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ add r4,r4,r2
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3
+ and r2,r2,r9
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[sp,#32]
+ and r12,r12,r3
+ add r8,r8,r4
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ vst1.32 {q8},[r1,:128]!
+ add r11,r11,r2
+ eor r2,r9,r10
+ eor r0,r8,r8,ror#5
+ add r4,r4,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r8
+ eor r12,r0,r8,ror#19
+ eor r0,r4,r4,ror#11
+ eor r2,r2,r10
+ vrev32.8 q2,q2
+ add r11,r11,r12,ror#6
+ eor r12,r4,r5
+ eor r0,r0,r4,ror#20
+ add r11,r11,r2
+ vadd.i32 q8,q8,q2
+ ldr r2,[sp,#36]
+ and r3,r3,r12
+ add r7,r7,r11
+ add r11,r11,r0,ror#2
+ eor r3,r3,r5
+ add r10,r10,r2
+ eor r2,r8,r9
+ eor r0,r7,r7,ror#5
+ add r11,r11,r3
+ and r2,r2,r7
+ eor r3,r0,r7,ror#19
+ eor r0,r11,r11,ror#11
+ eor r2,r2,r9
+ add r10,r10,r3,ror#6
+ eor r3,r11,r4
+ eor r0,r0,r11,ror#20
+ add r10,r10,r2
+ ldr r2,[sp,#40]
+ and r12,r12,r3
+ add r6,r6,r10
+ add r10,r10,r0,ror#2
+ eor r12,r12,r4
+ add r9,r9,r2
+ eor r2,r7,r8
+ eor r0,r6,r6,ror#5
+ add r10,r10,r12
+ and r2,r2,r6
+ eor r12,r0,r6,ror#19
+ eor r0,r10,r10,ror#11
+ eor r2,r2,r8
+ add r9,r9,r12,ror#6
+ eor r12,r10,r11
+ eor r0,r0,r10,ror#20
+ add r9,r9,r2
+ ldr r2,[sp,#44]
+ and r3,r3,r12
+ add r5,r5,r9
+ add r9,r9,r0,ror#2
+ eor r3,r3,r11
+ add r8,r8,r2
+ eor r2,r6,r7
+ eor r0,r5,r5,ror#5
+ add r9,r9,r3
+ and r2,r2,r5
+ eor r3,r0,r5,ror#19
+ eor r0,r9,r9,ror#11
+ eor r2,r2,r7
+ add r8,r8,r3,ror#6
+ eor r3,r9,r10
+ eor r0,r0,r9,ror#20
+ add r8,r8,r2
+ ldr r2,[sp,#48]
+ and r12,r12,r3
+ add r4,r4,r8
+ add r8,r8,r0,ror#2
+ eor r12,r12,r10
+ vst1.32 {q8},[r1,:128]!
+ add r7,r7,r2
+ eor r2,r5,r6
+ eor r0,r4,r4,ror#5
+ add r8,r8,r12
+ vld1.32 {q8},[r14,:128]!
+ and r2,r2,r4
+ eor r12,r0,r4,ror#19
+ eor r0,r8,r8,ror#11
+ eor r2,r2,r6
+ vrev32.8 q3,q3
+ add r7,r7,r12,ror#6
+ eor r12,r8,r9
+ eor r0,r0,r8,ror#20
+ add r7,r7,r2
+ vadd.i32 q8,q8,q3
+ ldr r2,[sp,#52]
+ and r3,r3,r12
+ add r11,r11,r7
+ add r7,r7,r0,ror#2
+ eor r3,r3,r9
+ add r6,r6,r2
+ eor r2,r4,r5
+ eor r0,r11,r11,ror#5
+ add r7,r7,r3
+ and r2,r2,r11
+ eor r3,r0,r11,ror#19
+ eor r0,r7,r7,ror#11
+ eor r2,r2,r5
+ add r6,r6,r3,ror#6
+ eor r3,r7,r8
+ eor r0,r0,r7,ror#20
+ add r6,r6,r2
+ ldr r2,[sp,#56]
+ and r12,r12,r3
+ add r10,r10,r6
+ add r6,r6,r0,ror#2
+ eor r12,r12,r8
+ add r5,r5,r2
+ eor r2,r11,r4
+ eor r0,r10,r10,ror#5
+ add r6,r6,r12
+ and r2,r2,r10
+ eor r12,r0,r10,ror#19
+ eor r0,r6,r6,ror#11
+ eor r2,r2,r4
+ add r5,r5,r12,ror#6
+ eor r12,r6,r7
+ eor r0,r0,r6,ror#20
+ add r5,r5,r2
+ ldr r2,[sp,#60]
+ and r3,r3,r12
+ add r9,r9,r5
+ add r5,r5,r0,ror#2
+ eor r3,r3,r7
+ add r4,r4,r2
+ eor r2,r10,r11
+ eor r0,r9,r9,ror#5
+ add r5,r5,r3
+ and r2,r2,r9
+ eor r3,r0,r9,ror#19
+ eor r0,r5,r5,ror#11
+ eor r2,r2,r11
+ add r4,r4,r3,ror#6
+ eor r3,r5,r6
+ eor r0,r0,r5,ror#20
+ add r4,r4,r2
+ ldr r2,[sp,#64]
+ and r12,r12,r3
+ add r8,r8,r4
+ add r4,r4,r0,ror#2
+ eor r12,r12,r6
+ vst1.32 {q8},[r1,:128]!
+ ldr r0,[r2,#0]
+ add r4,r4,r12 @ h+=Maj(a,b,c) from the past
+ ldr r12,[r2,#4]
+ ldr r3,[r2,#8]
+ ldr r1,[r2,#12]
+ add r4,r4,r0 @ accumulate
+ ldr r0,[r2,#16]
+ add r5,r5,r12
+ ldr r12,[r2,#20]
+ add r6,r6,r3
+ ldr r3,[r2,#24]
+ add r7,r7,r1
+ ldr r1,[r2,#28]
+ add r8,r8,r0
+ str r4,[r2],#4
+ add r9,r9,r12
+ str r5,[r2],#4
+ add r10,r10,r3
+ str r6,[r2],#4
+ add r11,r11,r1
+ str r7,[r2],#4
+ stmia r2,{r8-r11}
+
+ movne r1,sp
+ ldrne r2,[sp,#0]
+ eorne r12,r12,r12
+ ldreq sp,[sp,#76] @ restore original sp
+ eorne r3,r5,r6
+ bne .L_00_48
+
+ ldmia sp!,{r4-r12,pc}
+.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+#if __ARM_ARCH__>=7
+.type sha256_block_data_order_armv8,%function
+.align 5
+sha256_block_data_order_armv8:
+.LARMv8:
+ vld1.32 {q0,q1},[r0]
+ sub r3,r3,#sha256_block_data_order-K256
+
+.Loop_v8:
+ vld1.8 {q8-q9},[r1]!
+ vld1.8 {q10-q11},[r1]!
+ vld1.32 {q12},[r3]!
+ vrev32.8 q8,q8
+ vrev32.8 q9,q9
+ vrev32.8 q10,q10
+ vrev32.8 q11,q11
+ vmov q14,q0 @ offload
+ vmov q15,q1
+ teq r1,r2
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q10
+ .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q11
+ .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q10
+ .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q11
+ .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q10
+ .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q11
+ .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
+ vadd.i32 q12,q12,q8
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+
+ vld1.32 {q12},[r3]!
+ vadd.i32 q13,q13,q9
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+
+ vld1.32 {q13},[r3]
+ vadd.i32 q12,q12,q10
+ sub r3,r3,#256-16 @ rewind
+ vmov q2,q0
+ .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
+ .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+
+ vadd.i32 q13,q13,q11
+ vmov q2,q0
+ .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
+ .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+
+ vadd.i32 q0,q0,q14
+ vadd.i32 q1,q1,q15
+ bne .Loop_v8
+
+ vst1.32 {q0,q1},[r0]
+
+ bx lr @ bx lr
+.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
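The ".byte" sequences with "@ sha256h"/"@ sha256su1" comments in the ARMv8 path above are hand-assembled instruction words, emitted because older assemblers know neither these mnemonics nor the .inst directive; the encoder is the unsha256 helper visible in the perl diff further down. A small C sketch of the same encoding for sha256h, reusing the base opcode and field layout from that helper (function and macro names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define SHA256H_BASE 0xf3000c40u   /* from the %opcode table in sha256-armv4.pl */

    /* Encode "sha256h qD,qN,qM" the way the perl unsha256 helper does. */
    static uint32_t encode_sha256h(unsigned d, unsigned n, unsigned m)
    {
        return SHA256H_BASE
             | ((d & 7) << 13) | ((d & 8) << 19)   /* Vd, split across the word */
             | ((n & 7) << 17) | ((n & 8) << 4)    /* Vn */
             | ((m & 7) << 1)  | ((m & 8) << 2);   /* Vm */
    }

    int main(void)
    {
        uint32_t w = encode_sha256h(0, 1, 12);     /* sha256h q0,q1,q12 */
        /* ARMv7 instruction words are stored little-endian, hence the byte order */
        printf(".byte 0x%02x,0x%02x,0x%02x,0x%02x\n",
               w & 0xff, (w >> 8) & 0xff, (w >> 16) & 0xff, (w >> 24) & 0xff);
        return 0;   /* prints .byte 0x68,0x0c,0x02,0xf3, as emitted above */
    }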
+.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+.comm OPENSSL_armcap_P,4,4
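Both the scalar .Lrounds_16_xx loop and the NEON .L_00_48 path in the file above compute the same message-schedule expansion (the ror#7/ror#18/lsr#3 and ror#17/ror#19/lsr#10 sequences). A plain-C sketch of that update over the 16-word circular buffer, with illustrative names and not taken from the generated file:

    #include <stdint.h>

    #define ROR32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))

    /* For round i (16..63), update the 16-entry window X[] in place:
     * X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2]). */
    static uint32_t sha256_schedule(uint32_t X[16], int i)
    {
        uint32_t w15 = X[(i + 1)  & 15];   /* slot currently holding X[i-15] */
        uint32_t w2  = X[(i + 14) & 15];   /* slot currently holding X[i-2]  */
        uint32_t sigma0 = ROR32(w15, 7) ^ ROR32(w15, 18) ^ (w15 >> 3);
        uint32_t sigma1 = ROR32(w2, 17) ^ ROR32(w2, 19)  ^ (w2  >> 10);

        X[i & 15] += sigma0 + sigma1 + X[(i + 9) & 15];   /* X[i-16] += ... + X[i-7] */
        return X[i & 15];
    }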
diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.pl b/app/openssl/crypto/sha/asm/sha256-armv4.pl
index 9c84e8d9..505ca8f3 100644
--- a/app/openssl/crypto/sha/asm/sha256-armv4.pl
+++ b/app/openssl/crypto/sha/asm/sha256-armv4.pl
@@ -1,7 +1,7 @@
#!/usr/bin/env perl
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -21,15 +21,27 @@
# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in 16%
-# improvement on Cortex A8 core and ~17 cycles per processed byte.
+# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+# September 2013.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process one
+# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+# code (meaning that the latter performs sub-optimally; nothing was done
+# about it).
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.0 cpb on Apple A7.
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$ctx="r0"; $t0="r0";
-$inp="r1"; $t3="r1";
+$inp="r1"; $t4="r1";
$len="r2"; $t1="r2";
-$T1="r3";
+$T1="r3"; $t3="r3";
$A="r4";
$B="r5";
$C="r6";
@@ -52,71 +64,88 @@ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___ if ($i<16);
#if __ARM_ARCH__>=7
- ldr $T1,[$inp],#4
+ @ ldr $t1,[$inp],#4 @ $i
+# if $i==15
+ str $inp,[sp,#17*4] @ make room for $t4
+# endif
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
+ rev $t1,$t1
#else
- ldrb $T1,[$inp,#3] @ $i
+ @ ldrb $t1,[$inp,#3] @ $i
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
ldrb $t2,[$inp,#2]
- ldrb $t1,[$inp,#1]
- ldrb $t0,[$inp],#4
- orr $T1,$T1,$t2,lsl#8
- orr $T1,$T1,$t1,lsl#16
- orr $T1,$T1,$t0,lsl#24
+ ldrb $t0,[$inp,#1]
+ orr $t1,$t1,$t2,lsl#8
+ ldrb $t2,[$inp],#4
+ orr $t1,$t1,$t0,lsl#16
+# if $i==15
+ str $inp,[sp,#17*4] @ make room for $t4
+# endif
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+ orr $t1,$t1,$t2,lsl#24
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
#endif
___
$code.=<<___;
- mov $t0,$e,ror#$Sigma1[0]
ldr $t2,[$Ktbl],#4 @ *K256++
- eor $t0,$t0,$e,ror#$Sigma1[1]
+ add $h,$h,$t1 @ h+=X[i]
+ str $t1,[sp,#`$i%16`*4]
eor $t1,$f,$g
-#if $i>=16
- add $T1,$T1,$t3 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev $T1,$T1
-#endif
-#if $i==15
- str $inp,[sp,#17*4] @ leave room for $t3
-#endif
- eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e)
+ add $h,$h,$t0,ror#$Sigma1[0] @ h+=Sigma1(e)
and $t1,$t1,$e
- str $T1,[sp,#`$i%16`*4]
- add $T1,$T1,$t0
+ add $h,$h,$t2 @ h+=K256[i]
eor $t1,$t1,$g @ Ch(e,f,g)
- add $T1,$T1,$h
- mov $h,$a,ror#$Sigma0[0]
- add $T1,$T1,$t1
- eor $h,$h,$a,ror#$Sigma0[1]
- add $T1,$T1,$t2
- eor $h,$h,$a,ror#$Sigma0[2] @ Sigma0(a)
-#if $i>=15
- ldr $t3,[sp,#`($i+2)%16`*4] @ from BODY_16_xx
+ eor $t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
+ add $h,$h,$t1 @ h+=Ch(e,f,g)
+#if $i==31
+ and $t2,$t2,#0xff
+ cmp $t2,#0xf2 @ done?
#endif
- orr $t0,$a,$b
- and $t1,$a,$b
- and $t0,$t0,$c
- add $h,$h,$T1
- orr $t0,$t0,$t1 @ Maj(a,b,c)
- add $d,$d,$T1
- add $h,$h,$t0
+#if $i<15
+# if __ARM_ARCH__>=7
+ ldr $t1,[$inp],#4 @ prefetch
+# else
+ ldrb $t1,[$inp,#3]
+# endif
+ eor $t2,$a,$b @ a^b, b^c in next round
+#else
+ ldr $t1,[sp,#`($i+2)%16`*4] @ from future BODY_16_xx
+ eor $t2,$a,$b @ a^b, b^c in next round
+ ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx
+#endif
+ eor $t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]` @ Sigma0(a)
+ and $t3,$t3,$t2 @ (b^c)&=(a^b)
+ add $d,$d,$h @ d+=h
+ eor $t3,$t3,$b @ Maj(a,b,c)
+ add $h,$h,$t0,ror#$Sigma0[0] @ h+=Sigma0(a)
+ @ add $h,$h,$t3 @ h+=Maj(a,b,c)
___
+ ($t2,$t3)=($t3,$t2);
}
sub BODY_16_XX {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
$code.=<<___;
- @ ldr $t3,[sp,#`($i+1)%16`*4] @ $i
- ldr $t2,[sp,#`($i+14)%16`*4]
- mov $t0,$t3,ror#$sigma0[0]
- ldr $T1,[sp,#`($i+0)%16`*4]
- eor $t0,$t0,$t3,ror#$sigma0[1]
- ldr $t1,[sp,#`($i+9)%16`*4]
- eor $t0,$t0,$t3,lsr#$sigma0[2] @ sigma0(X[i+1])
- mov $t3,$t2,ror#$sigma1[0]
- add $T1,$T1,$t0
- eor $t3,$t3,$t2,ror#$sigma1[1]
- add $T1,$T1,$t1
- eor $t3,$t3,$t2,lsr#$sigma1[2] @ sigma1(X[i+14])
- @ add $T1,$T1,$t3
+ @ ldr $t1,[sp,#`($i+1)%16`*4] @ $i
+ @ ldr $t4,[sp,#`($i+14)%16`*4]
+ mov $t0,$t1,ror#$sigma0[0]
+ add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
+ mov $t2,$t4,ror#$sigma1[0]
+ eor $t0,$t0,$t1,ror#$sigma0[1]
+ eor $t2,$t2,$t4,ror#$sigma1[1]
+ eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1])
+ ldr $t1,[sp,#`($i+0)%16`*4]
+ eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14])
+ ldr $t4,[sp,#`($i+9)%16`*4]
+
+ add $t2,$t2,$t0
+ eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]` @ from BODY_00_15
+ add $t1,$t1,$t2
+ eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
+ add $t1,$t1,$t4 @ X[i]
___
&BODY_00_15(@_);
}
@@ -147,46 +176,64 @@ K256:
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size K256,.-K256
+.word 0 @ terminator
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha256_block_data_order
+.align 5
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
sub r3,pc,#8 @ sha256_block_data_order
add $len,$inp,$len,lsl#6 @ len to point at the end of inp
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#ARMV8_SHA256
+ bne .LARMv8
+ tst r12,#ARMV7_NEON
+ bne .LNEON
+#endif
stmdb sp!,{$ctx,$inp,$len,r4-r11,lr}
ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
- sub $Ktbl,r3,#256 @ K256
+ sub $Ktbl,r3,#256+32 @ K256
sub sp,sp,#16*4 @ alloca(X[16])
.Loop:
+# if __ARM_ARCH__>=7
+ ldr $t1,[$inp],#4
+# else
+ ldrb $t1,[$inp,#3]
+# endif
+ eor $t3,$B,$C @ magic
+ eor $t2,$t2,$t2
___
for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
$code.=".Lrounds_16_xx:\n";
for (;$i<32;$i++) { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
- and $t2,$t2,#0xff
- cmp $t2,#0xf2
+ ldreq $t3,[sp,#16*4] @ pull ctx
bne .Lrounds_16_xx
- ldr $T1,[sp,#16*4] @ pull ctx
- ldr $t0,[$T1,#0]
- ldr $t1,[$T1,#4]
- ldr $t2,[$T1,#8]
+ add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
+ ldr $t0,[$t3,#0]
+ ldr $t1,[$t3,#4]
+ ldr $t2,[$t3,#8]
add $A,$A,$t0
- ldr $t0,[$T1,#12]
+ ldr $t0,[$t3,#12]
add $B,$B,$t1
- ldr $t1,[$T1,#16]
+ ldr $t1,[$t3,#16]
add $C,$C,$t2
- ldr $t2,[$T1,#20]
+ ldr $t2,[$t3,#20]
add $D,$D,$t0
- ldr $t0,[$T1,#24]
+ ldr $t0,[$t3,#24]
add $E,$E,$t1
- ldr $t1,[$T1,#28]
+ ldr $t1,[$t3,#28]
add $F,$F,$t2
ldr $inp,[sp,#17*4] @ pull inp
ldr $t2,[sp,#18*4] @ pull inp+len
add $G,$G,$t0
add $H,$H,$t1
- stmia $T1,{$A,$B,$C,$D,$E,$F,$G,$H}
+ stmia $t3,{$A,$B,$C,$D,$E,$F,$G,$H}
cmp $inp,$t2
sub $Ktbl,$Ktbl,#256 @ rewind Ktbl
bne .Loop
@@ -200,12 +247,410 @@ $code.=<<___;
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
#endif
-.size sha256_block_data_order,.-sha256_block_data_order
-.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.size sha256_block_data_order,.-sha256_block_data_order
+___
+######################################################################
+# NEON stuff
+#
+{{{
+my @X=map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
+my $Xfer=$t4;
+my $j=0;
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+sub Xupdate()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ &vext_8 ($T0,@X[0],@X[1],4); # X[1..4]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vext_8 ($T1,@X[2],@X[3],4); # X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T2,$T0,$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += X[9..12]
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T1,$T0,$sigma0[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T2,$T0,32-$sigma0[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T3,$T0,$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T1,$T1,$T2);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T3,$T0,32-$sigma0[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T1,$T1,$T3); # sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T5,&Dhi(@X[3]),$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4); # sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T5,&Dlo(@X[0]),$sigma1[2]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$T0}","[$Ktbl,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[1]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &veor ($T5,$T5,$T4); # sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 (&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 ($T0,$T0,@X[0]);
+ while($#insns>=2) { eval(shift(@insns)); }
+ &vst1_32 ("{$T0}","[$Xfer,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub Xpreload()
+{ use integer;
+ my $body = shift;
+ my @insns = (&$body,&$body,&$body,&$body);
+ my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vld1_32 ("{$T0}","[$Ktbl,:128]!");
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vrev32_8 (@X[0],@X[0]);
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ eval(shift(@insns));
+ &vadd_i32 ($T0,$T0,@X[0]);
+ foreach (@insns) { eval; } # remaining instructions
+ &vst1_32 ("{$T0}","[$Xfer,:128]!");
+
+ push(@X,shift(@X)); # "rotate" X[]
+}
+
+sub body_00_15 () {
+ (
+ '($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+ '&add ($h,$h,$t1)', # h+=X[i]+K[i]
+ '&eor ($t1,$f,$g)',
+ '&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+ '&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past
+ '&and ($t1,$t1,$e)',
+ '&eor ($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e)
+ '&eor ($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+ '&eor ($t1,$t1,$g)', # Ch(e,f,g)
+ '&add ($h,$h,$t2,"ror#$Sigma1[0]")', # h+=Sigma1(e)
+ '&eor ($t2,$a,$b)', # a^b, b^c in next round
+ '&eor ($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a)
+ '&add ($h,$h,$t1)', # h+=Ch(e,f,g)
+ '&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'.
+ '&ldr ($t1,"[$Ktbl]") if ($j==15);'.
+ '&ldr ($t1,"[sp,#64]") if ($j==31)',
+ '&and ($t3,$t3,$t2)', # (b^c)&=(a^b)
+ '&add ($d,$d,$h)', # d+=h
+ '&add ($h,$h,$t0,"ror#$Sigma0[0]");'. # h+=Sigma0(a)
+ '&eor ($t3,$t3,$b)', # Maj(a,b,c)
+ '$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+ )
+}
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type sha256_block_data_order_neon,%function
+.align 4
+sha256_block_data_order_neon:
+.LNEON:
+ stmdb sp!,{r4-r12,lr}
+
+ mov $t2,sp
+ sub sp,sp,#16*4+16 @ alloca
+ sub $Ktbl,r3,#256+32 @ K256
+ bic sp,sp,#15 @ align for 128-bit stores
+
+ vld1.8 {@X[0]},[$inp]!
+ vld1.8 {@X[1]},[$inp]!
+ vld1.8 {@X[2]},[$inp]!
+ vld1.8 {@X[3]},[$inp]!
+ vld1.32 {$T0},[$Ktbl,:128]!
+ vld1.32 {$T1},[$Ktbl,:128]!
+ vld1.32 {$T2},[$Ktbl,:128]!
+ vld1.32 {$T3},[$Ktbl,:128]!
+ vrev32.8 @X[0],@X[0] @ yes, even on
+ str $ctx,[sp,#64]
+ vrev32.8 @X[1],@X[1] @ big-endian
+ str $inp,[sp,#68]
+ mov $Xfer,sp
+ vrev32.8 @X[2],@X[2]
+ str $len,[sp,#72]
+ vrev32.8 @X[3],@X[3]
+ str $t2,[sp,#76] @ save original sp
+ vadd.i32 $T0,$T0,@X[0]
+ vadd.i32 $T1,$T1,@X[1]
+ vst1.32 {$T0},[$Xfer,:128]!
+ vadd.i32 $T2,$T2,@X[2]
+ vst1.32 {$T1},[$Xfer,:128]!
+ vadd.i32 $T3,$T3,@X[3]
+ vst1.32 {$T2},[$Xfer,:128]!
+ vst1.32 {$T3},[$Xfer,:128]!
+
+ ldmia $ctx,{$A-$H}
+ sub $Xfer,$Xfer,#64
+ ldr $t1,[sp,#0]
+ eor $t2,$t2,$t2
+ eor $t3,$B,$C
+ b .L_00_48
+
+.align 4
+.L_00_48:
+___
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+ &Xupdate(\&body_00_15);
+$code.=<<___;
+ teq $t1,#0 @ check for K256 terminator
+ ldr $t1,[sp,#0]
+ sub $Xfer,$Xfer,#64
+ bne .L_00_48
+
+ ldr $inp,[sp,#68]
+ ldr $t0,[sp,#72]
+ sub $Ktbl,$Ktbl,#256 @ rewind $Ktbl
+ teq $inp,$t0
+ subeq $inp,$inp,#64 @ avoid SEGV
+ vld1.8 {@X[0]},[$inp]! @ load next input block
+ vld1.8 {@X[1]},[$inp]!
+ vld1.8 {@X[2]},[$inp]!
+ vld1.8 {@X[3]},[$inp]!
+ strne $inp,[sp,#68]
+ mov $Xfer,sp
+___
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+ &Xpreload(\&body_00_15);
+$code.=<<___;
+ ldr $t0,[$t1,#0]
+ add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
+ ldr $t2,[$t1,#4]
+ ldr $t3,[$t1,#8]
+ ldr $t4,[$t1,#12]
+ add $A,$A,$t0 @ accumulate
+ ldr $t0,[$t1,#16]
+ add $B,$B,$t2
+ ldr $t2,[$t1,#20]
+ add $C,$C,$t3
+ ldr $t3,[$t1,#24]
+ add $D,$D,$t4
+ ldr $t4,[$t1,#28]
+ add $E,$E,$t0
+ str $A,[$t1],#4
+ add $F,$F,$t2
+ str $B,[$t1],#4
+ add $G,$G,$t3
+ str $C,[$t1],#4
+ add $H,$H,$t4
+ str $D,[$t1],#4
+ stmia $t1,{$E-$H}
+
+ movne $Xfer,sp
+ ldrne $t1,[sp,#0]
+ eorne $t2,$t2,$t2
+ ldreq sp,[sp,#76] @ restore original sp
+ eorne $t3,$B,$C
+ bne .L_00_48
+
+ ldmia sp!,{r4-r12,pc}
+.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+___
+}}}
+######################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
+my @MSG=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
+my $Ktbl="r3";
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.type sha256_block_data_order_armv8,%function
+.align 5
+sha256_block_data_order_armv8:
+.LARMv8:
+ vld1.32 {$ABCD,$EFGH},[$ctx]
+ sub $Ktbl,r3,#sha256_block_data_order-K256
+
+.Loop_v8:
+ vld1.8 {@MSG[0]-@MSG[1]},[$inp]!
+ vld1.8 {@MSG[2]-@MSG[3]},[$inp]!
+ vld1.32 {$W0},[$Ktbl]!
+ vrev32.8 @MSG[0],@MSG[0]
+ vrev32.8 @MSG[1],@MSG[1]
+ vrev32.8 @MSG[2],@MSG[2]
+ vrev32.8 @MSG[3],@MSG[3]
+ vmov $ABCD_SAVE,$ABCD @ offload
+ vmov $EFGH_SAVE,$EFGH
+ teq $inp,$len
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+ vld1.32 {$W1},[$Ktbl]!
+ vadd.i32 $W0,$W0,@MSG[0]
+ sha256su0 @MSG[0],@MSG[1]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+ sha256su1 @MSG[0],@MSG[2],@MSG[3]
+___
+ ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ vld1.32 {$W1},[$Ktbl]!
+ vadd.i32 $W0,$W0,@MSG[0]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ vld1.32 {$W0},[$Ktbl]!
+ vadd.i32 $W1,$W1,@MSG[1]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ vld1.32 {$W1},[$Ktbl]
+ vadd.i32 $W0,$W0,@MSG[2]
+ sub $Ktbl,$Ktbl,#256-16 @ rewind
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ vadd.i32 $W1,$W1,@MSG[3]
+ vmov $abcd,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ vadd.i32 $ABCD,$ABCD,$ABCD_SAVE
+ vadd.i32 $EFGH,$EFGH,$EFGH_SAVE
+ bne .Loop_v8
+
+ vst1.32 {$ABCD,$EFGH},[$ctx]
+
+ ret @ bx lr
+.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
+.comm OPENSSL_armcap_P,4,4
___
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
-print $code;
+{ my %opcode = (
+ "sha256h" => 0xf3000c40, "sha256h2" => 0xf3100c40,
+ "sha256su0" => 0xf3ba03c0, "sha256su1" => 0xf3200c40 );
+
+ sub unsha256 {
+ my ($mnemonic,$arg)=@_;
+
+ if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+ my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+ |(($2&7)<<17)|(($2&8)<<4)
+ |(($3&7)<<1) |(($3&8)<<2);
+	# emit the raw opcode word in little-endian byte order, since
+	# ARMv7 instructions are always encoded little-endian. The
+	# correct solution is to use the .inst directive, but older
+	# assemblers don't implement it:-(
+ sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
+ $word&0xff,($word>>8)&0xff,
+ ($word>>16)&0xff,($word>>24)&0xff,
+ $mnemonic,$arg;
+ }
+ }
+}
+
+foreach (split($/,$code)) {
+
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;
+
+ s/\bret\b/bx lr/go or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+
close STDOUT; # enforce flush
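
(Aside, not part of the patch: the NEON Xupdate/Xpreload schedule and the body_00_15 round template above expand the textbook SHA-256 primitives. A minimal Perl sketch of those primitives follows; every sub name here is invented for illustration and none of this appears in the generated assembly. It shows the straightforward round form; the generated code defers the Maj(a,b,c) addition to the following round, as its "from the past" comments note, but the result is the same.)

sub ror32  { my ($x,$n)=@_; (($x>>$n) | ($x<<(32-$n))) & 0xffffffff }  # 32-bit rotate right
sub Sigma0 { my $x=shift; ror32($x,2)  ^ ror32($x,13) ^ ror32($x,22) } # Sigma0(a)
sub Sigma1 { my $x=shift; ror32($x,6)  ^ ror32($x,11) ^ ror32($x,25) } # Sigma1(e)
sub sigma0 { my $x=shift; ror32($x,7)  ^ ror32($x,18) ^ ($x>>3)  }     # schedule sigma0
sub sigma1 { my $x=shift; ror32($x,17) ^ ror32($x,19) ^ ($x>>10) }     # schedule sigma1
sub Ch     { my ($e,$f,$g)=@_; ($e & $f) ^ ((~$e) & $g) }              # Ch(e,f,g)
sub Maj    { my ($a,$b,$c)=@_; ($a & $b) ^ ($a & $c) ^ ($b & $c) }     # Maj(a,b,c)

# One compression round, the scalar equivalent of one body_00_15 step:
# absorbs schedule word W and round constant K, returns the rotated state.
sub sha256_round {
    my ($W,$K,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
    my $T1 = ($h + Sigma1($e) + Ch($e,$f,$g) + $K + $W) & 0xffffffff;
    my $T2 = (Sigma0($a) + Maj($a,$b,$c))               & 0xffffffff;
    return (($T1+$T2) & 0xffffffff, $a, $b, $c, ($d+$T1) & 0xffffffff, $e, $f, $g);
}

# Message schedule, which Xupdate evaluates four words at a time with NEON:
#   X[i] = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16]   (mod 2^32)
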
diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.s b/app/openssl/crypto/sha/asm/sha256-armv4.s
deleted file mode 100644
index 9c20a63c..00000000
--- a/app/openssl/crypto/sha/asm/sha256-armv4.s
+++ /dev/null
@@ -1,1517 +0,0 @@
-#include "arm_arch.h"
-
-.text
-.code 32
-
-.type K256,%object
-.align 5
-K256:
-.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
-.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
-.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
-.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
-.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
-.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
-.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
-.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
-.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
-.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
-.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
-.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
-.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
-.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
-.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
-.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
-.size K256,.-K256
-
-.global sha256_block_data_order
-.type sha256_block_data_order,%function
-sha256_block_data_order:
- sub r3,pc,#8 @ sha256_block_data_order
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
- stmdb sp!,{r0,r1,r2,r4-r11,lr}
- ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
- sub r14,r3,#256 @ K256
- sub sp,sp,#16*4 @ alloca(X[16])
-.Loop:
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 0
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r8,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r8,ror#11
- eor r2,r9,r10
-#if 0>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 0==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r8,ror#25 @ Sigma1(e)
- and r2,r2,r8
- str r3,[sp,#0*4]
- add r3,r3,r0
- eor r2,r2,r10 @ Ch(e,f,g)
- add r3,r3,r11
- mov r11,r4,ror#2
- add r3,r3,r2
- eor r11,r11,r4,ror#13
- add r3,r3,r12
- eor r11,r11,r4,ror#22 @ Sigma0(a)
-#if 0>=15
- ldr r1,[sp,#2*4] @ from BODY_16_xx
-#endif
- orr r0,r4,r5
- and r2,r4,r5
- and r0,r0,r6
- add r11,r11,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r7,r7,r3
- add r11,r11,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 1
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r7,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r7,ror#11
- eor r2,r8,r9
-#if 1>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 1==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r7,ror#25 @ Sigma1(e)
- and r2,r2,r7
- str r3,[sp,#1*4]
- add r3,r3,r0
- eor r2,r2,r9 @ Ch(e,f,g)
- add r3,r3,r10
- mov r10,r11,ror#2
- add r3,r3,r2
- eor r10,r10,r11,ror#13
- add r3,r3,r12
- eor r10,r10,r11,ror#22 @ Sigma0(a)
-#if 1>=15
- ldr r1,[sp,#3*4] @ from BODY_16_xx
-#endif
- orr r0,r11,r4
- and r2,r11,r4
- and r0,r0,r5
- add r10,r10,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r6,r6,r3
- add r10,r10,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 2
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r6,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r6,ror#11
- eor r2,r7,r8
-#if 2>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 2==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r6,ror#25 @ Sigma1(e)
- and r2,r2,r6
- str r3,[sp,#2*4]
- add r3,r3,r0
- eor r2,r2,r8 @ Ch(e,f,g)
- add r3,r3,r9
- mov r9,r10,ror#2
- add r3,r3,r2
- eor r9,r9,r10,ror#13
- add r3,r3,r12
- eor r9,r9,r10,ror#22 @ Sigma0(a)
-#if 2>=15
- ldr r1,[sp,#4*4] @ from BODY_16_xx
-#endif
- orr r0,r10,r11
- and r2,r10,r11
- and r0,r0,r4
- add r9,r9,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r5,r5,r3
- add r9,r9,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 3
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r5,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r5,ror#11
- eor r2,r6,r7
-#if 3>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 3==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r5,ror#25 @ Sigma1(e)
- and r2,r2,r5
- str r3,[sp,#3*4]
- add r3,r3,r0
- eor r2,r2,r7 @ Ch(e,f,g)
- add r3,r3,r8
- mov r8,r9,ror#2
- add r3,r3,r2
- eor r8,r8,r9,ror#13
- add r3,r3,r12
- eor r8,r8,r9,ror#22 @ Sigma0(a)
-#if 3>=15
- ldr r1,[sp,#5*4] @ from BODY_16_xx
-#endif
- orr r0,r9,r10
- and r2,r9,r10
- and r0,r0,r11
- add r8,r8,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r4,r4,r3
- add r8,r8,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 4
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r4,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r4,ror#11
- eor r2,r5,r6
-#if 4>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 4==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r4,ror#25 @ Sigma1(e)
- and r2,r2,r4
- str r3,[sp,#4*4]
- add r3,r3,r0
- eor r2,r2,r6 @ Ch(e,f,g)
- add r3,r3,r7
- mov r7,r8,ror#2
- add r3,r3,r2
- eor r7,r7,r8,ror#13
- add r3,r3,r12
- eor r7,r7,r8,ror#22 @ Sigma0(a)
-#if 4>=15
- ldr r1,[sp,#6*4] @ from BODY_16_xx
-#endif
- orr r0,r8,r9
- and r2,r8,r9
- and r0,r0,r10
- add r7,r7,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r11,r11,r3
- add r7,r7,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 5
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r11,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r11,ror#11
- eor r2,r4,r5
-#if 5>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 5==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r11,ror#25 @ Sigma1(e)
- and r2,r2,r11
- str r3,[sp,#5*4]
- add r3,r3,r0
- eor r2,r2,r5 @ Ch(e,f,g)
- add r3,r3,r6
- mov r6,r7,ror#2
- add r3,r3,r2
- eor r6,r6,r7,ror#13
- add r3,r3,r12
- eor r6,r6,r7,ror#22 @ Sigma0(a)
-#if 5>=15
- ldr r1,[sp,#7*4] @ from BODY_16_xx
-#endif
- orr r0,r7,r8
- and r2,r7,r8
- and r0,r0,r9
- add r6,r6,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r10,r10,r3
- add r6,r6,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 6
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r10,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r10,ror#11
- eor r2,r11,r4
-#if 6>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 6==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r10,ror#25 @ Sigma1(e)
- and r2,r2,r10
- str r3,[sp,#6*4]
- add r3,r3,r0
- eor r2,r2,r4 @ Ch(e,f,g)
- add r3,r3,r5
- mov r5,r6,ror#2
- add r3,r3,r2
- eor r5,r5,r6,ror#13
- add r3,r3,r12
- eor r5,r5,r6,ror#22 @ Sigma0(a)
-#if 6>=15
- ldr r1,[sp,#8*4] @ from BODY_16_xx
-#endif
- orr r0,r6,r7
- and r2,r6,r7
- and r0,r0,r8
- add r5,r5,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r9,r9,r3
- add r5,r5,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 7
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r9,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r9,ror#11
- eor r2,r10,r11
-#if 7>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 7==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r9,ror#25 @ Sigma1(e)
- and r2,r2,r9
- str r3,[sp,#7*4]
- add r3,r3,r0
- eor r2,r2,r11 @ Ch(e,f,g)
- add r3,r3,r4
- mov r4,r5,ror#2
- add r3,r3,r2
- eor r4,r4,r5,ror#13
- add r3,r3,r12
- eor r4,r4,r5,ror#22 @ Sigma0(a)
-#if 7>=15
- ldr r1,[sp,#9*4] @ from BODY_16_xx
-#endif
- orr r0,r5,r6
- and r2,r5,r6
- and r0,r0,r7
- add r4,r4,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r8,r8,r3
- add r4,r4,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 8
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r8,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r8,ror#11
- eor r2,r9,r10
-#if 8>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 8==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r8,ror#25 @ Sigma1(e)
- and r2,r2,r8
- str r3,[sp,#8*4]
- add r3,r3,r0
- eor r2,r2,r10 @ Ch(e,f,g)
- add r3,r3,r11
- mov r11,r4,ror#2
- add r3,r3,r2
- eor r11,r11,r4,ror#13
- add r3,r3,r12
- eor r11,r11,r4,ror#22 @ Sigma0(a)
-#if 8>=15
- ldr r1,[sp,#10*4] @ from BODY_16_xx
-#endif
- orr r0,r4,r5
- and r2,r4,r5
- and r0,r0,r6
- add r11,r11,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r7,r7,r3
- add r11,r11,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 9
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r7,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r7,ror#11
- eor r2,r8,r9
-#if 9>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 9==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r7,ror#25 @ Sigma1(e)
- and r2,r2,r7
- str r3,[sp,#9*4]
- add r3,r3,r0
- eor r2,r2,r9 @ Ch(e,f,g)
- add r3,r3,r10
- mov r10,r11,ror#2
- add r3,r3,r2
- eor r10,r10,r11,ror#13
- add r3,r3,r12
- eor r10,r10,r11,ror#22 @ Sigma0(a)
-#if 9>=15
- ldr r1,[sp,#11*4] @ from BODY_16_xx
-#endif
- orr r0,r11,r4
- and r2,r11,r4
- and r0,r0,r5
- add r10,r10,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r6,r6,r3
- add r10,r10,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 10
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r6,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r6,ror#11
- eor r2,r7,r8
-#if 10>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 10==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r6,ror#25 @ Sigma1(e)
- and r2,r2,r6
- str r3,[sp,#10*4]
- add r3,r3,r0
- eor r2,r2,r8 @ Ch(e,f,g)
- add r3,r3,r9
- mov r9,r10,ror#2
- add r3,r3,r2
- eor r9,r9,r10,ror#13
- add r3,r3,r12
- eor r9,r9,r10,ror#22 @ Sigma0(a)
-#if 10>=15
- ldr r1,[sp,#12*4] @ from BODY_16_xx
-#endif
- orr r0,r10,r11
- and r2,r10,r11
- and r0,r0,r4
- add r9,r9,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r5,r5,r3
- add r9,r9,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 11
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r5,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r5,ror#11
- eor r2,r6,r7
-#if 11>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 11==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r5,ror#25 @ Sigma1(e)
- and r2,r2,r5
- str r3,[sp,#11*4]
- add r3,r3,r0
- eor r2,r2,r7 @ Ch(e,f,g)
- add r3,r3,r8
- mov r8,r9,ror#2
- add r3,r3,r2
- eor r8,r8,r9,ror#13
- add r3,r3,r12
- eor r8,r8,r9,ror#22 @ Sigma0(a)
-#if 11>=15
- ldr r1,[sp,#13*4] @ from BODY_16_xx
-#endif
- orr r0,r9,r10
- and r2,r9,r10
- and r0,r0,r11
- add r8,r8,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r4,r4,r3
- add r8,r8,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 12
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r4,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r4,ror#11
- eor r2,r5,r6
-#if 12>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 12==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r4,ror#25 @ Sigma1(e)
- and r2,r2,r4
- str r3,[sp,#12*4]
- add r3,r3,r0
- eor r2,r2,r6 @ Ch(e,f,g)
- add r3,r3,r7
- mov r7,r8,ror#2
- add r3,r3,r2
- eor r7,r7,r8,ror#13
- add r3,r3,r12
- eor r7,r7,r8,ror#22 @ Sigma0(a)
-#if 12>=15
- ldr r1,[sp,#14*4] @ from BODY_16_xx
-#endif
- orr r0,r8,r9
- and r2,r8,r9
- and r0,r0,r10
- add r7,r7,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r11,r11,r3
- add r7,r7,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 13
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r11,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r11,ror#11
- eor r2,r4,r5
-#if 13>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 13==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r11,ror#25 @ Sigma1(e)
- and r2,r2,r11
- str r3,[sp,#13*4]
- add r3,r3,r0
- eor r2,r2,r5 @ Ch(e,f,g)
- add r3,r3,r6
- mov r6,r7,ror#2
- add r3,r3,r2
- eor r6,r6,r7,ror#13
- add r3,r3,r12
- eor r6,r6,r7,ror#22 @ Sigma0(a)
-#if 13>=15
- ldr r1,[sp,#15*4] @ from BODY_16_xx
-#endif
- orr r0,r7,r8
- and r2,r7,r8
- and r0,r0,r9
- add r6,r6,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r10,r10,r3
- add r6,r6,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 14
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r10,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r10,ror#11
- eor r2,r11,r4
-#if 14>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 14==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r10,ror#25 @ Sigma1(e)
- and r2,r2,r10
- str r3,[sp,#14*4]
- add r3,r3,r0
- eor r2,r2,r4 @ Ch(e,f,g)
- add r3,r3,r5
- mov r5,r6,ror#2
- add r3,r3,r2
- eor r5,r5,r6,ror#13
- add r3,r3,r12
- eor r5,r5,r6,ror#22 @ Sigma0(a)
-#if 14>=15
- ldr r1,[sp,#0*4] @ from BODY_16_xx
-#endif
- orr r0,r6,r7
- and r2,r6,r7
- and r0,r0,r8
- add r5,r5,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r9,r9,r3
- add r5,r5,r0
-#if __ARM_ARCH__>=7
- ldr r3,[r1],#4
-#else
- ldrb r3,[r1,#3] @ 15
- ldrb r12,[r1,#2]
- ldrb r2,[r1,#1]
- ldrb r0,[r1],#4
- orr r3,r3,r12,lsl#8
- orr r3,r3,r2,lsl#16
- orr r3,r3,r0,lsl#24
-#endif
- mov r0,r9,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r9,ror#11
- eor r2,r10,r11
-#if 15>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 15==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r9,ror#25 @ Sigma1(e)
- and r2,r2,r9
- str r3,[sp,#15*4]
- add r3,r3,r0
- eor r2,r2,r11 @ Ch(e,f,g)
- add r3,r3,r4
- mov r4,r5,ror#2
- add r3,r3,r2
- eor r4,r4,r5,ror#13
- add r3,r3,r12
- eor r4,r4,r5,ror#22 @ Sigma0(a)
-#if 15>=15
- ldr r1,[sp,#1*4] @ from BODY_16_xx
-#endif
- orr r0,r5,r6
- and r2,r5,r6
- and r0,r0,r7
- add r4,r4,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r8,r8,r3
- add r4,r4,r0
-.Lrounds_16_xx:
- @ ldr r1,[sp,#1*4] @ 16
- ldr r12,[sp,#14*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#0*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#9*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r8,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r8,ror#11
- eor r2,r9,r10
-#if 16>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 16==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r8,ror#25 @ Sigma1(e)
- and r2,r2,r8
- str r3,[sp,#0*4]
- add r3,r3,r0
- eor r2,r2,r10 @ Ch(e,f,g)
- add r3,r3,r11
- mov r11,r4,ror#2
- add r3,r3,r2
- eor r11,r11,r4,ror#13
- add r3,r3,r12
- eor r11,r11,r4,ror#22 @ Sigma0(a)
-#if 16>=15
- ldr r1,[sp,#2*4] @ from BODY_16_xx
-#endif
- orr r0,r4,r5
- and r2,r4,r5
- and r0,r0,r6
- add r11,r11,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r7,r7,r3
- add r11,r11,r0
- @ ldr r1,[sp,#2*4] @ 17
- ldr r12,[sp,#15*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#1*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#10*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r7,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r7,ror#11
- eor r2,r8,r9
-#if 17>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 17==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r7,ror#25 @ Sigma1(e)
- and r2,r2,r7
- str r3,[sp,#1*4]
- add r3,r3,r0
- eor r2,r2,r9 @ Ch(e,f,g)
- add r3,r3,r10
- mov r10,r11,ror#2
- add r3,r3,r2
- eor r10,r10,r11,ror#13
- add r3,r3,r12
- eor r10,r10,r11,ror#22 @ Sigma0(a)
-#if 17>=15
- ldr r1,[sp,#3*4] @ from BODY_16_xx
-#endif
- orr r0,r11,r4
- and r2,r11,r4
- and r0,r0,r5
- add r10,r10,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r6,r6,r3
- add r10,r10,r0
- @ ldr r1,[sp,#3*4] @ 18
- ldr r12,[sp,#0*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#2*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#11*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r6,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r6,ror#11
- eor r2,r7,r8
-#if 18>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 18==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r6,ror#25 @ Sigma1(e)
- and r2,r2,r6
- str r3,[sp,#2*4]
- add r3,r3,r0
- eor r2,r2,r8 @ Ch(e,f,g)
- add r3,r3,r9
- mov r9,r10,ror#2
- add r3,r3,r2
- eor r9,r9,r10,ror#13
- add r3,r3,r12
- eor r9,r9,r10,ror#22 @ Sigma0(a)
-#if 18>=15
- ldr r1,[sp,#4*4] @ from BODY_16_xx
-#endif
- orr r0,r10,r11
- and r2,r10,r11
- and r0,r0,r4
- add r9,r9,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r5,r5,r3
- add r9,r9,r0
- @ ldr r1,[sp,#4*4] @ 19
- ldr r12,[sp,#1*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#3*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#12*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r5,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r5,ror#11
- eor r2,r6,r7
-#if 19>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 19==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r5,ror#25 @ Sigma1(e)
- and r2,r2,r5
- str r3,[sp,#3*4]
- add r3,r3,r0
- eor r2,r2,r7 @ Ch(e,f,g)
- add r3,r3,r8
- mov r8,r9,ror#2
- add r3,r3,r2
- eor r8,r8,r9,ror#13
- add r3,r3,r12
- eor r8,r8,r9,ror#22 @ Sigma0(a)
-#if 19>=15
- ldr r1,[sp,#5*4] @ from BODY_16_xx
-#endif
- orr r0,r9,r10
- and r2,r9,r10
- and r0,r0,r11
- add r8,r8,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r4,r4,r3
- add r8,r8,r0
- @ ldr r1,[sp,#5*4] @ 20
- ldr r12,[sp,#2*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#4*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#13*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r4,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r4,ror#11
- eor r2,r5,r6
-#if 20>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 20==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r4,ror#25 @ Sigma1(e)
- and r2,r2,r4
- str r3,[sp,#4*4]
- add r3,r3,r0
- eor r2,r2,r6 @ Ch(e,f,g)
- add r3,r3,r7
- mov r7,r8,ror#2
- add r3,r3,r2
- eor r7,r7,r8,ror#13
- add r3,r3,r12
- eor r7,r7,r8,ror#22 @ Sigma0(a)
-#if 20>=15
- ldr r1,[sp,#6*4] @ from BODY_16_xx
-#endif
- orr r0,r8,r9
- and r2,r8,r9
- and r0,r0,r10
- add r7,r7,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r11,r11,r3
- add r7,r7,r0
- @ ldr r1,[sp,#6*4] @ 21
- ldr r12,[sp,#3*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#5*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#14*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r11,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r11,ror#11
- eor r2,r4,r5
-#if 21>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 21==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r11,ror#25 @ Sigma1(e)
- and r2,r2,r11
- str r3,[sp,#5*4]
- add r3,r3,r0
- eor r2,r2,r5 @ Ch(e,f,g)
- add r3,r3,r6
- mov r6,r7,ror#2
- add r3,r3,r2
- eor r6,r6,r7,ror#13
- add r3,r3,r12
- eor r6,r6,r7,ror#22 @ Sigma0(a)
-#if 21>=15
- ldr r1,[sp,#7*4] @ from BODY_16_xx
-#endif
- orr r0,r7,r8
- and r2,r7,r8
- and r0,r0,r9
- add r6,r6,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r10,r10,r3
- add r6,r6,r0
- @ ldr r1,[sp,#7*4] @ 22
- ldr r12,[sp,#4*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#6*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#15*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r10,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r10,ror#11
- eor r2,r11,r4
-#if 22>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 22==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r10,ror#25 @ Sigma1(e)
- and r2,r2,r10
- str r3,[sp,#6*4]
- add r3,r3,r0
- eor r2,r2,r4 @ Ch(e,f,g)
- add r3,r3,r5
- mov r5,r6,ror#2
- add r3,r3,r2
- eor r5,r5,r6,ror#13
- add r3,r3,r12
- eor r5,r5,r6,ror#22 @ Sigma0(a)
-#if 22>=15
- ldr r1,[sp,#8*4] @ from BODY_16_xx
-#endif
- orr r0,r6,r7
- and r2,r6,r7
- and r0,r0,r8
- add r5,r5,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r9,r9,r3
- add r5,r5,r0
- @ ldr r1,[sp,#8*4] @ 23
- ldr r12,[sp,#5*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#7*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#0*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r9,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r9,ror#11
- eor r2,r10,r11
-#if 23>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 23==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r9,ror#25 @ Sigma1(e)
- and r2,r2,r9
- str r3,[sp,#7*4]
- add r3,r3,r0
- eor r2,r2,r11 @ Ch(e,f,g)
- add r3,r3,r4
- mov r4,r5,ror#2
- add r3,r3,r2
- eor r4,r4,r5,ror#13
- add r3,r3,r12
- eor r4,r4,r5,ror#22 @ Sigma0(a)
-#if 23>=15
- ldr r1,[sp,#9*4] @ from BODY_16_xx
-#endif
- orr r0,r5,r6
- and r2,r5,r6
- and r0,r0,r7
- add r4,r4,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r8,r8,r3
- add r4,r4,r0
- @ ldr r1,[sp,#9*4] @ 24
- ldr r12,[sp,#6*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#8*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#1*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r8,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r8,ror#11
- eor r2,r9,r10
-#if 24>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 24==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r8,ror#25 @ Sigma1(e)
- and r2,r2,r8
- str r3,[sp,#8*4]
- add r3,r3,r0
- eor r2,r2,r10 @ Ch(e,f,g)
- add r3,r3,r11
- mov r11,r4,ror#2
- add r3,r3,r2
- eor r11,r11,r4,ror#13
- add r3,r3,r12
- eor r11,r11,r4,ror#22 @ Sigma0(a)
-#if 24>=15
- ldr r1,[sp,#10*4] @ from BODY_16_xx
-#endif
- orr r0,r4,r5
- and r2,r4,r5
- and r0,r0,r6
- add r11,r11,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r7,r7,r3
- add r11,r11,r0
- @ ldr r1,[sp,#10*4] @ 25
- ldr r12,[sp,#7*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#9*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#2*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r7,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r7,ror#11
- eor r2,r8,r9
-#if 25>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 25==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r7,ror#25 @ Sigma1(e)
- and r2,r2,r7
- str r3,[sp,#9*4]
- add r3,r3,r0
- eor r2,r2,r9 @ Ch(e,f,g)
- add r3,r3,r10
- mov r10,r11,ror#2
- add r3,r3,r2
- eor r10,r10,r11,ror#13
- add r3,r3,r12
- eor r10,r10,r11,ror#22 @ Sigma0(a)
-#if 25>=15
- ldr r1,[sp,#11*4] @ from BODY_16_xx
-#endif
- orr r0,r11,r4
- and r2,r11,r4
- and r0,r0,r5
- add r10,r10,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r6,r6,r3
- add r10,r10,r0
- @ ldr r1,[sp,#11*4] @ 26
- ldr r12,[sp,#8*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#10*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#3*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r6,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r6,ror#11
- eor r2,r7,r8
-#if 26>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 26==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r6,ror#25 @ Sigma1(e)
- and r2,r2,r6
- str r3,[sp,#10*4]
- add r3,r3,r0
- eor r2,r2,r8 @ Ch(e,f,g)
- add r3,r3,r9
- mov r9,r10,ror#2
- add r3,r3,r2
- eor r9,r9,r10,ror#13
- add r3,r3,r12
- eor r9,r9,r10,ror#22 @ Sigma0(a)
-#if 26>=15
- ldr r1,[sp,#12*4] @ from BODY_16_xx
-#endif
- orr r0,r10,r11
- and r2,r10,r11
- and r0,r0,r4
- add r9,r9,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r5,r5,r3
- add r9,r9,r0
- @ ldr r1,[sp,#12*4] @ 27
- ldr r12,[sp,#9*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#11*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#4*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r5,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r5,ror#11
- eor r2,r6,r7
-#if 27>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 27==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r5,ror#25 @ Sigma1(e)
- and r2,r2,r5
- str r3,[sp,#11*4]
- add r3,r3,r0
- eor r2,r2,r7 @ Ch(e,f,g)
- add r3,r3,r8
- mov r8,r9,ror#2
- add r3,r3,r2
- eor r8,r8,r9,ror#13
- add r3,r3,r12
- eor r8,r8,r9,ror#22 @ Sigma0(a)
-#if 27>=15
- ldr r1,[sp,#13*4] @ from BODY_16_xx
-#endif
- orr r0,r9,r10
- and r2,r9,r10
- and r0,r0,r11
- add r8,r8,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r4,r4,r3
- add r8,r8,r0
- @ ldr r1,[sp,#13*4] @ 28
- ldr r12,[sp,#10*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#12*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#5*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r4,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r4,ror#11
- eor r2,r5,r6
-#if 28>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 28==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r4,ror#25 @ Sigma1(e)
- and r2,r2,r4
- str r3,[sp,#12*4]
- add r3,r3,r0
- eor r2,r2,r6 @ Ch(e,f,g)
- add r3,r3,r7
- mov r7,r8,ror#2
- add r3,r3,r2
- eor r7,r7,r8,ror#13
- add r3,r3,r12
- eor r7,r7,r8,ror#22 @ Sigma0(a)
-#if 28>=15
- ldr r1,[sp,#14*4] @ from BODY_16_xx
-#endif
- orr r0,r8,r9
- and r2,r8,r9
- and r0,r0,r10
- add r7,r7,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r11,r11,r3
- add r7,r7,r0
- @ ldr r1,[sp,#14*4] @ 29
- ldr r12,[sp,#11*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#13*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#6*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r11,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r11,ror#11
- eor r2,r4,r5
-#if 29>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 29==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r11,ror#25 @ Sigma1(e)
- and r2,r2,r11
- str r3,[sp,#13*4]
- add r3,r3,r0
- eor r2,r2,r5 @ Ch(e,f,g)
- add r3,r3,r6
- mov r6,r7,ror#2
- add r3,r3,r2
- eor r6,r6,r7,ror#13
- add r3,r3,r12
- eor r6,r6,r7,ror#22 @ Sigma0(a)
-#if 29>=15
- ldr r1,[sp,#15*4] @ from BODY_16_xx
-#endif
- orr r0,r7,r8
- and r2,r7,r8
- and r0,r0,r9
- add r6,r6,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r10,r10,r3
- add r6,r6,r0
- @ ldr r1,[sp,#15*4] @ 30
- ldr r12,[sp,#12*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#14*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#7*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r10,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r10,ror#11
- eor r2,r11,r4
-#if 30>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 30==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r10,ror#25 @ Sigma1(e)
- and r2,r2,r10
- str r3,[sp,#14*4]
- add r3,r3,r0
- eor r2,r2,r4 @ Ch(e,f,g)
- add r3,r3,r5
- mov r5,r6,ror#2
- add r3,r3,r2
- eor r5,r5,r6,ror#13
- add r3,r3,r12
- eor r5,r5,r6,ror#22 @ Sigma0(a)
-#if 30>=15
- ldr r1,[sp,#0*4] @ from BODY_16_xx
-#endif
- orr r0,r6,r7
- and r2,r6,r7
- and r0,r0,r8
- add r5,r5,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r9,r9,r3
- add r5,r5,r0
- @ ldr r1,[sp,#0*4] @ 31
- ldr r12,[sp,#13*4]
- mov r0,r1,ror#7
- ldr r3,[sp,#15*4]
- eor r0,r0,r1,ror#18
- ldr r2,[sp,#8*4]
- eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
- mov r1,r12,ror#17
- add r3,r3,r0
- eor r1,r1,r12,ror#19
- add r3,r3,r2
- eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
- @ add r3,r3,r1
- mov r0,r9,ror#6
- ldr r12,[r14],#4 @ *K256++
- eor r0,r0,r9,ror#11
- eor r2,r10,r11
-#if 31>=16
- add r3,r3,r1 @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
- rev r3,r3
-#endif
-#if 31==15
- str r1,[sp,#17*4] @ leave room for r1
-#endif
- eor r0,r0,r9,ror#25 @ Sigma1(e)
- and r2,r2,r9
- str r3,[sp,#15*4]
- add r3,r3,r0
- eor r2,r2,r11 @ Ch(e,f,g)
- add r3,r3,r4
- mov r4,r5,ror#2
- add r3,r3,r2
- eor r4,r4,r5,ror#13
- add r3,r3,r12
- eor r4,r4,r5,ror#22 @ Sigma0(a)
-#if 31>=15
- ldr r1,[sp,#1*4] @ from BODY_16_xx
-#endif
- orr r0,r5,r6
- and r2,r5,r6
- and r0,r0,r7
- add r4,r4,r3
- orr r0,r0,r2 @ Maj(a,b,c)
- add r8,r8,r3
- add r4,r4,r0
- and r12,r12,#0xff
- cmp r12,#0xf2
- bne .Lrounds_16_xx
-
- ldr r3,[sp,#16*4] @ pull ctx
- ldr r0,[r3,#0]
- ldr r2,[r3,#4]
- ldr r12,[r3,#8]
- add r4,r4,r0
- ldr r0,[r3,#12]
- add r5,r5,r2
- ldr r2,[r3,#16]
- add r6,r6,r12
- ldr r12,[r3,#20]
- add r7,r7,r0
- ldr r0,[r3,#24]
- add r8,r8,r2
- ldr r2,[r3,#28]
- add r9,r9,r12
- ldr r1,[sp,#17*4] @ pull inp
- ldr r12,[sp,#18*4] @ pull inp+len
- add r10,r10,r0
- add r11,r11,r2
- stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
- cmp r1,r12
- sub r14,r14,#256 @ rewind Ktbl
- bne .Loop
-
- add sp,sp,#19*4 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r11,pc}
-#else
- ldmia sp!,{r4-r11,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size sha256_block_data_order,.-sha256_block_data_order
-.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
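
(Aside, not part of the patch: the retired integer-only code above computes Maj(a,b,c) with an orr/and/and/orr sequence rather than the textbook xor form, and the AArch64 file added below builds Ch(e,f,g) from and/bic/orr. Both rest on standard bitwise identities; the self-contained Perl check below, with invented sub names, simply verifies them against the reference definitions.)

sub maj_ref { my ($a,$b,$c)=@_; ($a & $b) ^ ($a & $c) ^ ($b & $c) }  # textbook Maj
sub maj_asm { my ($a,$b,$c)=@_; (($a | $b) & $c) | ($a & $b) }       # orr/and/and/orr, as above
sub ch_ref  { my ($e,$f,$g)=@_; ($e & $f) ^ ((~$e) & $g) }           # textbook Ch
sub ch_asm  { my ($e,$f,$g)=@_; ($e & $f) | ((~$e) & $g) }           # and/bic/orr, as in sha256-armv8.S

for (1..10000) {
    my ($x,$y,$z) = map { int(rand(2**32)) } 1..3;   # random 32-bit test vectors
    die "Maj identity broken" if maj_ref($x,$y,$z) != maj_asm($x,$y,$z);
    die "Ch identity broken"  if ch_ref($x,$y,$z)  != ch_asm($x,$y,$z);
}
print "both identities hold\n";
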
diff --git a/app/openssl/crypto/sha/asm/sha256-armv8.S b/app/openssl/crypto/sha/asm/sha256-armv8.S
new file mode 100644
index 00000000..bd43b1fe
--- /dev/null
+++ b/app/openssl/crypto/sha/asm/sha256-armv8.S
@@ -0,0 +1,1141 @@
+#include "arm_arch.h"
+
+.text
+
+.globl sha256_block_data_order
+.type sha256_block_data_order,%function
+.align 6
+sha256_block_data_order:
+ ldr x16,.LOPENSSL_armcap_P
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA256
+ b.ne .Lv8_entry
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*4
+
+ ldp w20,w21,[x0] // load context
+ ldp w22,w23,[x0,#2*4]
+ ldp w24,w25,[x0,#4*4]
+ add x2,x1,x2,lsl#6 // end of input
+ ldp w26,w27,[x0,#6*4]
+ adr x30,K256
+ stp x0,x2,[x29,#96]
+
+.Loop:
+ ldp w3,w4,[x1],#2*4
+ ldr w19,[x30],#4 // *K++
+ eor w28,w21,w22 // magic seed
+ str x1,[x29,#112]
+#ifndef __ARMEB__
+ rev w3,w3 // 0
+#endif
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ eor w6,w24,w24,ror#14
+ and w17,w25,w24
+ bic w19,w26,w24
+ add w27,w27,w3 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w6,ror#11 // Sigma1(e)
+ ror w6,w20,#2
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ eor w17,w20,w20,ror#9
+ add w27,w27,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w23,w23,w27 // d+=h
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w6,w17,ror#13 // Sigma0(a)
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w27,w27,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w4,w4 // 1
+#endif
+ ldp w5,w6,[x1],#2*4
+ add w27,w27,w17 // h+=Sigma0(a)
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ eor w7,w23,w23,ror#14
+ and w17,w24,w23
+ bic w28,w25,w23
+ add w26,w26,w4 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w7,ror#11 // Sigma1(e)
+ ror w7,w27,#2
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ eor w17,w27,w27,ror#9
+ add w26,w26,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w22,w22,w26 // d+=h
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w7,w17,ror#13 // Sigma0(a)
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w26,w26,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w5,w5 // 2
+#endif
+ add w26,w26,w17 // h+=Sigma0(a)
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ eor w8,w22,w22,ror#14
+ and w17,w23,w22
+ bic w19,w24,w22
+ add w25,w25,w5 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w8,ror#11 // Sigma1(e)
+ ror w8,w26,#2
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ eor w17,w26,w26,ror#9
+ add w25,w25,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w21,w21,w25 // d+=h
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w8,w17,ror#13 // Sigma0(a)
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w25,w25,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w6,w6 // 3
+#endif
+ ldp w7,w8,[x1],#2*4
+ add w25,w25,w17 // h+=Sigma0(a)
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ eor w9,w21,w21,ror#14
+ and w17,w22,w21
+ bic w28,w23,w21
+ add w24,w24,w6 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w9,ror#11 // Sigma1(e)
+ ror w9,w25,#2
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ eor w17,w25,w25,ror#9
+ add w24,w24,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w20,w20,w24 // d+=h
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w9,w17,ror#13 // Sigma0(a)
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w24,w24,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w7,w7 // 4
+#endif
+ add w24,w24,w17 // h+=Sigma0(a)
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ eor w10,w20,w20,ror#14
+ and w17,w21,w20
+ bic w19,w22,w20
+ add w23,w23,w7 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w10,ror#11 // Sigma1(e)
+ ror w10,w24,#2
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ eor w17,w24,w24,ror#9
+ add w23,w23,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w27,w27,w23 // d+=h
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w10,w17,ror#13 // Sigma0(a)
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w23,w23,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w8,w8 // 5
+#endif
+ ldp w9,w10,[x1],#2*4
+ add w23,w23,w17 // h+=Sigma0(a)
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ eor w11,w27,w27,ror#14
+ and w17,w20,w27
+ bic w28,w21,w27
+ add w22,w22,w8 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w11,ror#11 // Sigma1(e)
+ ror w11,w23,#2
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ eor w17,w23,w23,ror#9
+ add w22,w22,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w26,w26,w22 // d+=h
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w11,w17,ror#13 // Sigma0(a)
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w22,w22,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w9,w9 // 6
+#endif
+ add w22,w22,w17 // h+=Sigma0(a)
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ eor w12,w26,w26,ror#14
+ and w17,w27,w26
+ bic w19,w20,w26
+ add w21,w21,w9 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w12,ror#11 // Sigma1(e)
+ ror w12,w22,#2
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ eor w17,w22,w22,ror#9
+ add w21,w21,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w25,w25,w21 // d+=h
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w12,w17,ror#13 // Sigma0(a)
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w21,w21,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w10,w10 // 7
+#endif
+ ldp w11,w12,[x1],#2*4
+ add w21,w21,w17 // h+=Sigma0(a)
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ eor w13,w25,w25,ror#14
+ and w17,w26,w25
+ bic w28,w27,w25
+ add w20,w20,w10 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w13,ror#11 // Sigma1(e)
+ ror w13,w21,#2
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ eor w17,w21,w21,ror#9
+ add w20,w20,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w24,w24,w20 // d+=h
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w13,w17,ror#13 // Sigma0(a)
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w20,w20,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w11,w11 // 8
+#endif
+ add w20,w20,w17 // h+=Sigma0(a)
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ eor w14,w24,w24,ror#14
+ and w17,w25,w24
+ bic w19,w26,w24
+ add w27,w27,w11 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w14,ror#11 // Sigma1(e)
+ ror w14,w20,#2
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ eor w17,w20,w20,ror#9
+ add w27,w27,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w23,w23,w27 // d+=h
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w14,w17,ror#13 // Sigma0(a)
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w27,w27,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w12,w12 // 9
+#endif
+ ldp w13,w14,[x1],#2*4
+ add w27,w27,w17 // h+=Sigma0(a)
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ eor w15,w23,w23,ror#14
+ and w17,w24,w23
+ bic w28,w25,w23
+ add w26,w26,w12 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w15,ror#11 // Sigma1(e)
+ ror w15,w27,#2
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ eor w17,w27,w27,ror#9
+ add w26,w26,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w22,w22,w26 // d+=h
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w15,w17,ror#13 // Sigma0(a)
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w26,w26,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w13,w13 // 10
+#endif
+ add w26,w26,w17 // h+=Sigma0(a)
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ eor w0,w22,w22,ror#14
+ and w17,w23,w22
+ bic w19,w24,w22
+ add w25,w25,w13 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w0,ror#11 // Sigma1(e)
+ ror w0,w26,#2
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ eor w17,w26,w26,ror#9
+ add w25,w25,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w21,w21,w25 // d+=h
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w0,w17,ror#13 // Sigma0(a)
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w25,w25,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w14,w14 // 11
+#endif
+ ldp w15,w0,[x1],#2*4
+ add w25,w25,w17 // h+=Sigma0(a)
+ str w6,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ eor w6,w21,w21,ror#14
+ and w17,w22,w21
+ bic w28,w23,w21
+ add w24,w24,w14 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w6,ror#11 // Sigma1(e)
+ ror w6,w25,#2
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ eor w17,w25,w25,ror#9
+ add w24,w24,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w20,w20,w24 // d+=h
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w6,w17,ror#13 // Sigma0(a)
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w24,w24,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w15,w15 // 12
+#endif
+ add w24,w24,w17 // h+=Sigma0(a)
+ str w7,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ eor w7,w20,w20,ror#14
+ and w17,w21,w20
+ bic w19,w22,w20
+ add w23,w23,w15 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w7,ror#11 // Sigma1(e)
+ ror w7,w24,#2
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ eor w17,w24,w24,ror#9
+ add w23,w23,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w27,w27,w23 // d+=h
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w7,w17,ror#13 // Sigma0(a)
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w23,w23,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w0,w0 // 13
+#endif
+ ldp w1,w2,[x1]
+ add w23,w23,w17 // h+=Sigma0(a)
+ str w8,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ eor w8,w27,w27,ror#14
+ and w17,w20,w27
+ bic w28,w21,w27
+ add w22,w22,w0 // h+=X[i]
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w8,ror#11 // Sigma1(e)
+ ror w8,w23,#2
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ eor w17,w23,w23,ror#9
+ add w22,w22,w16 // h+=Sigma1(e)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ add w26,w26,w22 // d+=h
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w8,w17,ror#13 // Sigma0(a)
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ //add w22,w22,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w1,w1 // 14
+#endif
+ ldr w6,[sp,#12]
+ add w22,w22,w17 // h+=Sigma0(a)
+ str w9,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ eor w9,w26,w26,ror#14
+ and w17,w27,w26
+ bic w19,w20,w26
+ add w21,w21,w1 // h+=X[i]
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w9,ror#11 // Sigma1(e)
+ ror w9,w22,#2
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ eor w17,w22,w22,ror#9
+ add w21,w21,w16 // h+=Sigma1(e)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ add w25,w25,w21 // d+=h
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w9,w17,ror#13 // Sigma0(a)
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ //add w21,w21,w17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev w2,w2 // 15
+#endif
+ ldr w7,[sp,#0]
+ add w21,w21,w17 // h+=Sigma0(a)
+ str w10,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w9,w4,#7
+ and w17,w26,w25
+ ror w8,w1,#17
+ bic w28,w27,w25
+ ror w10,w21,#2
+ add w20,w20,w2 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w9,w9,w4,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w10,w10,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w8,w8,w1,ror#19
+ eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w10,w21,ror#22 // Sigma0(a)
+ eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+ add w3,w3,w12
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w3,w3,w9
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w3,w3,w8
+.Loop_16_xx:
+ ldr w8,[sp,#4]
+ str w11,[sp,#0]
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ ror w10,w5,#7
+ and w17,w25,w24
+ ror w9,w2,#17
+ bic w19,w26,w24
+ ror w11,w20,#2
+ add w27,w27,w3 // h+=X[i]
+ eor w16,w16,w24,ror#11
+ eor w10,w10,w5,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w24,ror#25 // Sigma1(e)
+ eor w11,w11,w20,ror#13
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w9,w9,w2,ror#19
+ eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
+ add w27,w27,w16 // h+=Sigma1(e)
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w11,w20,ror#22 // Sigma0(a)
+ eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
+ add w4,w4,w13
+ add w23,w23,w27 // d+=h
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w4,w4,w10
+ add w27,w27,w17 // h+=Sigma0(a)
+ add w4,w4,w9
+ ldr w9,[sp,#8]
+ str w12,[sp,#4]
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ ror w11,w6,#7
+ and w17,w24,w23
+ ror w10,w3,#17
+ bic w28,w25,w23
+ ror w12,w27,#2
+ add w26,w26,w4 // h+=X[i]
+ eor w16,w16,w23,ror#11
+ eor w11,w11,w6,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w23,ror#25 // Sigma1(e)
+ eor w12,w12,w27,ror#13
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w10,w10,w3,ror#19
+ eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
+ add w26,w26,w16 // h+=Sigma1(e)
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w12,w27,ror#22 // Sigma0(a)
+ eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
+ add w5,w5,w14
+ add w22,w22,w26 // d+=h
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w5,w5,w11
+ add w26,w26,w17 // h+=Sigma0(a)
+ add w5,w5,w10
+ ldr w10,[sp,#12]
+ str w13,[sp,#8]
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ ror w12,w7,#7
+ and w17,w23,w22
+ ror w11,w4,#17
+ bic w19,w24,w22
+ ror w13,w26,#2
+ add w25,w25,w5 // h+=X[i]
+ eor w16,w16,w22,ror#11
+ eor w12,w12,w7,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w22,ror#25 // Sigma1(e)
+ eor w13,w13,w26,ror#13
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w11,w11,w4,ror#19
+ eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
+ add w25,w25,w16 // h+=Sigma1(e)
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w13,w26,ror#22 // Sigma0(a)
+ eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
+ add w6,w6,w15
+ add w21,w21,w25 // d+=h
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w6,w6,w12
+ add w25,w25,w17 // h+=Sigma0(a)
+ add w6,w6,w11
+ ldr w11,[sp,#0]
+ str w14,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ ror w13,w8,#7
+ and w17,w22,w21
+ ror w12,w5,#17
+ bic w28,w23,w21
+ ror w14,w25,#2
+ add w24,w24,w6 // h+=X[i]
+ eor w16,w16,w21,ror#11
+ eor w13,w13,w8,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w21,ror#25 // Sigma1(e)
+ eor w14,w14,w25,ror#13
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w12,w12,w5,ror#19
+ eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
+ add w24,w24,w16 // h+=Sigma1(e)
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w14,w25,ror#22 // Sigma0(a)
+ eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
+ add w7,w7,w0
+ add w20,w20,w24 // d+=h
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w7,w7,w13
+ add w24,w24,w17 // h+=Sigma0(a)
+ add w7,w7,w12
+ ldr w12,[sp,#4]
+ str w15,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ ror w14,w9,#7
+ and w17,w21,w20
+ ror w13,w6,#17
+ bic w19,w22,w20
+ ror w15,w24,#2
+ add w23,w23,w7 // h+=X[i]
+ eor w16,w16,w20,ror#11
+ eor w14,w14,w9,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w20,ror#25 // Sigma1(e)
+ eor w15,w15,w24,ror#13
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w13,w13,w6,ror#19
+ eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
+ add w23,w23,w16 // h+=Sigma1(e)
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w15,w24,ror#22 // Sigma0(a)
+ eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
+ add w8,w8,w1
+ add w27,w27,w23 // d+=h
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w8,w8,w14
+ add w23,w23,w17 // h+=Sigma0(a)
+ add w8,w8,w13
+ ldr w13,[sp,#8]
+ str w0,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ ror w15,w10,#7
+ and w17,w20,w27
+ ror w14,w7,#17
+ bic w28,w21,w27
+ ror w0,w23,#2
+ add w22,w22,w8 // h+=X[i]
+ eor w16,w16,w27,ror#11
+ eor w15,w15,w10,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w27,ror#25 // Sigma1(e)
+ eor w0,w0,w23,ror#13
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w14,w14,w7,ror#19
+ eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
+ add w22,w22,w16 // h+=Sigma1(e)
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w0,w23,ror#22 // Sigma0(a)
+ eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
+ add w9,w9,w2
+ add w26,w26,w22 // d+=h
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w9,w9,w15
+ add w22,w22,w17 // h+=Sigma0(a)
+ add w9,w9,w14
+ ldr w14,[sp,#12]
+ str w1,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ ror w0,w11,#7
+ and w17,w27,w26
+ ror w15,w8,#17
+ bic w19,w20,w26
+ ror w1,w22,#2
+ add w21,w21,w9 // h+=X[i]
+ eor w16,w16,w26,ror#11
+ eor w0,w0,w11,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w26,ror#25 // Sigma1(e)
+ eor w1,w1,w22,ror#13
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w15,w15,w8,ror#19
+ eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
+ add w21,w21,w16 // h+=Sigma1(e)
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w1,w22,ror#22 // Sigma0(a)
+ eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
+ add w10,w10,w3
+ add w25,w25,w21 // d+=h
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w10,w10,w0
+ add w21,w21,w17 // h+=Sigma0(a)
+ add w10,w10,w15
+ ldr w15,[sp,#0]
+ str w2,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w1,w12,#7
+ and w17,w26,w25
+ ror w0,w9,#17
+ bic w28,w27,w25
+ ror w2,w21,#2
+ add w20,w20,w10 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w1,w1,w12,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w2,w2,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w0,w0,w9,ror#19
+ eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w2,w21,ror#22 // Sigma0(a)
+ eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
+ add w11,w11,w4
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w11,w11,w1
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w11,w11,w0
+ ldr w0,[sp,#4]
+ str w3,[sp,#0]
+ ror w16,w24,#6
+ add w27,w27,w19 // h+=K[i]
+ ror w2,w13,#7
+ and w17,w25,w24
+ ror w1,w10,#17
+ bic w19,w26,w24
+ ror w3,w20,#2
+ add w27,w27,w11 // h+=X[i]
+ eor w16,w16,w24,ror#11
+ eor w2,w2,w13,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w20,w21 // a^b, b^c in next round
+ eor w16,w16,w24,ror#25 // Sigma1(e)
+ eor w3,w3,w20,ror#13
+ add w27,w27,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w1,w1,w10,ror#19
+ eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
+ add w27,w27,w16 // h+=Sigma1(e)
+ eor w28,w28,w21 // Maj(a,b,c)
+ eor w17,w3,w20,ror#22 // Sigma0(a)
+ eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
+ add w12,w12,w5
+ add w23,w23,w27 // d+=h
+ add w27,w27,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w12,w12,w2
+ add w27,w27,w17 // h+=Sigma0(a)
+ add w12,w12,w1
+ ldr w1,[sp,#8]
+ str w4,[sp,#4]
+ ror w16,w23,#6
+ add w26,w26,w28 // h+=K[i]
+ ror w3,w14,#7
+ and w17,w24,w23
+ ror w2,w11,#17
+ bic w28,w25,w23
+ ror w4,w27,#2
+ add w26,w26,w12 // h+=X[i]
+ eor w16,w16,w23,ror#11
+ eor w3,w3,w14,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w27,w20 // a^b, b^c in next round
+ eor w16,w16,w23,ror#25 // Sigma1(e)
+ eor w4,w4,w27,ror#13
+ add w26,w26,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w2,w2,w11,ror#19
+ eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
+ add w26,w26,w16 // h+=Sigma1(e)
+ eor w19,w19,w20 // Maj(a,b,c)
+ eor w17,w4,w27,ror#22 // Sigma0(a)
+ eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
+ add w13,w13,w6
+ add w22,w22,w26 // d+=h
+ add w26,w26,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w13,w13,w3
+ add w26,w26,w17 // h+=Sigma0(a)
+ add w13,w13,w2
+ ldr w2,[sp,#12]
+ str w5,[sp,#8]
+ ror w16,w22,#6
+ add w25,w25,w19 // h+=K[i]
+ ror w4,w15,#7
+ and w17,w23,w22
+ ror w3,w12,#17
+ bic w19,w24,w22
+ ror w5,w26,#2
+ add w25,w25,w13 // h+=X[i]
+ eor w16,w16,w22,ror#11
+ eor w4,w4,w15,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w26,w27 // a^b, b^c in next round
+ eor w16,w16,w22,ror#25 // Sigma1(e)
+ eor w5,w5,w26,ror#13
+ add w25,w25,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w3,w3,w12,ror#19
+ eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
+ add w25,w25,w16 // h+=Sigma1(e)
+ eor w28,w28,w27 // Maj(a,b,c)
+ eor w17,w5,w26,ror#22 // Sigma0(a)
+ eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
+ add w14,w14,w7
+ add w21,w21,w25 // d+=h
+ add w25,w25,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w14,w14,w4
+ add w25,w25,w17 // h+=Sigma0(a)
+ add w14,w14,w3
+ ldr w3,[sp,#0]
+ str w6,[sp,#12]
+ ror w16,w21,#6
+ add w24,w24,w28 // h+=K[i]
+ ror w5,w0,#7
+ and w17,w22,w21
+ ror w4,w13,#17
+ bic w28,w23,w21
+ ror w6,w25,#2
+ add w24,w24,w14 // h+=X[i]
+ eor w16,w16,w21,ror#11
+ eor w5,w5,w0,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w25,w26 // a^b, b^c in next round
+ eor w16,w16,w21,ror#25 // Sigma1(e)
+ eor w6,w6,w25,ror#13
+ add w24,w24,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w4,w4,w13,ror#19
+ eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
+ add w24,w24,w16 // h+=Sigma1(e)
+ eor w19,w19,w26 // Maj(a,b,c)
+ eor w17,w6,w25,ror#22 // Sigma0(a)
+ eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
+ add w15,w15,w8
+ add w20,w20,w24 // d+=h
+ add w24,w24,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w15,w15,w5
+ add w24,w24,w17 // h+=Sigma0(a)
+ add w15,w15,w4
+ ldr w4,[sp,#4]
+ str w7,[sp,#0]
+ ror w16,w20,#6
+ add w23,w23,w19 // h+=K[i]
+ ror w6,w1,#7
+ and w17,w21,w20
+ ror w5,w14,#17
+ bic w19,w22,w20
+ ror w7,w24,#2
+ add w23,w23,w15 // h+=X[i]
+ eor w16,w16,w20,ror#11
+ eor w6,w6,w1,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w24,w25 // a^b, b^c in next round
+ eor w16,w16,w20,ror#25 // Sigma1(e)
+ eor w7,w7,w24,ror#13
+ add w23,w23,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w5,w5,w14,ror#19
+ eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
+ add w23,w23,w16 // h+=Sigma1(e)
+ eor w28,w28,w25 // Maj(a,b,c)
+ eor w17,w7,w24,ror#22 // Sigma0(a)
+ eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
+ add w0,w0,w9
+ add w27,w27,w23 // d+=h
+ add w23,w23,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w0,w0,w6
+ add w23,w23,w17 // h+=Sigma0(a)
+ add w0,w0,w5
+ ldr w5,[sp,#8]
+ str w8,[sp,#4]
+ ror w16,w27,#6
+ add w22,w22,w28 // h+=K[i]
+ ror w7,w2,#7
+ and w17,w20,w27
+ ror w6,w15,#17
+ bic w28,w21,w27
+ ror w8,w23,#2
+ add w22,w22,w0 // h+=X[i]
+ eor w16,w16,w27,ror#11
+ eor w7,w7,w2,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w23,w24 // a^b, b^c in next round
+ eor w16,w16,w27,ror#25 // Sigma1(e)
+ eor w8,w8,w23,ror#13
+ add w22,w22,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w6,w6,w15,ror#19
+ eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
+ add w22,w22,w16 // h+=Sigma1(e)
+ eor w19,w19,w24 // Maj(a,b,c)
+ eor w17,w8,w23,ror#22 // Sigma0(a)
+ eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
+ add w1,w1,w10
+ add w26,w26,w22 // d+=h
+ add w22,w22,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w1,w1,w7
+ add w22,w22,w17 // h+=Sigma0(a)
+ add w1,w1,w6
+ ldr w6,[sp,#12]
+ str w9,[sp,#8]
+ ror w16,w26,#6
+ add w21,w21,w19 // h+=K[i]
+ ror w8,w3,#7
+ and w17,w27,w26
+ ror w7,w0,#17
+ bic w19,w20,w26
+ ror w9,w22,#2
+ add w21,w21,w1 // h+=X[i]
+ eor w16,w16,w26,ror#11
+ eor w8,w8,w3,ror#18
+ orr w17,w17,w19 // Ch(e,f,g)
+ eor w19,w22,w23 // a^b, b^c in next round
+ eor w16,w16,w26,ror#25 // Sigma1(e)
+ eor w9,w9,w22,ror#13
+ add w21,w21,w17 // h+=Ch(e,f,g)
+ and w28,w28,w19 // (b^c)&=(a^b)
+ eor w7,w7,w0,ror#19
+ eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
+ add w21,w21,w16 // h+=Sigma1(e)
+ eor w28,w28,w23 // Maj(a,b,c)
+ eor w17,w9,w22,ror#22 // Sigma0(a)
+ eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
+ add w2,w2,w11
+ add w25,w25,w21 // d+=h
+ add w21,w21,w28 // h+=Maj(a,b,c)
+ ldr w28,[x30],#4 // *K++, w19 in next round
+ add w2,w2,w8
+ add w21,w21,w17 // h+=Sigma0(a)
+ add w2,w2,w7
+ ldr w7,[sp,#0]
+ str w10,[sp,#12]
+ ror w16,w25,#6
+ add w20,w20,w28 // h+=K[i]
+ ror w9,w4,#7
+ and w17,w26,w25
+ ror w8,w1,#17
+ bic w28,w27,w25
+ ror w10,w21,#2
+ add w20,w20,w2 // h+=X[i]
+ eor w16,w16,w25,ror#11
+ eor w9,w9,w4,ror#18
+ orr w17,w17,w28 // Ch(e,f,g)
+ eor w28,w21,w22 // a^b, b^c in next round
+ eor w16,w16,w25,ror#25 // Sigma1(e)
+ eor w10,w10,w21,ror#13
+ add w20,w20,w17 // h+=Ch(e,f,g)
+ and w19,w19,w28 // (b^c)&=(a^b)
+ eor w8,w8,w1,ror#19
+ eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
+ add w20,w20,w16 // h+=Sigma1(e)
+ eor w19,w19,w22 // Maj(a,b,c)
+ eor w17,w10,w21,ror#22 // Sigma0(a)
+ eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
+ add w3,w3,w12
+ add w24,w24,w20 // d+=h
+ add w20,w20,w19 // h+=Maj(a,b,c)
+ ldr w19,[x30],#4 // *K++, w28 in next round
+ add w3,w3,w9
+ add w20,w20,w17 // h+=Sigma0(a)
+ add w3,w3,w8
+ cbnz w19,.Loop_16_xx
+
+ ldp x0,x2,[x29,#96]
+ ldr x1,[x29,#112]
+ sub x30,x30,#260 // rewind
+
+ ldp w3,w4,[x0]
+ ldp w5,w6,[x0,#2*4]
+ add x1,x1,#14*4 // advance input pointer
+ ldp w7,w8,[x0,#4*4]
+ add w20,w20,w3
+ ldp w9,w10,[x0,#6*4]
+ add w21,w21,w4
+ add w22,w22,w5
+ add w23,w23,w6
+ stp w20,w21,[x0]
+ add w24,w24,w7
+ add w25,w25,w8
+ stp w22,w23,[x0,#2*4]
+ add w26,w26,w9
+ add w27,w27,w10
+ cmp x1,x2
+ stp w24,w25,[x0,#4*4]
+ stp w26,w27,[x0,#6*4]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*4
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size sha256_block_data_order,.-sha256_block_data_order
+
+.align 6
+.type K256,%object
+K256:
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+ .long 0 //terminator
+.size K256,.-K256
+.align 3
+.LOPENSSL_armcap_P:
+ .quad OPENSSL_armcap_P-.
+.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+.type sha256_block_armv8,%function
+.align 6
+sha256_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ ld1 {v0.4s,v1.4s},[x0]
+ adr x3,K256
+
+.Loop_hw:
+ ld1 {v4.16b-v7.16b},[x1],#64
+ sub x2,x2,#1
+ ld1 {v16.4s},[x3],#16
+ rev32 v4.16b,v4.16b
+ rev32 v5.16b,v5.16b
+ rev32 v6.16b,v6.16b
+ rev32 v7.16b,v7.16b
+ orr v18.16b,v0.16b,v0.16b // offload
+ orr v19.16b,v1.16b,v1.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v6.4s
+ .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+ .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v7.4s
+ .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+ .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
+ ld1 {v17.4s},[x3],#16
+ add v16.4s,v16.4s,v4.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+
+ ld1 {v16.4s},[x3],#16
+ add v17.4s,v17.4s,v5.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+
+ ld1 {v17.4s},[x3]
+ add v16.4s,v16.4s,v6.4s
+ sub x3,x3,#64*4-16 // rewind
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
+ .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
+
+ add v17.4s,v17.4s,v7.4s
+ orr v2.16b,v0.16b,v0.16b
+ .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
+ .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
+
+ add v0.4s,v0.4s,v18.4s
+ add v1.4s,v1.4s,v19.4s
+
+ cbnz x2,.Loop_hw
+
+ st1 {v0.4s,v1.4s},[x0]
+
+ ldr x29,[sp],#16
+ ret
+.size sha256_block_armv8,.-sha256_block_armv8
+.comm OPENSSL_armcap_P,4,4
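The sha256_block_armv8 routine above drives the whole compression with the ARMv8 SHA-2 extension, but the instructions are emitted as raw .inst words (sha256h, sha256h2, sha256su0, sha256su1 appear only in the trailing comments), apparently so that assemblers without crypto-extension support can still build the file; this path is only reached when the armcap probe selects it. As a rough illustration of what one "load K, extend schedule, four rounds" group corresponds to, here is a minimal C sketch using the ACLE intrinsics. The function and variable names and a build flag such as -march=armv8-a+crypto are assumptions made for illustration, not something taken from this diff.

/* Sketch of one group of the hardware loop: wk = W+K, extend W[i..i+3],
 * then four rounds of compression.  Illustrative names only. */
#include <arm_neon.h>

static inline void
sha256_quad_round(uint32x4_t *abcd, uint32x4_t *efgh,
                  uint32x4_t *w0, uint32x4_t w1, uint32x4_t w2, uint32x4_t w3,
                  uint32x4_t k)
{
    uint32x4_t wk = vaddq_u32(*w0, k);             /* add v16.4s,v16.4s,v4.4s      */
    *w0 = vsha256su0q_u32(*w0, w1);                /* sha256su0 v4.16b,v5.16b      */
    uint32x4_t abcd_in = *abcd;                    /* orr v2.16b,v0.16b,v0.16b     */
    *abcd = vsha256hq_u32(abcd_in, *efgh, wk);     /* sha256h  v0.16b,v1.16b,v16.4s */
    *efgh = vsha256h2q_u32(*efgh, abcd_in, wk);    /* sha256h2 v1.16b,v2.16b,v16.4s */
    *w0 = vsha256su1q_u32(*w0, w2, w3);            /* sha256su1 v4.16b,v6.16b,v7.16b */
}

The last few groups in the assembly skip the sha256su0/sha256su1 pair because no further message words are needed, and v18/v19 keep the offloaded previous state so it can be added back into v0/v1 before the next block, just as a C version would accumulate the saved state at the end of each block.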
diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.S b/app/openssl/crypto/sha/asm/sha512-armv4.S
index 046c909a..fd462771 120000..100644
--- a/app/openssl/crypto/sha/asm/sha512-armv4.S
+++ b/app/openssl/crypto/sha/asm/sha512-armv4.S
@@ -1 +1,1783 @@
-sha512-armv4.s \ No newline at end of file
+#include "arm_arch.h"
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
+#endif
+
+.text
+.code 32
+.type K512,%object
+.align 5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size K512,.-K512
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha512_block_data_order
+.skip 32-4
+
+.global sha512_block_data_order
+.type sha512_block_data_order,%function
+sha512_block_data_order:
+ sub r3,pc,#8 @ sha512_block_data_order
+ add r2,r1,r2,lsl#7 @ len to point at the end of inp
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#1
+ bne .LNEON
+#endif
+ stmdb sp!,{r4-r12,lr}
+ sub r14,r3,#672 @ K512
+ sub sp,sp,#9*8
+
+ ldr r7,[r0,#32+LO]
+ ldr r8,[r0,#32+HI]
+ ldr r9, [r0,#48+LO]
+ ldr r10, [r0,#48+HI]
+ ldr r11, [r0,#56+LO]
+ ldr r12, [r0,#56+HI]
+.Loop:
+ str r9, [sp,#48+0]
+ str r10, [sp,#48+4]
+ str r11, [sp,#56+0]
+ str r12, [sp,#56+4]
+ ldr r5,[r0,#0+LO]
+ ldr r6,[r0,#0+HI]
+ ldr r3,[r0,#8+LO]
+ ldr r4,[r0,#8+HI]
+ ldr r9, [r0,#16+LO]
+ ldr r10, [r0,#16+HI]
+ ldr r11, [r0,#24+LO]
+ ldr r12, [r0,#24+HI]
+ str r3,[sp,#8+0]
+ str r4,[sp,#8+4]
+ str r9, [sp,#16+0]
+ str r10, [sp,#16+4]
+ str r11, [sp,#24+0]
+ str r12, [sp,#24+4]
+ ldr r3,[r0,#40+LO]
+ ldr r4,[r0,#40+HI]
+ str r3,[sp,#40+0]
+ str r4,[sp,#40+4]
+
+.L00_15:
+#if __ARM_ARCH__<7
+ ldrb r3,[r1,#7]
+ ldrb r9, [r1,#6]
+ ldrb r10, [r1,#5]
+ ldrb r11, [r1,#4]
+ ldrb r4,[r1,#3]
+ ldrb r12, [r1,#2]
+ orr r3,r3,r9,lsl#8
+ ldrb r9, [r1,#1]
+ orr r3,r3,r10,lsl#16
+ ldrb r10, [r1],#8
+ orr r3,r3,r11,lsl#24
+ orr r4,r4,r12,lsl#8
+ orr r4,r4,r9,lsl#16
+ orr r4,r4,r10,lsl#24
+#else
+ ldr r3,[r1,#4]
+ ldr r4,[r1],#8
+#ifdef __ARMEL__
+ rev r3,r3
+ rev r4,r4
+#endif
+#endif
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ str r3,[sp,#64+0]
+ mov r10,r8,lsr#14
+ str r4,[sp,#64+4]
+ eor r9,r9,r8,lsl#18
+ ldr r11,[sp,#56+0] @ h.lo
+ eor r10,r10,r7,lsl#18
+ ldr r12,[sp,#56+4] @ h.hi
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ ldr r9,[sp,#40+0] @ f.lo
+ adc r4,r4,r10 @ T += Sigma1(e)
+ ldr r10,[sp,#40+4] @ f.hi
+ adds r3,r3,r11
+ ldr r11,[sp,#48+0] @ g.lo
+ adc r4,r4,r12 @ T += h
+ ldr r12,[sp,#48+4] @ g.hi
+
+ eor r9,r9,r11
+ str r7,[sp,#32+0]
+ eor r10,r10,r12
+ str r8,[sp,#32+4]
+ and r9,r9,r7
+ str r5,[sp,#0+0]
+ and r10,r10,r8
+ str r6,[sp,#0+4]
+ eor r9,r9,r11
+ ldr r11,[r14,#LO] @ K[i].lo
+ eor r10,r10,r12 @ Ch(e,f,g)
+ ldr r12,[r14,#HI] @ K[i].hi
+
+ adds r3,r3,r9
+ ldr r7,[sp,#24+0] @ d.lo
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ ldr r8,[sp,#24+4] @ d.hi
+ adds r3,r3,r11
+ and r9,r11,#0xff
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ ldr r11,[sp,#8+0] @ b.lo
+ adc r8,r8,r4 @ d += T
+ teq r9,#148
+
+ ldr r12,[sp,#16+0] @ c.lo
+ orreq r14,r14,#1
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ and r9,r5,r11
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ ldr r10,[sp,#8+4] @ b.hi
+ orr r5,r5,r11
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ and r12,r6,r10
+ orr r6,r6,r10
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r6,r6,r11
+ adds r5,r5,r3
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ sub sp,sp,#8
+ adc r6,r6,r4 @ h += T
+ tst r14,#1
+ add r14,r14,#8
+ tst r14,#1
+ beq .L00_15
+ ldr r9,[sp,#184+0]
+ ldr r10,[sp,#184+4]
+ bic r14,r14,#1
+.L16_79:
+ @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+ @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+ @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
+ mov r3,r9,lsr#1
+ ldr r11,[sp,#80+0]
+ mov r4,r10,lsr#1
+ ldr r12,[sp,#80+4]
+ eor r3,r3,r10,lsl#31
+ eor r4,r4,r9,lsl#31
+ eor r3,r3,r9,lsr#8
+ eor r4,r4,r10,lsr#8
+ eor r3,r3,r10,lsl#24
+ eor r4,r4,r9,lsl#24
+ eor r3,r3,r9,lsr#7
+ eor r4,r4,r10,lsr#7
+ eor r3,r3,r10,lsl#25
+
+ @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+ @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+ @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+ mov r9,r11,lsr#19
+ mov r10,r12,lsr#19
+ eor r9,r9,r12,lsl#13
+ eor r10,r10,r11,lsl#13
+ eor r9,r9,r12,lsr#29
+ eor r10,r10,r11,lsr#29
+ eor r9,r9,r11,lsl#3
+ eor r10,r10,r12,lsl#3
+ eor r9,r9,r11,lsr#6
+ eor r10,r10,r12,lsr#6
+ ldr r11,[sp,#120+0]
+ eor r9,r9,r12,lsl#26
+
+ ldr r12,[sp,#120+4]
+ adds r3,r3,r9
+ ldr r9,[sp,#192+0]
+ adc r4,r4,r10
+
+ ldr r10,[sp,#192+4]
+ adds r3,r3,r11
+ adc r4,r4,r12
+ adds r3,r3,r9
+ adc r4,r4,r10
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ str r3,[sp,#64+0]
+ mov r10,r8,lsr#14
+ str r4,[sp,#64+4]
+ eor r9,r9,r8,lsl#18
+ ldr r11,[sp,#56+0] @ h.lo
+ eor r10,r10,r7,lsl#18
+ ldr r12,[sp,#56+4] @ h.hi
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ ldr r9,[sp,#40+0] @ f.lo
+ adc r4,r4,r10 @ T += Sigma1(e)
+ ldr r10,[sp,#40+4] @ f.hi
+ adds r3,r3,r11
+ ldr r11,[sp,#48+0] @ g.lo
+ adc r4,r4,r12 @ T += h
+ ldr r12,[sp,#48+4] @ g.hi
+
+ eor r9,r9,r11
+ str r7,[sp,#32+0]
+ eor r10,r10,r12
+ str r8,[sp,#32+4]
+ and r9,r9,r7
+ str r5,[sp,#0+0]
+ and r10,r10,r8
+ str r6,[sp,#0+4]
+ eor r9,r9,r11
+ ldr r11,[r14,#LO] @ K[i].lo
+ eor r10,r10,r12 @ Ch(e,f,g)
+ ldr r12,[r14,#HI] @ K[i].hi
+
+ adds r3,r3,r9
+ ldr r7,[sp,#24+0] @ d.lo
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ ldr r8,[sp,#24+4] @ d.hi
+ adds r3,r3,r11
+ and r9,r11,#0xff
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ ldr r11,[sp,#8+0] @ b.lo
+ adc r8,r8,r4 @ d += T
+ teq r9,#23
+
+ ldr r12,[sp,#16+0] @ c.lo
+ orreq r14,r14,#1
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ and r9,r5,r11
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ ldr r10,[sp,#8+4] @ b.hi
+ orr r5,r5,r11
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ and r12,r6,r10
+ orr r6,r6,r10
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r6,r6,r11
+ adds r5,r5,r3
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ sub sp,sp,#8
+ adc r6,r6,r4 @ h += T
+ tst r14,#1
+ add r14,r14,#8
+ ldreq r9,[sp,#184+0]
+ ldreq r10,[sp,#184+4]
+ beq .L16_79
+ bic r14,r14,#1
+
+ ldr r3,[sp,#8+0]
+ ldr r4,[sp,#8+4]
+ ldr r9, [r0,#0+LO]
+ ldr r10, [r0,#0+HI]
+ ldr r11, [r0,#8+LO]
+ ldr r12, [r0,#8+HI]
+ adds r9,r5,r9
+ str r9, [r0,#0+LO]
+ adc r10,r6,r10
+ str r10, [r0,#0+HI]
+ adds r11,r3,r11
+ str r11, [r0,#8+LO]
+ adc r12,r4,r12
+ str r12, [r0,#8+HI]
+
+ ldr r5,[sp,#16+0]
+ ldr r6,[sp,#16+4]
+ ldr r3,[sp,#24+0]
+ ldr r4,[sp,#24+4]
+ ldr r9, [r0,#16+LO]
+ ldr r10, [r0,#16+HI]
+ ldr r11, [r0,#24+LO]
+ ldr r12, [r0,#24+HI]
+ adds r9,r5,r9
+ str r9, [r0,#16+LO]
+ adc r10,r6,r10
+ str r10, [r0,#16+HI]
+ adds r11,r3,r11
+ str r11, [r0,#24+LO]
+ adc r12,r4,r12
+ str r12, [r0,#24+HI]
+
+ ldr r3,[sp,#40+0]
+ ldr r4,[sp,#40+4]
+ ldr r9, [r0,#32+LO]
+ ldr r10, [r0,#32+HI]
+ ldr r11, [r0,#40+LO]
+ ldr r12, [r0,#40+HI]
+ adds r7,r7,r9
+ str r7,[r0,#32+LO]
+ adc r8,r8,r10
+ str r8,[r0,#32+HI]
+ adds r11,r3,r11
+ str r11, [r0,#40+LO]
+ adc r12,r4,r12
+ str r12, [r0,#40+HI]
+
+ ldr r5,[sp,#48+0]
+ ldr r6,[sp,#48+4]
+ ldr r3,[sp,#56+0]
+ ldr r4,[sp,#56+4]
+ ldr r9, [r0,#48+LO]
+ ldr r10, [r0,#48+HI]
+ ldr r11, [r0,#56+LO]
+ ldr r12, [r0,#56+HI]
+ adds r9,r5,r9
+ str r9, [r0,#48+LO]
+ adc r10,r6,r10
+ str r10, [r0,#48+HI]
+ adds r11,r3,r11
+ str r11, [r0,#56+LO]
+ adc r12,r4,r12
+ str r12, [r0,#56+HI]
+
+ add sp,sp,#640
+ sub r14,r14,#640
+
+ teq r1,r2
+ bne .Loop
+
+ add sp,sp,#8*9 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.align 4
+.LNEON:
+ dmb @ errata #451034 on early Cortex A8
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+ sub r3,r3,#672 @ K512
+ vldmia r0,{d16-d23} @ load context
+.Loop_neon:
+ vshr.u64 d24,d20,#14 @ 0
+#if 0<16
+ vld1.64 {d0},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d20,#18
+ vshr.u64 d26,d20,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 0<16 && defined(__ARMEL__)
+ vrev64.8 d0,d0
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d0
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 1
+#if 1<16
+ vld1.64 {d1},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 1<16 && defined(__ARMEL__)
+ vrev64.8 d1,d1
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d1
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 d24,d18,#14 @ 2
+#if 2<16
+ vld1.64 {d2},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d18,#18
+ vshr.u64 d26,d18,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 2<16 && defined(__ARMEL__)
+ vrev64.8 d2,d2
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d2
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 3
+#if 3<16
+ vld1.64 {d3},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 3<16 && defined(__ARMEL__)
+ vrev64.8 d3,d3
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d3
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 d24,d16,#14 @ 4
+#if 4<16
+ vld1.64 {d4},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d16,#18
+ vshr.u64 d26,d16,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 4<16 && defined(__ARMEL__)
+ vrev64.8 d4,d4
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d4
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 5
+#if 5<16
+ vld1.64 {d5},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 5<16 && defined(__ARMEL__)
+ vrev64.8 d5,d5
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d5
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 d24,d22,#14 @ 6
+#if 6<16
+ vld1.64 {d6},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d22,#18
+ vshr.u64 d26,d22,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 6<16 && defined(__ARMEL__)
+ vrev64.8 d6,d6
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d6
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 7
+#if 7<16
+ vld1.64 {d7},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 7<16 && defined(__ARMEL__)
+ vrev64.8 d7,d7
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d7
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ vshr.u64 d24,d20,#14 @ 8
+#if 8<16
+ vld1.64 {d8},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d20,#18
+ vshr.u64 d26,d20,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 8<16 && defined(__ARMEL__)
+ vrev64.8 d8,d8
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d8
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 9
+#if 9<16
+ vld1.64 {d9},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 9<16 && defined(__ARMEL__)
+ vrev64.8 d9,d9
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d9
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 d24,d18,#14 @ 10
+#if 10<16
+ vld1.64 {d10},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d18,#18
+ vshr.u64 d26,d18,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 10<16 && defined(__ARMEL__)
+ vrev64.8 d10,d10
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d10
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 11
+#if 11<16
+ vld1.64 {d11},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 11<16 && defined(__ARMEL__)
+ vrev64.8 d11,d11
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d11
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 d24,d16,#14 @ 12
+#if 12<16
+ vld1.64 {d12},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d16,#18
+ vshr.u64 d26,d16,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 12<16 && defined(__ARMEL__)
+ vrev64.8 d12,d12
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d12
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 13
+#if 13<16
+ vld1.64 {d13},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 13<16 && defined(__ARMEL__)
+ vrev64.8 d13,d13
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d13
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 d24,d22,#14 @ 14
+#if 14<16
+ vld1.64 {d14},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d22,#18
+ vshr.u64 d26,d22,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 14<16 && defined(__ARMEL__)
+ vrev64.8 d14,d14
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d14
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 15
+#if 15<16
+ vld1.64 {d15},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 15<16 && defined(__ARMEL__)
+ vrev64.8 d15,d15
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d15
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ mov r12,#4
+.L16_79_neon:
+ subs r12,#1
+ vshr.u64 q12,q7,#19
+ vshr.u64 q13,q7,#61
+ vshr.u64 q15,q7,#6
+ vsli.64 q12,q7,#45
+ vext.8 q14,q0,q1,#8 @ X[i+1]
+ vsli.64 q13,q7,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q0,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q4,q5,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d20,#14 @ from NEON_00_15
+ vadd.i64 q0,q14
+ vshr.u64 d25,d20,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d20,#41 @ from NEON_00_15
+ vadd.i64 q0,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 16<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d0
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 17
+#if 17<16
+ vld1.64 {d1},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 17<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d1
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 q12,q0,#19
+ vshr.u64 q13,q0,#61
+ vshr.u64 q15,q0,#6
+ vsli.64 q12,q0,#45
+ vext.8 q14,q1,q2,#8 @ X[i+1]
+ vsli.64 q13,q0,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q1,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q5,q6,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d18,#14 @ from NEON_00_15
+ vadd.i64 q1,q14
+ vshr.u64 d25,d18,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d18,#41 @ from NEON_00_15
+ vadd.i64 q1,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 18<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d2
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 19
+#if 19<16
+ vld1.64 {d3},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 19<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d3
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 q12,q1,#19
+ vshr.u64 q13,q1,#61
+ vshr.u64 q15,q1,#6
+ vsli.64 q12,q1,#45
+ vext.8 q14,q2,q3,#8 @ X[i+1]
+ vsli.64 q13,q1,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q2,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q6,q7,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d16,#14 @ from NEON_00_15
+ vadd.i64 q2,q14
+ vshr.u64 d25,d16,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d16,#41 @ from NEON_00_15
+ vadd.i64 q2,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 20<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d4
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 21
+#if 21<16
+ vld1.64 {d5},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 21<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d5
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 q12,q2,#19
+ vshr.u64 q13,q2,#61
+ vshr.u64 q15,q2,#6
+ vsli.64 q12,q2,#45
+ vext.8 q14,q3,q4,#8 @ X[i+1]
+ vsli.64 q13,q2,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q3,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q7,q0,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d22,#14 @ from NEON_00_15
+ vadd.i64 q3,q14
+ vshr.u64 d25,d22,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d22,#41 @ from NEON_00_15
+ vadd.i64 q3,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 22<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d6
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 23
+#if 23<16
+ vld1.64 {d7},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 23<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d7
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ vshr.u64 q12,q3,#19
+ vshr.u64 q13,q3,#61
+ vshr.u64 q15,q3,#6
+ vsli.64 q12,q3,#45
+ vext.8 q14,q4,q5,#8 @ X[i+1]
+ vsli.64 q13,q3,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q4,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q0,q1,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d20,#14 @ from NEON_00_15
+ vadd.i64 q4,q14
+ vshr.u64 d25,d20,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d20,#41 @ from NEON_00_15
+ vadd.i64 q4,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 24<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d8
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 25
+#if 25<16
+ vld1.64 {d9},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 25<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d9
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 q12,q4,#19
+ vshr.u64 q13,q4,#61
+ vshr.u64 q15,q4,#6
+ vsli.64 q12,q4,#45
+ vext.8 q14,q5,q6,#8 @ X[i+1]
+ vsli.64 q13,q4,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q5,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q1,q2,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d18,#14 @ from NEON_00_15
+ vadd.i64 q5,q14
+ vshr.u64 d25,d18,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d18,#41 @ from NEON_00_15
+ vadd.i64 q5,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 26<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d10
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 27
+#if 27<16
+ vld1.64 {d11},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 27<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d11
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 q12,q5,#19
+ vshr.u64 q13,q5,#61
+ vshr.u64 q15,q5,#6
+ vsli.64 q12,q5,#45
+ vext.8 q14,q6,q7,#8 @ X[i+1]
+ vsli.64 q13,q5,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q6,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q2,q3,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d16,#14 @ from NEON_00_15
+ vadd.i64 q6,q14
+ vshr.u64 d25,d16,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d16,#41 @ from NEON_00_15
+ vadd.i64 q6,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 28<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d12
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 29
+#if 29<16
+ vld1.64 {d13},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 29<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d13
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 q12,q6,#19
+ vshr.u64 q13,q6,#61
+ vshr.u64 q15,q6,#6
+ vsli.64 q12,q6,#45
+ vext.8 q14,q7,q0,#8 @ X[i+1]
+ vsli.64 q13,q6,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q7,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q3,q4,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d22,#14 @ from NEON_00_15
+ vadd.i64 q7,q14
+ vshr.u64 d25,d22,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d22,#41 @ from NEON_00_15
+ vadd.i64 q7,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 30<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d14
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 31
+#if 31<16
+ vld1.64 {d15},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 31<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d15
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ bne .L16_79_neon
+
+ vldmia r0,{d24-d31} @ load context to temp
+ vadd.i64 q8,q12 @ vectorized accumulate
+ vadd.i64 q9,q13
+ vadd.i64 q10,q14
+ vadd.i64 q11,q15
+ vstmia r0,{d16-d23} @ save context
+ teq r1,r2
+ sub r3,#640 @ rewind K512
+ bne .Loop_neon
+
+ vldmia sp!,{d8-d15} @ epilogue
+ bx lr @ .word 0xe12fff1e
+#endif
+.size sha512_block_data_order,.-sha512_block_data_order
+.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+.comm OPENSSL_armcap_P,4,4
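The ARMv4 integer path in sha512_block_data_order above has to synthesize every 64-bit rotate from 32-bit shifts, and the per-round comments spell out the decomposition it uses (for example "@ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23" for Sigma1). The following is a minimal C sketch of that same decomposition, included only to make the comment notation concrete; the names are illustrative rather than taken from OpenSSL, and the reference helper assumes a 64-bit integer type is available purely for cross-checking.

#include <stdint.h>

typedef struct { uint32_t lo, hi; } u64_halves;   /* x = ((uint64_t)hi << 32) | lo */

/* Sigma1(x) = ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41), built from 32-bit halves.
 * A rotate by n < 32 mixes lo>>n with hi<<(32-n); ROTR by 41 is a half swap
 * followed by ROTR 9, which is exactly what the lsr/lsl pairs above compute. */
static u64_halves Sigma1_halves(u64_halves e)
{
    u64_halves r;
    r.lo = ((e.lo >> 14) ^ (e.hi << 18))          /* the "@ LO" line in the comments */
         ^ ((e.lo >> 18) ^ (e.hi << 14))
         ^ ((e.hi >>  9) ^ (e.lo << 23));
    r.hi = ((e.hi >> 14) ^ (e.lo << 18))          /* the "@ HI" line in the comments */
         ^ ((e.hi >> 18) ^ (e.lo << 14))
         ^ ((e.lo >>  9) ^ (e.hi << 23));
    return r;
}

/* Reference using a real 64-bit rotate, for comparison. */
static uint64_t Sigma1_ref(uint64_t x)
{
    return ((x >> 14) | (x << 50))
         ^ ((x >> 18) | (x << 46))
         ^ ((x >> 41) | (x << 23));
}

The NEON branch at .LNEON performs the same arithmetic with vshr.u64/vsli.64 on full 64-bit lanes, so it needs no lo/hi splitting and can compute the sigma0/sigma1 message-schedule updates two rounds at a time on q registers.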
diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.pl b/app/openssl/crypto/sha/asm/sha512-armv4.pl
index 7faf37b1..71aa9356 100644
--- a/app/openssl/crypto/sha/asm/sha512-armv4.pl
+++ b/app/openssl/crypto/sha/asm/sha512-armv4.pl
@@ -565,7 +565,7 @@ $code.=<<___;
bne .Loop_neon
vldmia sp!,{d8-d15} @ epilogue
- bx lr
+ ret @ bx lr
#endif
___
}
@@ -578,5 +578,6 @@ ___
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx lr/gm;
print $code;
close STDOUT; # enforce flush
diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.s b/app/openssl/crypto/sha/asm/sha512-armv4.s
deleted file mode 100644
index 57301922..00000000
--- a/app/openssl/crypto/sha/asm/sha512-armv4.s
+++ /dev/null
@@ -1,1783 +0,0 @@
-#include "arm_arch.h"
-#ifdef __ARMEL__
-# define LO 0
-# define HI 4
-# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
-#else
-# define HI 0
-# define LO 4
-# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
-#endif
-
-.text
-.code 32
-.type K512,%object
-.align 5
-K512:
-WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
-WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
-WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
-WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
-WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
-WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
-WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
-WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
-WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
-WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
-WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
-WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
-WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
-WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
-WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
-WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
-WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
-WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
-WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
-WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
-WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
-WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
-WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
-WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
-WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
-WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
-WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
-WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
-WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
-WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
-WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
-WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
-WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
-WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
-WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
-WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
-WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
-WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
-WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
-WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
-.size K512,.-K512
-.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha512_block_data_order
-.skip 32-4
-
-.global sha512_block_data_order
-.type sha512_block_data_order,%function
-sha512_block_data_order:
- sub r3,pc,#8 @ sha512_block_data_order
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
-#if __ARM_ARCH__>=7
- ldr r12,.LOPENSSL_armcap
- ldr r12,[r3,r12] @ OPENSSL_armcap_P
- tst r12,#1
- bne .LNEON
-#endif
- stmdb sp!,{r4-r12,lr}
- sub r14,r3,#672 @ K512
- sub sp,sp,#9*8
-
- ldr r7,[r0,#32+LO]
- ldr r8,[r0,#32+HI]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
-.Loop:
- str r9, [sp,#48+0]
- str r10, [sp,#48+4]
- str r11, [sp,#56+0]
- str r12, [sp,#56+4]
- ldr r5,[r0,#0+LO]
- ldr r6,[r0,#0+HI]
- ldr r3,[r0,#8+LO]
- ldr r4,[r0,#8+HI]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- str r3,[sp,#8+0]
- str r4,[sp,#8+4]
- str r9, [sp,#16+0]
- str r10, [sp,#16+4]
- str r11, [sp,#24+0]
- str r12, [sp,#24+4]
- ldr r3,[r0,#40+LO]
- ldr r4,[r0,#40+HI]
- str r3,[sp,#40+0]
- str r4,[sp,#40+4]
-
-.L00_15:
-#if __ARM_ARCH__<7
- ldrb r3,[r1,#7]
- ldrb r9, [r1,#6]
- ldrb r10, [r1,#5]
- ldrb r11, [r1,#4]
- ldrb r4,[r1,#3]
- ldrb r12, [r1,#2]
- orr r3,r3,r9,lsl#8
- ldrb r9, [r1,#1]
- orr r3,r3,r10,lsl#16
- ldrb r10, [r1],#8
- orr r3,r3,r11,lsl#24
- orr r4,r4,r12,lsl#8
- orr r4,r4,r9,lsl#16
- orr r4,r4,r10,lsl#24
-#else
- ldr r3,[r1,#4]
- ldr r4,[r1],#8
-#ifdef __ARMEL__
- rev r3,r3
- rev r4,r4
-#endif
-#endif
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#148
-
- ldr r12,[sp,#16+0] @ c.lo
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
- tst r14,#1
- beq .L00_15
- ldr r9,[sp,#184+0]
- ldr r10,[sp,#184+4]
- bic r14,r14,#1
-.L16_79:
- @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
- @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
- @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
- mov r3,r9,lsr#1
- ldr r11,[sp,#80+0]
- mov r4,r10,lsr#1
- ldr r12,[sp,#80+4]
- eor r3,r3,r10,lsl#31
- eor r4,r4,r9,lsl#31
- eor r3,r3,r9,lsr#8
- eor r4,r4,r10,lsr#8
- eor r3,r3,r10,lsl#24
- eor r4,r4,r9,lsl#24
- eor r3,r3,r9,lsr#7
- eor r4,r4,r10,lsr#7
- eor r3,r3,r10,lsl#25
-
- @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
- @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
- @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
- mov r9,r11,lsr#19
- mov r10,r12,lsr#19
- eor r9,r9,r12,lsl#13
- eor r10,r10,r11,lsl#13
- eor r9,r9,r12,lsr#29
- eor r10,r10,r11,lsr#29
- eor r9,r9,r11,lsl#3
- eor r10,r10,r12,lsl#3
- eor r9,r9,r11,lsr#6
- eor r10,r10,r12,lsr#6
- ldr r11,[sp,#120+0]
- eor r9,r9,r12,lsl#26
-
- ldr r12,[sp,#120+4]
- adds r3,r3,r9
- ldr r9,[sp,#192+0]
- adc r4,r4,r10
-
- ldr r10,[sp,#192+4]
- adds r3,r3,r11
- adc r4,r4,r12
- adds r3,r3,r9
- adc r4,r4,r10
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#23
-
- ldr r12,[sp,#16+0] @ c.lo
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
- ldreq r9,[sp,#184+0]
- ldreq r10,[sp,#184+4]
- beq .L16_79
- bic r14,r14,#1
-
- ldr r3,[sp,#8+0]
- ldr r4,[sp,#8+4]
- ldr r9, [r0,#0+LO]
- ldr r10, [r0,#0+HI]
- ldr r11, [r0,#8+LO]
- ldr r12, [r0,#8+HI]
- adds r9,r5,r9
- str r9, [r0,#0+LO]
- adc r10,r6,r10
- str r10, [r0,#0+HI]
- adds r11,r3,r11
- str r11, [r0,#8+LO]
- adc r12,r4,r12
- str r12, [r0,#8+HI]
-
- ldr r5,[sp,#16+0]
- ldr r6,[sp,#16+4]
- ldr r3,[sp,#24+0]
- ldr r4,[sp,#24+4]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- adds r9,r5,r9
- str r9, [r0,#16+LO]
- adc r10,r6,r10
- str r10, [r0,#16+HI]
- adds r11,r3,r11
- str r11, [r0,#24+LO]
- adc r12,r4,r12
- str r12, [r0,#24+HI]
-
- ldr r3,[sp,#40+0]
- ldr r4,[sp,#40+4]
- ldr r9, [r0,#32+LO]
- ldr r10, [r0,#32+HI]
- ldr r11, [r0,#40+LO]
- ldr r12, [r0,#40+HI]
- adds r7,r7,r9
- str r7,[r0,#32+LO]
- adc r8,r8,r10
- str r8,[r0,#32+HI]
- adds r11,r3,r11
- str r11, [r0,#40+LO]
- adc r12,r4,r12
- str r12, [r0,#40+HI]
-
- ldr r5,[sp,#48+0]
- ldr r6,[sp,#48+4]
- ldr r3,[sp,#56+0]
- ldr r4,[sp,#56+4]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
- adds r9,r5,r9
- str r9, [r0,#48+LO]
- adc r10,r6,r10
- str r10, [r0,#48+HI]
- adds r11,r3,r11
- str r11, [r0,#56+LO]
- adc r12,r4,r12
- str r12, [r0,#56+HI]
-
- add sp,sp,#640
- sub r14,r14,#640
-
- teq r1,r2
- bne .Loop
-
- add sp,sp,#8*9 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-#if __ARM_ARCH__>=7
-.fpu neon
-
-.align 4
-.LNEON:
- dmb @ errata #451034 on early Cortex A8
- vstmdb sp!,{d8-d15} @ ABI specification says so
- sub r3,r3,#672 @ K512
- vldmia r0,{d16-d23} @ load context
-.Loop_neon:
- vshr.u64 d24,d20,#14 @ 0
-#if 0<16
- vld1.64 {d0},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vsli.64 d26,d20,#23
-#if 0<16 && defined(__ARMEL__)
- vrev64.8 d0,d0
-#endif
- vadd.i64 d27,d28,d23
- veor d29,d21,d22
- veor d24,d25
- vand d29,d20
- veor d24,d26 @ Sigma1(e)
- veor d29,d22 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d16,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d16,#34
- vshr.u64 d26,d16,#39
- vsli.64 d24,d16,#36
- vsli.64 d25,d16,#30
- vsli.64 d26,d16,#25
- vadd.i64 d27,d0
- vorr d30,d16,d18
- vand d29,d16,d18
- veor d23,d24,d25
- vand d30,d17
- veor d23,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d23,d27
- vadd.i64 d19,d27
- vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 1
-#if 1<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vsli.64 d26,d19,#23
-#if 1<16 && defined(__ARMEL__)
- vrev64.8 d1,d1
-#endif
- vadd.i64 d27,d28,d22
- veor d29,d20,d21
- veor d24,d25
- vand d29,d19
- veor d24,d26 @ Sigma1(e)
- veor d29,d21 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d23,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d23,#34
- vshr.u64 d26,d23,#39
- vsli.64 d24,d23,#36
- vsli.64 d25,d23,#30
- vsli.64 d26,d23,#25
- vadd.i64 d27,d1
- vorr d30,d23,d17
- vand d29,d23,d17
- veor d22,d24,d25
- vand d30,d16
- veor d22,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d22,d27
- vadd.i64 d18,d27
- vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 2
-#if 2<16
- vld1.64 {d2},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vsli.64 d26,d18,#23
-#if 2<16 && defined(__ARMEL__)
- vrev64.8 d2,d2
-#endif
- vadd.i64 d27,d28,d21
- veor d29,d19,d20
- veor d24,d25
- vand d29,d18
- veor d24,d26 @ Sigma1(e)
- veor d29,d20 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d22,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d22,#34
- vshr.u64 d26,d22,#39
- vsli.64 d24,d22,#36
- vsli.64 d25,d22,#30
- vsli.64 d26,d22,#25
- vadd.i64 d27,d2
- vorr d30,d22,d16
- vand d29,d22,d16
- veor d21,d24,d25
- vand d30,d23
- veor d21,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d21,d27
- vadd.i64 d17,d27
- vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 3
-#if 3<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vsli.64 d26,d17,#23
-#if 3<16 && defined(__ARMEL__)
- vrev64.8 d3,d3
-#endif
- vadd.i64 d27,d28,d20
- veor d29,d18,d19
- veor d24,d25
- vand d29,d17
- veor d24,d26 @ Sigma1(e)
- veor d29,d19 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d21,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d21,#34
- vshr.u64 d26,d21,#39
- vsli.64 d24,d21,#36
- vsli.64 d25,d21,#30
- vsli.64 d26,d21,#25
- vadd.i64 d27,d3
- vorr d30,d21,d23
- vand d29,d21,d23
- veor d20,d24,d25
- vand d30,d22
- veor d20,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d20,d27
- vadd.i64 d16,d27
- vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 4
-#if 4<16
- vld1.64 {d4},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vsli.64 d26,d16,#23
-#if 4<16 && defined(__ARMEL__)
- vrev64.8 d4,d4
-#endif
- vadd.i64 d27,d28,d19
- veor d29,d17,d18
- veor d24,d25
- vand d29,d16
- veor d24,d26 @ Sigma1(e)
- veor d29,d18 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d20,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d20,#34
- vshr.u64 d26,d20,#39
- vsli.64 d24,d20,#36
- vsli.64 d25,d20,#30
- vsli.64 d26,d20,#25
- vadd.i64 d27,d4
- vorr d30,d20,d22
- vand d29,d20,d22
- veor d19,d24,d25
- vand d30,d21
- veor d19,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d19,d27
- vadd.i64 d23,d27
- vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 5
-#if 5<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vsli.64 d26,d23,#23
-#if 5<16 && defined(__ARMEL__)
- vrev64.8 d5,d5
-#endif
- vadd.i64 d27,d28,d18
- veor d29,d16,d17
- veor d24,d25
- vand d29,d23
- veor d24,d26 @ Sigma1(e)
- veor d29,d17 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d19,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d19,#34
- vshr.u64 d26,d19,#39
- vsli.64 d24,d19,#36
- vsli.64 d25,d19,#30
- vsli.64 d26,d19,#25
- vadd.i64 d27,d5
- vorr d30,d19,d21
- vand d29,d19,d21
- veor d18,d24,d25
- vand d30,d20
- veor d18,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d18,d27
- vadd.i64 d22,d27
- vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 6
-#if 6<16
- vld1.64 {d6},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vsli.64 d26,d22,#23
-#if 6<16 && defined(__ARMEL__)
- vrev64.8 d6,d6
-#endif
- vadd.i64 d27,d28,d17
- veor d29,d23,d16
- veor d24,d25
- vand d29,d22
- veor d24,d26 @ Sigma1(e)
- veor d29,d16 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d18,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d18,#34
- vshr.u64 d26,d18,#39
- vsli.64 d24,d18,#36
- vsli.64 d25,d18,#30
- vsli.64 d26,d18,#25
- vadd.i64 d27,d6
- vorr d30,d18,d20
- vand d29,d18,d20
- veor d17,d24,d25
- vand d30,d19
- veor d17,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d17,d27
- vadd.i64 d21,d27
- vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 7
-#if 7<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vsli.64 d26,d21,#23
-#if 7<16 && defined(__ARMEL__)
- vrev64.8 d7,d7
-#endif
- vadd.i64 d27,d28,d16
- veor d29,d22,d23
- veor d24,d25
- vand d29,d21
- veor d24,d26 @ Sigma1(e)
- veor d29,d23 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d17,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d17,#34
- vshr.u64 d26,d17,#39
- vsli.64 d24,d17,#36
- vsli.64 d25,d17,#30
- vsli.64 d26,d17,#25
- vadd.i64 d27,d7
- vorr d30,d17,d19
- vand d29,d17,d19
- veor d16,d24,d25
- vand d30,d18
- veor d16,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d16,d27
- vadd.i64 d20,d27
- vadd.i64 d16,d30
- vshr.u64 d24,d20,#14 @ 8
-#if 8<16
- vld1.64 {d8},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vsli.64 d26,d20,#23
-#if 8<16 && defined(__ARMEL__)
- vrev64.8 d8,d8
-#endif
- vadd.i64 d27,d28,d23
- veor d29,d21,d22
- veor d24,d25
- vand d29,d20
- veor d24,d26 @ Sigma1(e)
- veor d29,d22 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d16,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d16,#34
- vshr.u64 d26,d16,#39
- vsli.64 d24,d16,#36
- vsli.64 d25,d16,#30
- vsli.64 d26,d16,#25
- vadd.i64 d27,d8
- vorr d30,d16,d18
- vand d29,d16,d18
- veor d23,d24,d25
- vand d30,d17
- veor d23,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d23,d27
- vadd.i64 d19,d27
- vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 9
-#if 9<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vsli.64 d26,d19,#23
-#if 9<16 && defined(__ARMEL__)
- vrev64.8 d9,d9
-#endif
- vadd.i64 d27,d28,d22
- veor d29,d20,d21
- veor d24,d25
- vand d29,d19
- veor d24,d26 @ Sigma1(e)
- veor d29,d21 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d23,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d23,#34
- vshr.u64 d26,d23,#39
- vsli.64 d24,d23,#36
- vsli.64 d25,d23,#30
- vsli.64 d26,d23,#25
- vadd.i64 d27,d9
- vorr d30,d23,d17
- vand d29,d23,d17
- veor d22,d24,d25
- vand d30,d16
- veor d22,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d22,d27
- vadd.i64 d18,d27
- vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 10
-#if 10<16
- vld1.64 {d10},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vsli.64 d26,d18,#23
-#if 10<16 && defined(__ARMEL__)
- vrev64.8 d10,d10
-#endif
- vadd.i64 d27,d28,d21
- veor d29,d19,d20
- veor d24,d25
- vand d29,d18
- veor d24,d26 @ Sigma1(e)
- veor d29,d20 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d22,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d22,#34
- vshr.u64 d26,d22,#39
- vsli.64 d24,d22,#36
- vsli.64 d25,d22,#30
- vsli.64 d26,d22,#25
- vadd.i64 d27,d10
- vorr d30,d22,d16
- vand d29,d22,d16
- veor d21,d24,d25
- vand d30,d23
- veor d21,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d21,d27
- vadd.i64 d17,d27
- vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 11
-#if 11<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vsli.64 d26,d17,#23
-#if 11<16 && defined(__ARMEL__)
- vrev64.8 d11,d11
-#endif
- vadd.i64 d27,d28,d20
- veor d29,d18,d19
- veor d24,d25
- vand d29,d17
- veor d24,d26 @ Sigma1(e)
- veor d29,d19 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d21,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d21,#34
- vshr.u64 d26,d21,#39
- vsli.64 d24,d21,#36
- vsli.64 d25,d21,#30
- vsli.64 d26,d21,#25
- vadd.i64 d27,d11
- vorr d30,d21,d23
- vand d29,d21,d23
- veor d20,d24,d25
- vand d30,d22
- veor d20,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d20,d27
- vadd.i64 d16,d27
- vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 12
-#if 12<16
- vld1.64 {d12},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vsli.64 d26,d16,#23
-#if 12<16 && defined(__ARMEL__)
- vrev64.8 d12,d12
-#endif
- vadd.i64 d27,d28,d19
- veor d29,d17,d18
- veor d24,d25
- vand d29,d16
- veor d24,d26 @ Sigma1(e)
- veor d29,d18 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d20,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d20,#34
- vshr.u64 d26,d20,#39
- vsli.64 d24,d20,#36
- vsli.64 d25,d20,#30
- vsli.64 d26,d20,#25
- vadd.i64 d27,d12
- vorr d30,d20,d22
- vand d29,d20,d22
- veor d19,d24,d25
- vand d30,d21
- veor d19,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d19,d27
- vadd.i64 d23,d27
- vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 13
-#if 13<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vsli.64 d26,d23,#23
-#if 13<16 && defined(__ARMEL__)
- vrev64.8 d13,d13
-#endif
- vadd.i64 d27,d28,d18
- veor d29,d16,d17
- veor d24,d25
- vand d29,d23
- veor d24,d26 @ Sigma1(e)
- veor d29,d17 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d19,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d19,#34
- vshr.u64 d26,d19,#39
- vsli.64 d24,d19,#36
- vsli.64 d25,d19,#30
- vsli.64 d26,d19,#25
- vadd.i64 d27,d13
- vorr d30,d19,d21
- vand d29,d19,d21
- veor d18,d24,d25
- vand d30,d20
- veor d18,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d18,d27
- vadd.i64 d22,d27
- vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 14
-#if 14<16
- vld1.64 {d14},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vsli.64 d26,d22,#23
-#if 14<16 && defined(__ARMEL__)
- vrev64.8 d14,d14
-#endif
- vadd.i64 d27,d28,d17
- veor d29,d23,d16
- veor d24,d25
- vand d29,d22
- veor d24,d26 @ Sigma1(e)
- veor d29,d16 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d18,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d18,#34
- vshr.u64 d26,d18,#39
- vsli.64 d24,d18,#36
- vsli.64 d25,d18,#30
- vsli.64 d26,d18,#25
- vadd.i64 d27,d14
- vorr d30,d18,d20
- vand d29,d18,d20
- veor d17,d24,d25
- vand d30,d19
- veor d17,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d17,d27
- vadd.i64 d21,d27
- vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 15
-#if 15<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vsli.64 d26,d21,#23
-#if 15<16 && defined(__ARMEL__)
- vrev64.8 d15,d15
-#endif
- vadd.i64 d27,d28,d16
- veor d29,d22,d23
- veor d24,d25
- vand d29,d21
- veor d24,d26 @ Sigma1(e)
- veor d29,d23 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d17,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d17,#34
- vshr.u64 d26,d17,#39
- vsli.64 d24,d17,#36
- vsli.64 d25,d17,#30
- vsli.64 d26,d17,#25
- vadd.i64 d27,d15
- vorr d30,d17,d19
- vand d29,d17,d19
- veor d16,d24,d25
- vand d30,d18
- veor d16,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d16,d27
- vadd.i64 d20,d27
- vadd.i64 d16,d30
- mov r12,#4
-.L16_79_neon:
- subs r12,#1
- vshr.u64 q12,q7,#19
- vshr.u64 q13,q7,#61
- vshr.u64 q15,q7,#6
- vsli.64 q12,q7,#45
- vext.8 q14,q0,q1,#8 @ X[i+1]
- vsli.64 q13,q7,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q0,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q4,q5,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q0,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q0,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vsli.64 d26,d20,#23
-#if 16<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d23
- veor d29,d21,d22
- veor d24,d25
- vand d29,d20
- veor d24,d26 @ Sigma1(e)
- veor d29,d22 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d16,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d16,#34
- vshr.u64 d26,d16,#39
- vsli.64 d24,d16,#36
- vsli.64 d25,d16,#30
- vsli.64 d26,d16,#25
- vadd.i64 d27,d0
- vorr d30,d16,d18
- vand d29,d16,d18
- veor d23,d24,d25
- vand d30,d17
- veor d23,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d23,d27
- vadd.i64 d19,d27
- vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 17
-#if 17<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vsli.64 d26,d19,#23
-#if 17<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d22
- veor d29,d20,d21
- veor d24,d25
- vand d29,d19
- veor d24,d26 @ Sigma1(e)
- veor d29,d21 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d23,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d23,#34
- vshr.u64 d26,d23,#39
- vsli.64 d24,d23,#36
- vsli.64 d25,d23,#30
- vsli.64 d26,d23,#25
- vadd.i64 d27,d1
- vorr d30,d23,d17
- vand d29,d23,d17
- veor d22,d24,d25
- vand d30,d16
- veor d22,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d22,d27
- vadd.i64 d18,d27
- vadd.i64 d22,d30
- vshr.u64 q12,q0,#19
- vshr.u64 q13,q0,#61
- vshr.u64 q15,q0,#6
- vsli.64 q12,q0,#45
- vext.8 q14,q1,q2,#8 @ X[i+1]
- vsli.64 q13,q0,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q1,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q5,q6,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q1,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q1,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vsli.64 d26,d18,#23
-#if 18<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d21
- veor d29,d19,d20
- veor d24,d25
- vand d29,d18
- veor d24,d26 @ Sigma1(e)
- veor d29,d20 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d22,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d22,#34
- vshr.u64 d26,d22,#39
- vsli.64 d24,d22,#36
- vsli.64 d25,d22,#30
- vsli.64 d26,d22,#25
- vadd.i64 d27,d2
- vorr d30,d22,d16
- vand d29,d22,d16
- veor d21,d24,d25
- vand d30,d23
- veor d21,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d21,d27
- vadd.i64 d17,d27
- vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 19
-#if 19<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vsli.64 d26,d17,#23
-#if 19<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d20
- veor d29,d18,d19
- veor d24,d25
- vand d29,d17
- veor d24,d26 @ Sigma1(e)
- veor d29,d19 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d21,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d21,#34
- vshr.u64 d26,d21,#39
- vsli.64 d24,d21,#36
- vsli.64 d25,d21,#30
- vsli.64 d26,d21,#25
- vadd.i64 d27,d3
- vorr d30,d21,d23
- vand d29,d21,d23
- veor d20,d24,d25
- vand d30,d22
- veor d20,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d20,d27
- vadd.i64 d16,d27
- vadd.i64 d20,d30
- vshr.u64 q12,q1,#19
- vshr.u64 q13,q1,#61
- vshr.u64 q15,q1,#6
- vsli.64 q12,q1,#45
- vext.8 q14,q2,q3,#8 @ X[i+1]
- vsli.64 q13,q1,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q2,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q6,q7,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q2,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q2,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vsli.64 d26,d16,#23
-#if 20<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d19
- veor d29,d17,d18
- veor d24,d25
- vand d29,d16
- veor d24,d26 @ Sigma1(e)
- veor d29,d18 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d20,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d20,#34
- vshr.u64 d26,d20,#39
- vsli.64 d24,d20,#36
- vsli.64 d25,d20,#30
- vsli.64 d26,d20,#25
- vadd.i64 d27,d4
- vorr d30,d20,d22
- vand d29,d20,d22
- veor d19,d24,d25
- vand d30,d21
- veor d19,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d19,d27
- vadd.i64 d23,d27
- vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 21
-#if 21<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vsli.64 d26,d23,#23
-#if 21<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d18
- veor d29,d16,d17
- veor d24,d25
- vand d29,d23
- veor d24,d26 @ Sigma1(e)
- veor d29,d17 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d19,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d19,#34
- vshr.u64 d26,d19,#39
- vsli.64 d24,d19,#36
- vsli.64 d25,d19,#30
- vsli.64 d26,d19,#25
- vadd.i64 d27,d5
- vorr d30,d19,d21
- vand d29,d19,d21
- veor d18,d24,d25
- vand d30,d20
- veor d18,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d18,d27
- vadd.i64 d22,d27
- vadd.i64 d18,d30
- vshr.u64 q12,q2,#19
- vshr.u64 q13,q2,#61
- vshr.u64 q15,q2,#6
- vsli.64 q12,q2,#45
- vext.8 q14,q3,q4,#8 @ X[i+1]
- vsli.64 q13,q2,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q3,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q7,q0,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q3,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q3,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vsli.64 d26,d22,#23
-#if 22<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d17
- veor d29,d23,d16
- veor d24,d25
- vand d29,d22
- veor d24,d26 @ Sigma1(e)
- veor d29,d16 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d18,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d18,#34
- vshr.u64 d26,d18,#39
- vsli.64 d24,d18,#36
- vsli.64 d25,d18,#30
- vsli.64 d26,d18,#25
- vadd.i64 d27,d6
- vorr d30,d18,d20
- vand d29,d18,d20
- veor d17,d24,d25
- vand d30,d19
- veor d17,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d17,d27
- vadd.i64 d21,d27
- vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 23
-#if 23<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vsli.64 d26,d21,#23
-#if 23<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d16
- veor d29,d22,d23
- veor d24,d25
- vand d29,d21
- veor d24,d26 @ Sigma1(e)
- veor d29,d23 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d17,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d17,#34
- vshr.u64 d26,d17,#39
- vsli.64 d24,d17,#36
- vsli.64 d25,d17,#30
- vsli.64 d26,d17,#25
- vadd.i64 d27,d7
- vorr d30,d17,d19
- vand d29,d17,d19
- veor d16,d24,d25
- vand d30,d18
- veor d16,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d16,d27
- vadd.i64 d20,d27
- vadd.i64 d16,d30
- vshr.u64 q12,q3,#19
- vshr.u64 q13,q3,#61
- vshr.u64 q15,q3,#6
- vsli.64 q12,q3,#45
- vext.8 q14,q4,q5,#8 @ X[i+1]
- vsli.64 q13,q3,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q4,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q0,q1,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q4,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q4,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vsli.64 d26,d20,#23
-#if 24<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d23
- veor d29,d21,d22
- veor d24,d25
- vand d29,d20
- veor d24,d26 @ Sigma1(e)
- veor d29,d22 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d16,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d16,#34
- vshr.u64 d26,d16,#39
- vsli.64 d24,d16,#36
- vsli.64 d25,d16,#30
- vsli.64 d26,d16,#25
- vadd.i64 d27,d8
- vorr d30,d16,d18
- vand d29,d16,d18
- veor d23,d24,d25
- vand d30,d17
- veor d23,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d23,d27
- vadd.i64 d19,d27
- vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 25
-#if 25<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vsli.64 d26,d19,#23
-#if 25<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d22
- veor d29,d20,d21
- veor d24,d25
- vand d29,d19
- veor d24,d26 @ Sigma1(e)
- veor d29,d21 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d23,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d23,#34
- vshr.u64 d26,d23,#39
- vsli.64 d24,d23,#36
- vsli.64 d25,d23,#30
- vsli.64 d26,d23,#25
- vadd.i64 d27,d9
- vorr d30,d23,d17
- vand d29,d23,d17
- veor d22,d24,d25
- vand d30,d16
- veor d22,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d22,d27
- vadd.i64 d18,d27
- vadd.i64 d22,d30
- vshr.u64 q12,q4,#19
- vshr.u64 q13,q4,#61
- vshr.u64 q15,q4,#6
- vsli.64 q12,q4,#45
- vext.8 q14,q5,q6,#8 @ X[i+1]
- vsli.64 q13,q4,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q5,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q1,q2,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q5,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q5,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vsli.64 d26,d18,#23
-#if 26<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d21
- veor d29,d19,d20
- veor d24,d25
- vand d29,d18
- veor d24,d26 @ Sigma1(e)
- veor d29,d20 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d22,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d22,#34
- vshr.u64 d26,d22,#39
- vsli.64 d24,d22,#36
- vsli.64 d25,d22,#30
- vsli.64 d26,d22,#25
- vadd.i64 d27,d10
- vorr d30,d22,d16
- vand d29,d22,d16
- veor d21,d24,d25
- vand d30,d23
- veor d21,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d21,d27
- vadd.i64 d17,d27
- vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 27
-#if 27<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vsli.64 d26,d17,#23
-#if 27<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d20
- veor d29,d18,d19
- veor d24,d25
- vand d29,d17
- veor d24,d26 @ Sigma1(e)
- veor d29,d19 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d21,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d21,#34
- vshr.u64 d26,d21,#39
- vsli.64 d24,d21,#36
- vsli.64 d25,d21,#30
- vsli.64 d26,d21,#25
- vadd.i64 d27,d11
- vorr d30,d21,d23
- vand d29,d21,d23
- veor d20,d24,d25
- vand d30,d22
- veor d20,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d20,d27
- vadd.i64 d16,d27
- vadd.i64 d20,d30
- vshr.u64 q12,q5,#19
- vshr.u64 q13,q5,#61
- vshr.u64 q15,q5,#6
- vsli.64 q12,q5,#45
- vext.8 q14,q6,q7,#8 @ X[i+1]
- vsli.64 q13,q5,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q6,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q2,q3,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q6,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q6,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vsli.64 d26,d16,#23
-#if 28<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d19
- veor d29,d17,d18
- veor d24,d25
- vand d29,d16
- veor d24,d26 @ Sigma1(e)
- veor d29,d18 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d20,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d20,#34
- vshr.u64 d26,d20,#39
- vsli.64 d24,d20,#36
- vsli.64 d25,d20,#30
- vsli.64 d26,d20,#25
- vadd.i64 d27,d12
- vorr d30,d20,d22
- vand d29,d20,d22
- veor d19,d24,d25
- vand d30,d21
- veor d19,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d19,d27
- vadd.i64 d23,d27
- vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 29
-#if 29<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vsli.64 d26,d23,#23
-#if 29<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d18
- veor d29,d16,d17
- veor d24,d25
- vand d29,d23
- veor d24,d26 @ Sigma1(e)
- veor d29,d17 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d19,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d19,#34
- vshr.u64 d26,d19,#39
- vsli.64 d24,d19,#36
- vsli.64 d25,d19,#30
- vsli.64 d26,d19,#25
- vadd.i64 d27,d13
- vorr d30,d19,d21
- vand d29,d19,d21
- veor d18,d24,d25
- vand d30,d20
- veor d18,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d18,d27
- vadd.i64 d22,d27
- vadd.i64 d18,d30
- vshr.u64 q12,q6,#19
- vshr.u64 q13,q6,#61
- vshr.u64 q15,q6,#6
- vsli.64 q12,q6,#45
- vext.8 q14,q7,q0,#8 @ X[i+1]
- vsli.64 q13,q6,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q7,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q3,q4,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q7,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q7,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vsli.64 d26,d22,#23
-#if 30<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d17
- veor d29,d23,d16
- veor d24,d25
- vand d29,d22
- veor d24,d26 @ Sigma1(e)
- veor d29,d16 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d18,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d18,#34
- vshr.u64 d26,d18,#39
- vsli.64 d24,d18,#36
- vsli.64 d25,d18,#30
- vsli.64 d26,d18,#25
- vadd.i64 d27,d14
- vorr d30,d18,d20
- vand d29,d18,d20
- veor d17,d24,d25
- vand d30,d19
- veor d17,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d17,d27
- vadd.i64 d21,d27
- vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 31
-#if 31<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vsli.64 d26,d21,#23
-#if 31<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- vadd.i64 d27,d28,d16
- veor d29,d22,d23
- veor d24,d25
- vand d29,d21
- veor d24,d26 @ Sigma1(e)
- veor d29,d23 @ Ch(e,f,g)
- vadd.i64 d27,d24
- vshr.u64 d24,d17,#28
- vadd.i64 d27,d29
- vshr.u64 d25,d17,#34
- vshr.u64 d26,d17,#39
- vsli.64 d24,d17,#36
- vsli.64 d25,d17,#30
- vsli.64 d26,d17,#25
- vadd.i64 d27,d15
- vorr d30,d17,d19
- vand d29,d17,d19
- veor d16,d24,d25
- vand d30,d18
- veor d16,d26 @ Sigma0(a)
- vorr d30,d29 @ Maj(a,b,c)
- vadd.i64 d16,d27
- vadd.i64 d20,d27
- vadd.i64 d16,d30
- bne .L16_79_neon
-
- vldmia r0,{d24-d31} @ load context to temp
- vadd.i64 q8,q12 @ vectorized accumulate
- vadd.i64 q9,q13
- vadd.i64 q10,q14
- vadd.i64 q11,q15
- vstmia r0,{d16-d23} @ save context
- teq r1,r2
- sub r3,#640 @ rewind K512
- bne .Loop_neon
-
- vldmia sp!,{d8-d15} @ epilogue
- .word 0xe12fff1e
-#endif
-.size sha512_block_data_order,.-sha512_block_data_order
-.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-.comm OPENSSL_armcap_P,4,4
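The removed ARMv4 path above computes the 64-bit SHA-512 Sigma/sigma functions with pairs of 32-bit registers, as the "@ LO ..." / "@ HI ..." comments in .L00_15 and .L16_79 spell out. As a minimal illustration only (hypothetical file and function names, assuming the FIPS 180-4 definitions; this sketch is not part of the patch or of OpenSSL), the following stand-alone C program checks that the lo/hi decomposition quoted in those comments reproduces the plain 64-bit Sigma1:

/* sigma_split_check.c - hypothetical stand-alone sketch, not OpenSSL code.
 * Compares the 32-bit lo/hi decomposition of SHA-512 Sigma1 (as used by
 * the ARMv4 code) against the plain 64-bit FIPS 180-4 definition. */
#include <stdint.h>
#include <stdio.h>

static uint64_t rotr64(uint64_t x, unsigned n)
{
    return (x >> n) | (x << (64 - n));      /* n is always 1..63 here */
}

/* 64-bit reference definition: Sigma1(x) = ROTR(x,14)^ROTR(x,18)^ROTR(x,41) */
static uint64_t Sigma1(uint64_t x)
{
    return rotr64(x, 14) ^ rotr64(x, 18) ^ rotr64(x, 41);
}

/* Same value built from two 32-bit halves, following the comments
 *   @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
 *   @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 */
static uint64_t Sigma1_split(uint32_t lo, uint32_t hi)
{
    uint32_t rlo = (lo >> 14 | hi << 18) ^ (lo >> 18 | hi << 14) ^ (hi >> 9 | lo << 23);
    uint32_t rhi = (hi >> 14 | lo << 18) ^ (hi >> 18 | lo << 14) ^ (lo >> 9 | hi << 23);
    return (uint64_t)rhi << 32 | rlo;
}

int main(void)
{
    uint64_t x = 0x0123456789abcdefULL;     /* arbitrary test value */
    uint64_t a = Sigma1(x);
    uint64_t b = Sigma1_split((uint32_t)x, (uint32_t)(x >> 32));
    printf("Sigma1(x)      = %016llx\n", (unsigned long long)a);
    printf("Sigma1_split() = %016llx\n", (unsigned long long)b);
    printf(a == b ? "match\n" : "MISMATCH\n");
    return 0;
}

The 28/34/39, 1/8/7 and 19/61/6 rotate/shift combinations used for Sigma0, sigma0 and sigma1 split the same way; that is what the integer .L00_15/.L16_79 code unrolls by hand and what the NEON path expresses with vshr.u64/vsli.64 pairs.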
diff --git a/app/openssl/crypto/sha/asm/sha512-armv8.S b/app/openssl/crypto/sha/asm/sha512-armv8.S
new file mode 100644
index 00000000..6b0d1940
--- /dev/null
+++ b/app/openssl/crypto/sha/asm/sha512-armv8.S
@@ -0,0 +1,1021 @@
+#include "arm_arch.h"
+
+.text
+
+.globl sha512_block_data_order
+.type sha512_block_data_order,%function
+.align 6
+sha512_block_data_order:
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*8
+
+ ldp x20,x21,[x0] // load context
+ ldp x22,x23,[x0,#2*8]
+ ldp x24,x25,[x0,#4*8]
+ add x2,x1,x2,lsl#7 // end of input
+ ldp x26,x27,[x0,#6*8]
+ adr x30,K512
+ stp x0,x2,[x29,#96]
+
+.Loop:
+ ldp x3,x4,[x1],#2*8
+ ldr x19,[x30],#8 // *K++
+ eor x28,x21,x22 // magic seed
+ str x1,[x29,#112]
+#ifndef __ARMEB__
+ rev x3,x3 // 0
+#endif
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ eor x6,x24,x24,ror#23
+ and x17,x25,x24
+ bic x19,x26,x24
+ add x27,x27,x3 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x6,ror#18 // Sigma1(e)
+ ror x6,x20,#28
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ eor x17,x20,x20,ror#5
+ add x27,x27,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x23,x23,x27 // d+=h
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x6,x17,ror#34 // Sigma0(a)
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x27,x27,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x4,x4 // 1
+#endif
+ ldp x5,x6,[x1],#2*8
+ add x27,x27,x17 // h+=Sigma0(a)
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ eor x7,x23,x23,ror#23
+ and x17,x24,x23
+ bic x28,x25,x23
+ add x26,x26,x4 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x7,ror#18 // Sigma1(e)
+ ror x7,x27,#28
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ eor x17,x27,x27,ror#5
+ add x26,x26,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x22,x22,x26 // d+=h
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x7,x17,ror#34 // Sigma0(a)
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x26,x26,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x5,x5 // 2
+#endif
+ add x26,x26,x17 // h+=Sigma0(a)
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ eor x8,x22,x22,ror#23
+ and x17,x23,x22
+ bic x19,x24,x22
+ add x25,x25,x5 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x8,ror#18 // Sigma1(e)
+ ror x8,x26,#28
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ eor x17,x26,x26,ror#5
+ add x25,x25,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x21,x21,x25 // d+=h
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x8,x17,ror#34 // Sigma0(a)
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x25,x25,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x6,x6 // 3
+#endif
+ ldp x7,x8,[x1],#2*8
+ add x25,x25,x17 // h+=Sigma0(a)
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ eor x9,x21,x21,ror#23
+ and x17,x22,x21
+ bic x28,x23,x21
+ add x24,x24,x6 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x9,ror#18 // Sigma1(e)
+ ror x9,x25,#28
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ eor x17,x25,x25,ror#5
+ add x24,x24,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x20,x20,x24 // d+=h
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x9,x17,ror#34 // Sigma0(a)
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x24,x24,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x7,x7 // 4
+#endif
+ add x24,x24,x17 // h+=Sigma0(a)
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ eor x10,x20,x20,ror#23
+ and x17,x21,x20
+ bic x19,x22,x20
+ add x23,x23,x7 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x10,ror#18 // Sigma1(e)
+ ror x10,x24,#28
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ eor x17,x24,x24,ror#5
+ add x23,x23,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x27,x27,x23 // d+=h
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x10,x17,ror#34 // Sigma0(a)
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x23,x23,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x8,x8 // 5
+#endif
+ ldp x9,x10,[x1],#2*8
+ add x23,x23,x17 // h+=Sigma0(a)
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ eor x11,x27,x27,ror#23
+ and x17,x20,x27
+ bic x28,x21,x27
+ add x22,x22,x8 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x11,ror#18 // Sigma1(e)
+ ror x11,x23,#28
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ eor x17,x23,x23,ror#5
+ add x22,x22,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x26,x26,x22 // d+=h
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x11,x17,ror#34 // Sigma0(a)
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x22,x22,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x9,x9 // 6
+#endif
+ add x22,x22,x17 // h+=Sigma0(a)
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ eor x12,x26,x26,ror#23
+ and x17,x27,x26
+ bic x19,x20,x26
+ add x21,x21,x9 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x12,ror#18 // Sigma1(e)
+ ror x12,x22,#28
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ eor x17,x22,x22,ror#5
+ add x21,x21,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x25,x25,x21 // d+=h
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x12,x17,ror#34 // Sigma0(a)
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x21,x21,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x10,x10 // 7
+#endif
+ ldp x11,x12,[x1],#2*8
+ add x21,x21,x17 // h+=Sigma0(a)
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ eor x13,x25,x25,ror#23
+ and x17,x26,x25
+ bic x28,x27,x25
+ add x20,x20,x10 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x13,ror#18 // Sigma1(e)
+ ror x13,x21,#28
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ eor x17,x21,x21,ror#5
+ add x20,x20,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x24,x24,x20 // d+=h
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x13,x17,ror#34 // Sigma0(a)
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x20,x20,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x11,x11 // 8
+#endif
+ add x20,x20,x17 // h+=Sigma0(a)
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ eor x14,x24,x24,ror#23
+ and x17,x25,x24
+ bic x19,x26,x24
+ add x27,x27,x11 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x14,ror#18 // Sigma1(e)
+ ror x14,x20,#28
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ eor x17,x20,x20,ror#5
+ add x27,x27,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x23,x23,x27 // d+=h
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x14,x17,ror#34 // Sigma0(a)
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x27,x27,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x12,x12 // 9
+#endif
+ ldp x13,x14,[x1],#2*8
+ add x27,x27,x17 // h+=Sigma0(a)
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ eor x15,x23,x23,ror#23
+ and x17,x24,x23
+ bic x28,x25,x23
+ add x26,x26,x12 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x15,ror#18 // Sigma1(e)
+ ror x15,x27,#28
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ eor x17,x27,x27,ror#5
+ add x26,x26,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x22,x22,x26 // d+=h
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x15,x17,ror#34 // Sigma0(a)
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x26,x26,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x13,x13 // 10
+#endif
+ add x26,x26,x17 // h+=Sigma0(a)
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ eor x0,x22,x22,ror#23
+ and x17,x23,x22
+ bic x19,x24,x22
+ add x25,x25,x13 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x0,ror#18 // Sigma1(e)
+ ror x0,x26,#28
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ eor x17,x26,x26,ror#5
+ add x25,x25,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x21,x21,x25 // d+=h
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x0,x17,ror#34 // Sigma0(a)
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x25,x25,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x14,x14 // 11
+#endif
+ ldp x15,x0,[x1],#2*8
+ add x25,x25,x17 // h+=Sigma0(a)
+ str x6,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ eor x6,x21,x21,ror#23
+ and x17,x22,x21
+ bic x28,x23,x21
+ add x24,x24,x14 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x6,ror#18 // Sigma1(e)
+ ror x6,x25,#28
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ eor x17,x25,x25,ror#5
+ add x24,x24,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x20,x20,x24 // d+=h
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x6,x17,ror#34 // Sigma0(a)
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x24,x24,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x15,x15 // 12
+#endif
+ add x24,x24,x17 // h+=Sigma0(a)
+ str x7,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ eor x7,x20,x20,ror#23
+ and x17,x21,x20
+ bic x19,x22,x20
+ add x23,x23,x15 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x7,ror#18 // Sigma1(e)
+ ror x7,x24,#28
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ eor x17,x24,x24,ror#5
+ add x23,x23,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x27,x27,x23 // d+=h
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x7,x17,ror#34 // Sigma0(a)
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x23,x23,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x0,x0 // 13
+#endif
+ ldp x1,x2,[x1]
+ add x23,x23,x17 // h+=Sigma0(a)
+ str x8,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ eor x8,x27,x27,ror#23
+ and x17,x20,x27
+ bic x28,x21,x27
+ add x22,x22,x0 // h+=X[i]
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x8,ror#18 // Sigma1(e)
+ ror x8,x23,#28
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ eor x17,x23,x23,ror#5
+ add x22,x22,x16 // h+=Sigma1(e)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ add x26,x26,x22 // d+=h
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x8,x17,ror#34 // Sigma0(a)
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ //add x22,x22,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x1,x1 // 14
+#endif
+ ldr x6,[sp,#24]
+ add x22,x22,x17 // h+=Sigma0(a)
+ str x9,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ eor x9,x26,x26,ror#23
+ and x17,x27,x26
+ bic x19,x20,x26
+ add x21,x21,x1 // h+=X[i]
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x9,ror#18 // Sigma1(e)
+ ror x9,x22,#28
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ eor x17,x22,x22,ror#5
+ add x21,x21,x16 // h+=Sigma1(e)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ add x25,x25,x21 // d+=h
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x9,x17,ror#34 // Sigma0(a)
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ //add x21,x21,x17 // h+=Sigma0(a)
+#ifndef __ARMEB__
+ rev x2,x2 // 15
+#endif
+ ldr x7,[sp,#0]
+ add x21,x21,x17 // h+=Sigma0(a)
+ str x10,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x9,x4,#1
+ and x17,x26,x25
+ ror x8,x1,#19
+ bic x28,x27,x25
+ ror x10,x21,#28
+ add x20,x20,x2 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x9,x9,x4,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x10,x10,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x8,x8,x1,ror#61
+ eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x10,x21,ror#39 // Sigma0(a)
+ eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+ add x3,x3,x12
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x3,x3,x9
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x3,x3,x8
+.Loop_16_xx:
+ ldr x8,[sp,#8]
+ str x11,[sp,#0]
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ ror x10,x5,#1
+ and x17,x25,x24
+ ror x9,x2,#19
+ bic x19,x26,x24
+ ror x11,x20,#28
+ add x27,x27,x3 // h+=X[i]
+ eor x16,x16,x24,ror#18
+ eor x10,x10,x5,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x24,ror#41 // Sigma1(e)
+ eor x11,x11,x20,ror#34
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x9,x9,x2,ror#61
+ eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
+ add x27,x27,x16 // h+=Sigma1(e)
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x11,x20,ror#39 // Sigma0(a)
+ eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
+ add x4,x4,x13
+ add x23,x23,x27 // d+=h
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x4,x4,x10
+ add x27,x27,x17 // h+=Sigma0(a)
+ add x4,x4,x9
+ ldr x9,[sp,#16]
+ str x12,[sp,#8]
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ ror x11,x6,#1
+ and x17,x24,x23
+ ror x10,x3,#19
+ bic x28,x25,x23
+ ror x12,x27,#28
+ add x26,x26,x4 // h+=X[i]
+ eor x16,x16,x23,ror#18
+ eor x11,x11,x6,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x23,ror#41 // Sigma1(e)
+ eor x12,x12,x27,ror#34
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x10,x10,x3,ror#61
+ eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
+ add x26,x26,x16 // h+=Sigma1(e)
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x12,x27,ror#39 // Sigma0(a)
+ eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
+ add x5,x5,x14
+ add x22,x22,x26 // d+=h
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x5,x5,x11
+ add x26,x26,x17 // h+=Sigma0(a)
+ add x5,x5,x10
+ ldr x10,[sp,#24]
+ str x13,[sp,#16]
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ ror x12,x7,#1
+ and x17,x23,x22
+ ror x11,x4,#19
+ bic x19,x24,x22
+ ror x13,x26,#28
+ add x25,x25,x5 // h+=X[i]
+ eor x16,x16,x22,ror#18
+ eor x12,x12,x7,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x22,ror#41 // Sigma1(e)
+ eor x13,x13,x26,ror#34
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x11,x11,x4,ror#61
+ eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
+ add x25,x25,x16 // h+=Sigma1(e)
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x13,x26,ror#39 // Sigma0(a)
+ eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
+ add x6,x6,x15
+ add x21,x21,x25 // d+=h
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x6,x6,x12
+ add x25,x25,x17 // h+=Sigma0(a)
+ add x6,x6,x11
+ ldr x11,[sp,#0]
+ str x14,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ ror x13,x8,#1
+ and x17,x22,x21
+ ror x12,x5,#19
+ bic x28,x23,x21
+ ror x14,x25,#28
+ add x24,x24,x6 // h+=X[i]
+ eor x16,x16,x21,ror#18
+ eor x13,x13,x8,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x21,ror#41 // Sigma1(e)
+ eor x14,x14,x25,ror#34
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x12,x12,x5,ror#61
+ eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
+ add x24,x24,x16 // h+=Sigma1(e)
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x14,x25,ror#39 // Sigma0(a)
+ eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
+ add x7,x7,x0
+ add x20,x20,x24 // d+=h
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x7,x7,x13
+ add x24,x24,x17 // h+=Sigma0(a)
+ add x7,x7,x12
+ ldr x12,[sp,#8]
+ str x15,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ ror x14,x9,#1
+ and x17,x21,x20
+ ror x13,x6,#19
+ bic x19,x22,x20
+ ror x15,x24,#28
+ add x23,x23,x7 // h+=X[i]
+ eor x16,x16,x20,ror#18
+ eor x14,x14,x9,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x20,ror#41 // Sigma1(e)
+ eor x15,x15,x24,ror#34
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x13,x13,x6,ror#61
+ eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
+ add x23,x23,x16 // h+=Sigma1(e)
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x15,x24,ror#39 // Sigma0(a)
+ eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
+ add x8,x8,x1
+ add x27,x27,x23 // d+=h
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x8,x8,x14
+ add x23,x23,x17 // h+=Sigma0(a)
+ add x8,x8,x13
+ ldr x13,[sp,#16]
+ str x0,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ ror x15,x10,#1
+ and x17,x20,x27
+ ror x14,x7,#19
+ bic x28,x21,x27
+ ror x0,x23,#28
+ add x22,x22,x8 // h+=X[i]
+ eor x16,x16,x27,ror#18
+ eor x15,x15,x10,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x27,ror#41 // Sigma1(e)
+ eor x0,x0,x23,ror#34
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x14,x14,x7,ror#61
+ eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
+ add x22,x22,x16 // h+=Sigma1(e)
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x0,x23,ror#39 // Sigma0(a)
+ eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
+ add x9,x9,x2
+ add x26,x26,x22 // d+=h
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x9,x9,x15
+ add x22,x22,x17 // h+=Sigma0(a)
+ add x9,x9,x14
+ ldr x14,[sp,#24]
+ str x1,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ ror x0,x11,#1
+ and x17,x27,x26
+ ror x15,x8,#19
+ bic x19,x20,x26
+ ror x1,x22,#28
+ add x21,x21,x9 // h+=X[i]
+ eor x16,x16,x26,ror#18
+ eor x0,x0,x11,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x26,ror#41 // Sigma1(e)
+ eor x1,x1,x22,ror#34
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x15,x15,x8,ror#61
+ eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
+ add x21,x21,x16 // h+=Sigma1(e)
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x1,x22,ror#39 // Sigma0(a)
+ eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
+ add x10,x10,x3
+ add x25,x25,x21 // d+=h
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x10,x10,x0
+ add x21,x21,x17 // h+=Sigma0(a)
+ add x10,x10,x15
+ ldr x15,[sp,#0]
+ str x2,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x1,x12,#1
+ and x17,x26,x25
+ ror x0,x9,#19
+ bic x28,x27,x25
+ ror x2,x21,#28
+ add x20,x20,x10 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x1,x1,x12,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x2,x2,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x0,x0,x9,ror#61
+ eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x2,x21,ror#39 // Sigma0(a)
+ eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
+ add x11,x11,x4
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x11,x11,x1
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x11,x11,x0
+ ldr x0,[sp,#8]
+ str x3,[sp,#0]
+ ror x16,x24,#14
+ add x27,x27,x19 // h+=K[i]
+ ror x2,x13,#1
+ and x17,x25,x24
+ ror x1,x10,#19
+ bic x19,x26,x24
+ ror x3,x20,#28
+ add x27,x27,x11 // h+=X[i]
+ eor x16,x16,x24,ror#18
+ eor x2,x2,x13,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x20,x21 // a^b, b^c in next round
+ eor x16,x16,x24,ror#41 // Sigma1(e)
+ eor x3,x3,x20,ror#34
+ add x27,x27,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x1,x1,x10,ror#61
+ eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
+ add x27,x27,x16 // h+=Sigma1(e)
+ eor x28,x28,x21 // Maj(a,b,c)
+ eor x17,x3,x20,ror#39 // Sigma0(a)
+ eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
+ add x12,x12,x5
+ add x23,x23,x27 // d+=h
+ add x27,x27,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x12,x12,x2
+ add x27,x27,x17 // h+=Sigma0(a)
+ add x12,x12,x1
+ ldr x1,[sp,#16]
+ str x4,[sp,#8]
+ ror x16,x23,#14
+ add x26,x26,x28 // h+=K[i]
+ ror x3,x14,#1
+ and x17,x24,x23
+ ror x2,x11,#19
+ bic x28,x25,x23
+ ror x4,x27,#28
+ add x26,x26,x12 // h+=X[i]
+ eor x16,x16,x23,ror#18
+ eor x3,x3,x14,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x27,x20 // a^b, b^c in next round
+ eor x16,x16,x23,ror#41 // Sigma1(e)
+ eor x4,x4,x27,ror#34
+ add x26,x26,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x2,x2,x11,ror#61
+ eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
+ add x26,x26,x16 // h+=Sigma1(e)
+ eor x19,x19,x20 // Maj(a,b,c)
+ eor x17,x4,x27,ror#39 // Sigma0(a)
+ eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
+ add x13,x13,x6
+ add x22,x22,x26 // d+=h
+ add x26,x26,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x13,x13,x3
+ add x26,x26,x17 // h+=Sigma0(a)
+ add x13,x13,x2
+ ldr x2,[sp,#24]
+ str x5,[sp,#16]
+ ror x16,x22,#14
+ add x25,x25,x19 // h+=K[i]
+ ror x4,x15,#1
+ and x17,x23,x22
+ ror x3,x12,#19
+ bic x19,x24,x22
+ ror x5,x26,#28
+ add x25,x25,x13 // h+=X[i]
+ eor x16,x16,x22,ror#18
+ eor x4,x4,x15,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x26,x27 // a^b, b^c in next round
+ eor x16,x16,x22,ror#41 // Sigma1(e)
+ eor x5,x5,x26,ror#34
+ add x25,x25,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x3,x3,x12,ror#61
+ eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
+ add x25,x25,x16 // h+=Sigma1(e)
+ eor x28,x28,x27 // Maj(a,b,c)
+ eor x17,x5,x26,ror#39 // Sigma0(a)
+ eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
+ add x14,x14,x7
+ add x21,x21,x25 // d+=h
+ add x25,x25,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x14,x14,x4
+ add x25,x25,x17 // h+=Sigma0(a)
+ add x14,x14,x3
+ ldr x3,[sp,#0]
+ str x6,[sp,#24]
+ ror x16,x21,#14
+ add x24,x24,x28 // h+=K[i]
+ ror x5,x0,#1
+ and x17,x22,x21
+ ror x4,x13,#19
+ bic x28,x23,x21
+ ror x6,x25,#28
+ add x24,x24,x14 // h+=X[i]
+ eor x16,x16,x21,ror#18
+ eor x5,x5,x0,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x25,x26 // a^b, b^c in next round
+ eor x16,x16,x21,ror#41 // Sigma1(e)
+ eor x6,x6,x25,ror#34
+ add x24,x24,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x4,x4,x13,ror#61
+ eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
+ add x24,x24,x16 // h+=Sigma1(e)
+ eor x19,x19,x26 // Maj(a,b,c)
+ eor x17,x6,x25,ror#39 // Sigma0(a)
+ eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
+ add x15,x15,x8
+ add x20,x20,x24 // d+=h
+ add x24,x24,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x15,x15,x5
+ add x24,x24,x17 // h+=Sigma0(a)
+ add x15,x15,x4
+ ldr x4,[sp,#8]
+ str x7,[sp,#0]
+ ror x16,x20,#14
+ add x23,x23,x19 // h+=K[i]
+ ror x6,x1,#1
+ and x17,x21,x20
+ ror x5,x14,#19
+ bic x19,x22,x20
+ ror x7,x24,#28
+ add x23,x23,x15 // h+=X[i]
+ eor x16,x16,x20,ror#18
+ eor x6,x6,x1,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x24,x25 // a^b, b^c in next round
+ eor x16,x16,x20,ror#41 // Sigma1(e)
+ eor x7,x7,x24,ror#34
+ add x23,x23,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x5,x5,x14,ror#61
+ eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
+ add x23,x23,x16 // h+=Sigma1(e)
+ eor x28,x28,x25 // Maj(a,b,c)
+ eor x17,x7,x24,ror#39 // Sigma0(a)
+ eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
+ add x0,x0,x9
+ add x27,x27,x23 // d+=h
+ add x23,x23,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x0,x0,x6
+ add x23,x23,x17 // h+=Sigma0(a)
+ add x0,x0,x5
+ ldr x5,[sp,#16]
+ str x8,[sp,#8]
+ ror x16,x27,#14
+ add x22,x22,x28 // h+=K[i]
+ ror x7,x2,#1
+ and x17,x20,x27
+ ror x6,x15,#19
+ bic x28,x21,x27
+ ror x8,x23,#28
+ add x22,x22,x0 // h+=X[i]
+ eor x16,x16,x27,ror#18
+ eor x7,x7,x2,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x23,x24 // a^b, b^c in next round
+ eor x16,x16,x27,ror#41 // Sigma1(e)
+ eor x8,x8,x23,ror#34
+ add x22,x22,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x6,x6,x15,ror#61
+ eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
+ add x22,x22,x16 // h+=Sigma1(e)
+ eor x19,x19,x24 // Maj(a,b,c)
+ eor x17,x8,x23,ror#39 // Sigma0(a)
+ eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
+ add x1,x1,x10
+ add x26,x26,x22 // d+=h
+ add x22,x22,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x1,x1,x7
+ add x22,x22,x17 // h+=Sigma0(a)
+ add x1,x1,x6
+ ldr x6,[sp,#24]
+ str x9,[sp,#16]
+ ror x16,x26,#14
+ add x21,x21,x19 // h+=K[i]
+ ror x8,x3,#1
+ and x17,x27,x26
+ ror x7,x0,#19
+ bic x19,x20,x26
+ ror x9,x22,#28
+ add x21,x21,x1 // h+=X[i]
+ eor x16,x16,x26,ror#18
+ eor x8,x8,x3,ror#8
+ orr x17,x17,x19 // Ch(e,f,g)
+ eor x19,x22,x23 // a^b, b^c in next round
+ eor x16,x16,x26,ror#41 // Sigma1(e)
+ eor x9,x9,x22,ror#34
+ add x21,x21,x17 // h+=Ch(e,f,g)
+ and x28,x28,x19 // (b^c)&=(a^b)
+ eor x7,x7,x0,ror#61
+ eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
+ add x21,x21,x16 // h+=Sigma1(e)
+ eor x28,x28,x23 // Maj(a,b,c)
+ eor x17,x9,x22,ror#39 // Sigma0(a)
+ eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
+ add x2,x2,x11
+ add x25,x25,x21 // d+=h
+ add x21,x21,x28 // h+=Maj(a,b,c)
+ ldr x28,[x30],#8 // *K++, x19 in next round
+ add x2,x2,x8
+ add x21,x21,x17 // h+=Sigma0(a)
+ add x2,x2,x7
+ ldr x7,[sp,#0]
+ str x10,[sp,#24]
+ ror x16,x25,#14
+ add x20,x20,x28 // h+=K[i]
+ ror x9,x4,#1
+ and x17,x26,x25
+ ror x8,x1,#19
+ bic x28,x27,x25
+ ror x10,x21,#28
+ add x20,x20,x2 // h+=X[i]
+ eor x16,x16,x25,ror#18
+ eor x9,x9,x4,ror#8
+ orr x17,x17,x28 // Ch(e,f,g)
+ eor x28,x21,x22 // a^b, b^c in next round
+ eor x16,x16,x25,ror#41 // Sigma1(e)
+ eor x10,x10,x21,ror#34
+ add x20,x20,x17 // h+=Ch(e,f,g)
+ and x19,x19,x28 // (b^c)&=(a^b)
+ eor x8,x8,x1,ror#61
+ eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
+ add x20,x20,x16 // h+=Sigma1(e)
+ eor x19,x19,x22 // Maj(a,b,c)
+ eor x17,x10,x21,ror#39 // Sigma0(a)
+ eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
+ add x3,x3,x12
+ add x24,x24,x20 // d+=h
+ add x20,x20,x19 // h+=Maj(a,b,c)
+ ldr x19,[x30],#8 // *K++, x28 in next round
+ add x3,x3,x9
+ add x20,x20,x17 // h+=Sigma0(a)
+ add x3,x3,x8
+ cbnz x19,.Loop_16_xx
+
+ ldp x0,x2,[x29,#96]
+ ldr x1,[x29,#112]
+ sub x30,x30,#648 // rewind
+
+ ldp x3,x4,[x0]
+ ldp x5,x6,[x0,#2*8]
+ add x1,x1,#14*8 // advance input pointer
+ ldp x7,x8,[x0,#4*8]
+ add x20,x20,x3
+ ldp x9,x10,[x0,#6*8]
+ add x21,x21,x4
+ add x22,x22,x5
+ add x23,x23,x6
+ stp x20,x21,[x0]
+ add x24,x24,x7
+ add x25,x25,x8
+ stp x22,x23,[x0,#2*8]
+ add x26,x26,x9
+ add x27,x27,x10
+ cmp x1,x2
+ stp x24,x25,[x0,#4*8]
+ stp x26,x27,[x0,#6*8]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*8
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size sha512_block_data_order,.-sha512_block_data_order
+
+.align 6
+.type K512,%object
+K512:
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+ .quad 0 // terminator
+.size K512,.-K512
+.align 3
+.LOPENSSL_armcap_P:
+ .quad OPENSSL_armcap_P-.
+.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+.comm OPENSSL_armcap_P,4,4
diff --git a/app/openssl/crypto/sha/asm/sha512-armv8.pl b/app/openssl/crypto/sha/asm/sha512-armv8.pl
new file mode 100644
index 00000000..6935ed65
--- /dev/null
+++ b/app/openssl/crypto/sha/asm/sha512-armv8.pl
@@ -0,0 +1,414 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA256/512 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+# SHA256-hw SHA256(*) SHA512
+# Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
+# Cortex-A5x n/a n/a n/a
+#
+# (*) Software SHA256 results are of lesser relevance, presented
+# mostly for informational purposes.
+# (**) The result is a trade-off: it's possible to improve it by
+# 10%, but at the cost of 20% loss on Cortex-A5x.
+
+$flavour=shift;
+$output=shift;
+open STDOUT,">$output";
+
+if ($output =~ /512/) {
+ $BITS=512;
+ $SZ=8;
+ @Sigma0=(28,34,39);
+ @Sigma1=(14,18,41);
+ @sigma0=(1, 8, 7);
+ @sigma1=(19,61, 6);
+ $rounds=80;
+ $reg_t="x";
+} else {
+ $BITS=256;
+ $SZ=4;
+ @Sigma0=( 2,13,22);
+ @Sigma1=( 6,11,25);
+ @sigma0=( 7,18, 3);
+ @sigma1=(17,19,10);
+ $rounds=64;
+ $reg_t="w";
+}
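+# The rotate/shift amounts above are the FIPS 180-4 Sigma/sigma constants;
+# the last element of each lower-case sigma triple is a logical shift rather
+# than a rotate, which is why BODY_00_xx emits 'lsr' for it.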
+
+$func="sha${BITS}_block_data_order";
+
+($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
+
+@X=map("$reg_t$_",(3..15,0..2));
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
+($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
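+# Descriptive note on the register layout: the eight working variables a..h
+# live in x20..x27 (w20..w27 for SHA-256) and the 16-entry message schedule
+# rotates through x3..x15 and x0..x2. Because x0..x2 also carry ctx/inp/num,
+# those are stashed at [x29,#96] and [x29,#112] in the prologue, and four
+# schedule words at a time spill to the 4*$SZ-byte scratch area reserved
+# below the frame (the ldr/str [sp,#...] pairs in BODY_00_xx).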
+
+sub BODY_00_xx {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my $j=($i+1)&15;
+my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
+ $T0=@X[$i+3] if ($i<11);
+
+$code.=<<___ if ($i<16);
+#ifndef __ARMEB__
+ rev @X[$i],@X[$i] // $i
+#endif
+___
+$code.=<<___ if ($i<13 && ($i&1));
+ ldp @X[$i+1],@X[$i+2],[$inp],#2*$SZ
+___
+$code.=<<___ if ($i==13);
+ ldp @X[14],@X[15],[$inp]
+___
+$code.=<<___ if ($i>=14);
+ ldr @X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
+___
+$code.=<<___ if ($i>0 && $i<16);
+ add $a,$a,$t1 // h+=Sigma0(a)
+___
+$code.=<<___ if ($i>=11);
+ str @X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
+___
+# While ARMv8 specifies a merged rotate-and-logical operation such as
+# 'eor x,y,z,ror#n', it was found to negatively affect performance
+# on Apple A7. The reason seems to be that it requires even 'y' to
+# be available earlier. This means that such a merged instruction is
+# not necessarily the best choice on the critical path... On the other
+# hand, Cortex-A5x handles merged instructions much better than
+# disjoint rotate and logical ones... See (**) footnote above.
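+# As a rough illustration (register numbers here are arbitrary and not part
+# of the generated code), the merged form
+#	eor	x16,x16,x25,ror#18
+# is equivalent to the disjoint pair
+#	ror	x17,x25,#18
+#	eor	x16,x16,x17
+# but the merged instruction can only issue once *both* x16 and x25 are ready.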
+$code.=<<___ if ($i<15);
+ ror $t0,$e,#$Sigma1[0]
+ add $h,$h,$t2 // h+=K[i]
+ eor $T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
+ and $t1,$f,$e
+ bic $t2,$g,$e
+ add $h,$h,@X[$i&15] // h+=X[i]
+ orr $t1,$t1,$t2 // Ch(e,f,g)
+ eor $t2,$a,$b // a^b, b^c in next round
+ eor $t0,$t0,$T0,ror#$Sigma1[1] // Sigma1(e)
+ ror $T0,$a,#$Sigma0[0]
+ add $h,$h,$t1 // h+=Ch(e,f,g)
+ eor $t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
+ add $h,$h,$t0 // h+=Sigma1(e)
+ and $t3,$t3,$t2 // (b^c)&=(a^b)
+ add $d,$d,$h // d+=h
+ eor $t3,$t3,$b // Maj(a,b,c)
+ eor $t1,$T0,$t1,ror#$Sigma0[1] // Sigma0(a)
+ add $h,$h,$t3 // h+=Maj(a,b,c)
+ ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round
+ //add $h,$h,$t1 // h+=Sigma0(a)
+___
+$code.=<<___ if ($i>=15);
+ ror $t0,$e,#$Sigma1[0]
+ add $h,$h,$t2 // h+=K[i]
+ ror $T1,@X[($j+1)&15],#$sigma0[0]
+ and $t1,$f,$e
+ ror $T2,@X[($j+14)&15],#$sigma1[0]
+ bic $t2,$g,$e
+ ror $T0,$a,#$Sigma0[0]
+ add $h,$h,@X[$i&15] // h+=X[i]
+ eor $t0,$t0,$e,ror#$Sigma1[1]
+ eor $T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
+ orr $t1,$t1,$t2 // Ch(e,f,g)
+ eor $t2,$a,$b // a^b, b^c in next round
+ eor $t0,$t0,$e,ror#$Sigma1[2] // Sigma1(e)
+ eor $T0,$T0,$a,ror#$Sigma0[1]
+ add $h,$h,$t1 // h+=Ch(e,f,g)
+ and $t3,$t3,$t2 // (b^c)&=(a^b)
+ eor $T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
+ eor $T1,$T1,@X[($j+1)&15],lsr#$sigma0[2] // sigma0(X[i+1])
+ add $h,$h,$t0 // h+=Sigma1(e)
+ eor $t3,$t3,$b // Maj(a,b,c)
+ eor $t1,$T0,$a,ror#$Sigma0[2] // Sigma0(a)
+ eor $T2,$T2,@X[($j+14)&15],lsr#$sigma1[2] // sigma1(X[i+14])
+ add @X[$j],@X[$j],@X[($j+9)&15]
+ add $d,$d,$h // d+=h
+ add $h,$h,$t3 // h+=Maj(a,b,c)
+ ldr $t3,[$Ktbl],#$SZ // *K++, $t2 in next round
+ add @X[$j],@X[$j],$T1
+ add $h,$h,$t1 // h+=Sigma0(a)
+ add @X[$j],@X[$j],$T2
+___
+ ($t2,$t3)=($t3,$t2);
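+ # ($t2,$t3) swap roles every round: the register that received a^b this
+ # round carries "b^c" into the next round's Maj(), while the one just
+ # loaded with *K++ supplies the next round's K[i].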
+}
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+
+.globl $func
+.type $func,%function
+.align 6
+$func:
+___
+$code.=<<___ if ($SZ==4);
+ ldr x16,.LOPENSSL_armcap_P
+ adr x17,.LOPENSSL_armcap_P
+ add x16,x16,x17
+ ldr w16,[x16]
+ tst w16,#ARMV8_SHA256
+ b.ne .Lv8_entry
+___
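+# Note: .LOPENSSL_armcap_P (defined later in the generated code) stores the
+# PC-relative offset of OPENSSL_armcap_P, so the ldr/adr/add triple above
+# reconstructs its absolute address position-independently before testing
+# the ARMV8_SHA256 capability bit.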
+$code.=<<___;
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#4*$SZ
+
+ ldp $A,$B,[$ctx] // load context
+ ldp $C,$D,[$ctx,#2*$SZ]
+ ldp $E,$F,[$ctx,#4*$SZ]
+ add $num,$inp,$num,lsl#`log(16*$SZ)/log(2)` // end of input
+ ldp $G,$H,[$ctx,#6*$SZ]
+ adr $Ktbl,K$BITS
+ stp $ctx,$num,[x29,#96]
+
+.Loop:
+ ldp @X[0],@X[1],[$inp],#2*$SZ
+ ldr $t2,[$Ktbl],#$SZ // *K++
+ eor $t3,$B,$C // magic seed
+ str $inp,[x29,#112]
+___
+for ($i=0;$i<16;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=".Loop_16_xx:\n";
+for (;$i<32;$i++) { &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ cbnz $t2,.Loop_16_xx
+
+ ldp $ctx,$num,[x29,#96]
+ ldr $inp,[x29,#112]
+ sub $Ktbl,$Ktbl,#`$SZ*($rounds+1)` // rewind
+
+ ldp @X[0],@X[1],[$ctx]
+ ldp @X[2],@X[3],[$ctx,#2*$SZ]
+ add $inp,$inp,#14*$SZ // advance input pointer
+ ldp @X[4],@X[5],[$ctx,#4*$SZ]
+ add $A,$A,@X[0]
+ ldp @X[6],@X[7],[$ctx,#6*$SZ]
+ add $B,$B,@X[1]
+ add $C,$C,@X[2]
+ add $D,$D,@X[3]
+ stp $A,$B,[$ctx]
+ add $E,$E,@X[4]
+ add $F,$F,@X[5]
+ stp $C,$D,[$ctx,#2*$SZ]
+ add $G,$G,@X[6]
+ add $H,$H,@X[7]
+ cmp $inp,$num
+ stp $E,$F,[$ctx,#4*$SZ]
+ stp $G,$H,[$ctx,#6*$SZ]
+ b.ne .Loop
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#4*$SZ
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#128
+ ret
+.size $func,.-$func
+
+.align 6
+.type K$BITS,%object
+K$BITS:
+___
+$code.=<<___ if ($SZ==8);
+ .quad 0x428a2f98d728ae22,0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538,0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242,0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235,0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+ .quad 0x983e5152ee66dfab,0xa831c66d2db43210
+ .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+ .quad 0x06ca6351e003826f,0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+ .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6,0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218,0xd69906245565a910
+ .quad 0xf40e35855771202a,0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+ .quad 0x90befffa23631e28,0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+ .quad 0xca273eceea26619c,0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae,0x1b710b35131c471b
+ .quad 0x28db77f523047d84,0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+ .quad 0 // terminator
+___
+$code.=<<___ if ($SZ==4);
+ .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+ .long 0 // terminator
+___
+$code.=<<___;
+.size K$BITS,.-K$BITS
+.align 3
+.LOPENSSL_armcap_P:
+ .quad OPENSSL_armcap_P-.
+.asciz "SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+if ($SZ==4) {
+my $Ktbl="x3";
+
+my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
+my @MSG=map("v$_.16b",(4..7));
+my ($W0,$W1)=("v16.4s","v17.4s");
+my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");
+
+$code.=<<___;
+.type sha256_block_armv8,%function
+.align 6
+sha256_block_armv8:
+.Lv8_entry:
+ stp x29,x30,[sp,#-16]!
+ add x29,sp,#0
+
+ ld1.32 {$ABCD,$EFGH},[$ctx]
+ adr $Ktbl,K256
+
+.Loop_hw:
+ ld1 {@MSG[0]-@MSG[3]},[$inp],#64
+ sub $num,$num,#1
+ ld1.32 {$W0},[$Ktbl],#16
+ rev32 @MSG[0],@MSG[0]
+ rev32 @MSG[1],@MSG[1]
+ rev32 @MSG[2],@MSG[2]
+ rev32 @MSG[3],@MSG[3]
+ orr $ABCD_SAVE,$ABCD,$ABCD // offload
+ orr $EFGH_SAVE,$EFGH,$EFGH
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+ ld1.32 {$W1},[$Ktbl],#16
+ add.i32 $W0,$W0,@MSG[0]
+ sha256su0 @MSG[0],@MSG[1]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+ sha256su1 @MSG[0],@MSG[2],@MSG[3]
+___
+ ($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+ ld1.32 {$W1},[$Ktbl],#16
+ add.i32 $W0,$W0,@MSG[0]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ ld1.32 {$W0},[$Ktbl],#16
+ add.i32 $W1,$W1,@MSG[1]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ ld1.32 {$W1},[$Ktbl]
+ add.i32 $W0,$W0,@MSG[2]
+ sub $Ktbl,$Ktbl,#$rounds*$SZ-16 // rewind
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W0
+ sha256h2 $EFGH,$abcd,$W0
+
+ add.i32 $W1,$W1,@MSG[3]
+ orr $abcd,$ABCD,$ABCD
+ sha256h $ABCD,$EFGH,$W1
+ sha256h2 $EFGH,$abcd,$W1
+
+ add.i32 $ABCD,$ABCD,$ABCD_SAVE
+ add.i32 $EFGH,$EFGH,$EFGH_SAVE
+
+ cbnz $num,.Loop_hw
+
+ st1.32 {$ABCD,$EFGH},[$ctx]
+
+ ldr x29,[sp],#16
+ ret
+.size sha256_block_armv8,.-sha256_block_armv8
+___
+}
+
+$code.=<<___;
+.comm OPENSSL_armcap_P,4,4
+___
+
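+# The block below hand-assembles the four SHA-256 Crypto Extension
+# instructions into raw .inst words so that the output still builds with
+# assemblers that do not recognize these mnemonics; the first, second and
+# (optional) third register numbers are packed into bits [4:0], [9:5] and
+# [20:16] of the opcode, as the sprintf shows.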
+{ my %opcode = (
+ "sha256h" => 0x5e004000, "sha256h2" => 0x5e005000,
+ "sha256su0" => 0x5e282800, "sha256su1" => 0x5e006000 );
+
+ sub unsha256 {
+ my ($mnemonic,$arg)=@_;
+
+ $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+ &&
+ sprintf ".inst\t0x%08x\t//%s %s",
+ $opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+ $mnemonic,$arg;
+ }
+}
+
+foreach(split("\n",$code)) {
+
+ s/\`([^\`]*)\`/eval($1)/geo;	# expand `...` constant expressions
+
+ s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/geo;	# encode crypto insns as .inst words
+
+ s/\.\w?32\b//o and s/\.16b/\.4s/go;	# drop .32/.i32 suffixes, switch .16b to .4s
+ m/(ld|st)1[^\[]+\[0\]/o and s/\.4s/\.s/go;	# single-lane ld1/st1 use the .s form
+
+ print $_,"\n";
+}
+
+close STDOUT;