From 3e121542d8b7ab5201c47bbd3ba5611a23c54759 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Parm=C3=A9nides=20GV?=
Date: Wed, 11 Jun 2014 11:56:59 +0200
Subject: Correctly connects to millipede.

The location keyword in android.cfg isn't supported; the corresponding
EIP code has been commented out. I think we should support it in
ics-openvpn, so that we can show the location instead of the server
name.

I've updated all openssl, openvpn, etc. subprojects from rev 813 of
ics-openvpn, and jni too.
---
 app/openssl/crypto/sha/asm/sha256-armv4.s | 858 ++++++++++++++++++++++--------
 1 file changed, 632 insertions(+), 226 deletions(-)

(limited to 'app/openssl/crypto/sha/asm/sha256-armv4.s')

diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.s b/app/openssl/crypto/sha/asm/sha256-armv4.s
index ee903dc4..9c20a63c 100644
--- a/app/openssl/crypto/sha/asm/sha256-armv4.s
+++ b/app/openssl/crypto/sha/asm/sha256-armv4.s
@@ -1,3 +1,5 @@
+#include "arm_arch.h"
+
 .text
 .code 32
 
@@ -27,11 +29,14 @@ K256:
 sha256_block_data_order:
 	sub r3,pc,#8 @ sha256_block_data_order
 	add r2,r1,r2,lsl#6 @ len to point at the end of inp
-	stmdb sp!,{r0,r1,r2,r4-r12,lr}
+	stmdb sp!,{r0,r1,r2,r4-r11,lr}
 	ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
 	sub r14,r3,#256 @ K256
 	sub sp,sp,#16*4 @ alloca(X[16])
 .Loop:
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 0
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -39,14 +44,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r8,ror#6
-	str r3,[sp,#0*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r8,ror#11
 	eor r2,r9,r10
+#if 0>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 0==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r8,ror#25 @ Sigma1(e)
 	and r2,r2,r8
+	str r3,[sp,#0*4]
 	add r3,r3,r0
 	eor r2,r2,r10 @ Ch(e,f,g)
 	add r3,r3,r11
@@ -55,6 +68,9 @@ sha256_block_data_order:
 	eor r11,r11,r4,ror#13
 	add r3,r3,r12
 	eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 0>=15
+	ldr r1,[sp,#2*4] @ from BODY_16_xx
+#endif
 	orr r0,r4,r5
 	and r2,r4,r5
 	and r0,r0,r6
@@ -62,6 +78,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r7,r7,r3
 	add r11,r11,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 1
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -69,14 +88,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r7,ror#6
-	str r3,[sp,#1*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r7,ror#11
 	eor r2,r8,r9
+#if 1>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 1==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r7,ror#25 @ Sigma1(e)
 	and r2,r2,r7
+	str r3,[sp,#1*4]
 	add r3,r3,r0
 	eor r2,r2,r9 @ Ch(e,f,g)
 	add r3,r3,r10
@@ -85,6 +112,9 @@ sha256_block_data_order:
 	eor r10,r10,r11,ror#13
 	add r3,r3,r12
 	eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 1>=15
+	ldr r1,[sp,#3*4] @ from BODY_16_xx
+#endif
 	orr r0,r11,r4
 	and r2,r11,r4
 	and r0,r0,r5
@@ -92,6 +122,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r6,r6,r3
 	add r10,r10,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 2
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -99,14 +132,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r6,ror#6
-	str r3,[sp,#2*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r6,ror#11
 	eor r2,r7,r8
+#if 2>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 2==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r6,ror#25 @ Sigma1(e)
 	and r2,r2,r6
+	str r3,[sp,#2*4]
 	add r3,r3,r0
 	eor r2,r2,r8 @ Ch(e,f,g)
 	add r3,r3,r9
@@ -115,6 +156,9 @@ sha256_block_data_order:
 	eor r9,r9,r10,ror#13
 	add r3,r3,r12
 	eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 2>=15
+	ldr r1,[sp,#4*4] @ from BODY_16_xx
+#endif
 	orr r0,r10,r11
 	and r2,r10,r11
 	and r0,r0,r4
@@ -122,6 +166,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r5,r5,r3
 	add r9,r9,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 3
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -129,14 +176,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r5,ror#6
-	str r3,[sp,#3*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r5,ror#11
 	eor r2,r6,r7
+#if 3>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 3==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r5,ror#25 @ Sigma1(e)
 	and r2,r2,r5
+	str r3,[sp,#3*4]
 	add r3,r3,r0
 	eor r2,r2,r7 @ Ch(e,f,g)
 	add r3,r3,r8
@@ -145,6 +200,9 @@ sha256_block_data_order:
 	eor r8,r8,r9,ror#13
 	add r3,r3,r12
 	eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 3>=15
+	ldr r1,[sp,#5*4] @ from BODY_16_xx
+#endif
 	orr r0,r9,r10
 	and r2,r9,r10
 	and r0,r0,r11
@@ -152,6 +210,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r4,r4,r3
 	add r8,r8,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 4
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -159,14 +220,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r4,ror#6
-	str r3,[sp,#4*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r4,ror#11
 	eor r2,r5,r6
+#if 4>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 4==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r4,ror#25 @ Sigma1(e)
 	and r2,r2,r4
+	str r3,[sp,#4*4]
 	add r3,r3,r0
 	eor r2,r2,r6 @ Ch(e,f,g)
 	add r3,r3,r7
@@ -175,6 +244,9 @@ sha256_block_data_order:
 	eor r7,r7,r8,ror#13
 	add r3,r3,r12
 	eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 4>=15
+	ldr r1,[sp,#6*4] @ from BODY_16_xx
+#endif
 	orr r0,r8,r9
 	and r2,r8,r9
 	and r0,r0,r10
@@ -182,6 +254,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r11,r11,r3
 	add r7,r7,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 5
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -189,14 +264,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r11,ror#6
-	str r3,[sp,#5*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r11,ror#11
 	eor r2,r4,r5
+#if 5>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 5==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r11,ror#25 @ Sigma1(e)
 	and r2,r2,r11
+	str r3,[sp,#5*4]
 	add r3,r3,r0
 	eor r2,r2,r5 @ Ch(e,f,g)
 	add r3,r3,r6
@@ -205,6 +288,9 @@ sha256_block_data_order:
 	eor r6,r6,r7,ror#13
 	add r3,r3,r12
 	eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 5>=15
+	ldr r1,[sp,#7*4] @ from BODY_16_xx
+#endif
 	orr r0,r7,r8
 	and r2,r7,r8
 	and r0,r0,r9
@@ -212,6 +298,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r10,r10,r3
 	add r6,r6,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 6
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -219,14 +308,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r10,ror#6
-	str r3,[sp,#6*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r10,ror#11
 	eor r2,r11,r4
+#if 6>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 6==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r10,ror#25 @ Sigma1(e)
 	and r2,r2,r10
+	str r3,[sp,#6*4]
 	add r3,r3,r0
 	eor r2,r2,r4 @ Ch(e,f,g)
 	add r3,r3,r5
@@ -235,6 +332,9 @@ sha256_block_data_order:
 	eor r5,r5,r6,ror#13
 	add r3,r3,r12
 	eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 6>=15
+	ldr r1,[sp,#8*4] @ from BODY_16_xx
+#endif
 	orr r0,r6,r7
 	and r2,r6,r7
 	and r0,r0,r8
@@ -242,6 +342,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r9,r9,r3
 	add r5,r5,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 7
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -249,14 +352,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r9,ror#6
-	str r3,[sp,#7*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r9,ror#11
 	eor r2,r10,r11
+#if 7>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 7==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r9,ror#25 @ Sigma1(e)
 	and r2,r2,r9
+	str r3,[sp,#7*4]
 	add r3,r3,r0
 	eor r2,r2,r11 @ Ch(e,f,g)
 	add r3,r3,r4
@@ -265,6 +376,9 @@ sha256_block_data_order:
 	eor r4,r4,r5,ror#13
 	add r3,r3,r12
 	eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 7>=15
+	ldr r1,[sp,#9*4] @ from BODY_16_xx
+#endif
 	orr r0,r5,r6
 	and r2,r5,r6
 	and r0,r0,r7
@@ -272,6 +386,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r8,r8,r3
 	add r4,r4,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 8
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -279,14 +396,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r8,ror#6
-	str r3,[sp,#8*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r8,ror#11
 	eor r2,r9,r10
+#if 8>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 8==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r8,ror#25 @ Sigma1(e)
 	and r2,r2,r8
+	str r3,[sp,#8*4]
 	add r3,r3,r0
 	eor r2,r2,r10 @ Ch(e,f,g)
 	add r3,r3,r11
@@ -295,6 +420,9 @@ sha256_block_data_order:
 	eor r11,r11,r4,ror#13
 	add r3,r3,r12
 	eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 8>=15
+	ldr r1,[sp,#10*4] @ from BODY_16_xx
+#endif
 	orr r0,r4,r5
 	and r2,r4,r5
 	and r0,r0,r6
@@ -302,6 +430,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r7,r7,r3
 	add r11,r11,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 9
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -309,14 +440,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r7,ror#6
-	str r3,[sp,#9*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r7,ror#11
 	eor r2,r8,r9
+#if 9>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 9==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r7,ror#25 @ Sigma1(e)
 	and r2,r2,r7
+	str r3,[sp,#9*4]
 	add r3,r3,r0
 	eor r2,r2,r9 @ Ch(e,f,g)
 	add r3,r3,r10
@@ -325,6 +464,9 @@ sha256_block_data_order:
 	eor r10,r10,r11,ror#13
 	add r3,r3,r12
 	eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 9>=15
+	ldr r1,[sp,#11*4] @ from BODY_16_xx
+#endif
 	orr r0,r11,r4
 	and r2,r11,r4
 	and r0,r0,r5
@@ -332,6 +474,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r6,r6,r3
 	add r10,r10,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 10
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -339,14 +484,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r6,ror#6
-	str r3,[sp,#10*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r6,ror#11
 	eor r2,r7,r8
+#if 10>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 10==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r6,ror#25 @ Sigma1(e)
 	and r2,r2,r6
+	str r3,[sp,#10*4]
 	add r3,r3,r0
 	eor r2,r2,r8 @ Ch(e,f,g)
 	add r3,r3,r9
@@ -355,6 +508,9 @@ sha256_block_data_order:
 	eor r9,r9,r10,ror#13
 	add r3,r3,r12
 	eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 10>=15
+	ldr r1,[sp,#12*4] @ from BODY_16_xx
+#endif
 	orr r0,r10,r11
 	and r2,r10,r11
 	and r0,r0,r4
@@ -362,6 +518,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r5,r5,r3
 	add r9,r9,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 11
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -369,14 +528,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r5,ror#6
-	str r3,[sp,#11*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r5,ror#11
 	eor r2,r6,r7
+#if 11>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 11==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r5,ror#25 @ Sigma1(e)
 	and r2,r2,r5
+	str r3,[sp,#11*4]
 	add r3,r3,r0
 	eor r2,r2,r7 @ Ch(e,f,g)
 	add r3,r3,r8
@@ -385,6 +552,9 @@ sha256_block_data_order:
 	eor r8,r8,r9,ror#13
 	add r3,r3,r12
 	eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 11>=15
+	ldr r1,[sp,#13*4] @ from BODY_16_xx
+#endif
 	orr r0,r9,r10
 	and r2,r9,r10
 	and r0,r0,r11
@@ -392,6 +562,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r4,r4,r3
 	add r8,r8,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 12
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -399,14 +572,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r4,ror#6
-	str r3,[sp,#12*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r4,ror#11
 	eor r2,r5,r6
+#if 12>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 12==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r4,ror#25 @ Sigma1(e)
 	and r2,r2,r4
+	str r3,[sp,#12*4]
 	add r3,r3,r0
 	eor r2,r2,r6 @ Ch(e,f,g)
 	add r3,r3,r7
@@ -415,6 +596,9 @@ sha256_block_data_order:
 	eor r7,r7,r8,ror#13
 	add r3,r3,r12
 	eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 12>=15
+	ldr r1,[sp,#14*4] @ from BODY_16_xx
+#endif
 	orr r0,r8,r9
 	and r2,r8,r9
 	and r0,r0,r10
@@ -422,6 +606,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r11,r11,r3
 	add r7,r7,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 13
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -429,14 +616,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r11,ror#6
-	str r3,[sp,#13*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r11,ror#11
 	eor r2,r4,r5
+#if 13>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 13==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r11,ror#25 @ Sigma1(e)
 	and r2,r2,r11
+	str r3,[sp,#13*4]
 	add r3,r3,r0
 	eor r2,r2,r5 @ Ch(e,f,g)
 	add r3,r3,r6
@@ -445,6 +640,9 @@ sha256_block_data_order:
 	eor r6,r6,r7,ror#13
 	add r3,r3,r12
 	eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 13>=15
+	ldr r1,[sp,#15*4] @ from BODY_16_xx
+#endif
 	orr r0,r7,r8
 	and r2,r7,r8
 	and r0,r0,r9
@@ -452,6 +650,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r10,r10,r3
 	add r6,r6,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 14
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -459,14 +660,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r10,ror#6
-	str r3,[sp,#14*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r10,ror#11
 	eor r2,r11,r4
+#if 14>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 14==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r10,ror#25 @ Sigma1(e)
 	and r2,r2,r10
+	str r3,[sp,#14*4]
 	add r3,r3,r0
 	eor r2,r2,r4 @ Ch(e,f,g)
 	add r3,r3,r5
@@ -475,6 +684,9 @@ sha256_block_data_order:
 	eor r5,r5,r6,ror#13
 	add r3,r3,r12
 	eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 14>=15
+	ldr r1,[sp,#0*4] @ from BODY_16_xx
+#endif
 	orr r0,r6,r7
 	and r2,r6,r7
 	and r0,r0,r8
@@ -482,6 +694,9 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r9,r9,r3
 	add r5,r5,r0
+#if __ARM_ARCH__>=7
+	ldr r3,[r1],#4
+#else
 	ldrb r3,[r1,#3] @ 15
 	ldrb r12,[r1,#2]
 	ldrb r2,[r1,#1]
@@ -489,14 +704,22 @@ sha256_block_data_order:
 	orr r3,r3,r12,lsl#8
 	orr r3,r3,r2,lsl#16
 	orr r3,r3,r0,lsl#24
-	str r1,[sp,#17*4]
-	ldr r12,[r14],#4 @ *K256++
+#endif
 	mov r0,r9,ror#6
-	str r3,[sp,#15*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r9,ror#11
 	eor r2,r10,r11
+#if 15>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 15==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r9,ror#25 @ Sigma1(e)
 	and r2,r2,r9
+	str r3,[sp,#15*4]
 	add r3,r3,r0
 	eor r2,r2,r11 @ Ch(e,f,g)
 	add r3,r3,r4
@@ -505,6 +728,9 @@ sha256_block_data_order:
 	eor r4,r4,r5,ror#13
 	add r3,r3,r12
 	eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 15>=15
+	ldr r1,[sp,#1*4] @ from BODY_16_xx
+#endif
 	orr r0,r5,r6
 	and r2,r5,r6
 	and r0,r0,r7
@@ -513,26 +739,34 @@ sha256_block_data_order:
 	add r8,r8,r3
 	add r4,r4,r0
 .Lrounds_16_xx:
-	ldr r2,[sp,#1*4] @ 16
+	@ ldr r1,[sp,#1*4] @ 16
 	ldr r12,[sp,#14*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#0*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#9*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#9*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r8,ror#6
-	str r3,[sp,#0*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r8,ror#11
 	eor r2,r9,r10
+#if 16>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 16==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r8,ror#25 @ Sigma1(e)
 	and r2,r2,r8
+	str r3,[sp,#0*4]
 	add r3,r3,r0
 	eor r2,r2,r10 @ Ch(e,f,g)
 	add r3,r3,r11
@@ -541,6 +775,9 @@ sha256_block_data_order:
 	eor r11,r11,r4,ror#13
 	add r3,r3,r12
 	eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 16>=15
+	ldr r1,[sp,#2*4] @ from BODY_16_xx
+#endif
 	orr r0,r4,r5
 	and r2,r4,r5
 	and r0,r0,r6
@@ -548,26 +785,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r7,r7,r3
 	add r11,r11,r0
-	ldr r2,[sp,#2*4] @ 17
+	@ ldr r1,[sp,#2*4] @ 17
 	ldr r12,[sp,#15*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#1*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#10*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#10*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r7,ror#6
-	str r3,[sp,#1*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r7,ror#11
 	eor r2,r8,r9
+#if 17>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 17==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r7,ror#25 @ Sigma1(e)
 	and r2,r2,r7
+	str r3,[sp,#1*4]
 	add r3,r3,r0
 	eor r2,r2,r9 @ Ch(e,f,g)
 	add r3,r3,r10
@@ -576,6 +821,9 @@ sha256_block_data_order:
 	eor r10,r10,r11,ror#13
 	add r3,r3,r12
 	eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 17>=15
+	ldr r1,[sp,#3*4] @ from BODY_16_xx
+#endif
 	orr r0,r11,r4
 	and r2,r11,r4
 	and r0,r0,r5
@@ -583,26 +831,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r6,r6,r3
 	add r10,r10,r0
-	ldr r2,[sp,#3*4] @ 18
+	@ ldr r1,[sp,#3*4] @ 18
 	ldr r12,[sp,#0*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#2*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#11*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#11*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r6,ror#6
-	str r3,[sp,#2*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r6,ror#11
 	eor r2,r7,r8
+#if 18>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 18==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r6,ror#25 @ Sigma1(e)
 	and r2,r2,r6
+	str r3,[sp,#2*4]
 	add r3,r3,r0
 	eor r2,r2,r8 @ Ch(e,f,g)
 	add r3,r3,r9
@@ -611,6 +867,9 @@ sha256_block_data_order:
 	eor r9,r9,r10,ror#13
 	add r3,r3,r12
 	eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 18>=15
+	ldr r1,[sp,#4*4] @ from BODY_16_xx
+#endif
 	orr r0,r10,r11
 	and r2,r10,r11
 	and r0,r0,r4
@@ -618,26 +877,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r5,r5,r3
 	add r9,r9,r0
-	ldr r2,[sp,#4*4] @ 19
+	@ ldr r1,[sp,#4*4] @ 19
 	ldr r12,[sp,#1*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#3*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#12*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#12*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r5,ror#6
-	str r3,[sp,#3*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r5,ror#11
 	eor r2,r6,r7
+#if 19>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 19==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r5,ror#25 @ Sigma1(e)
 	and r2,r2,r5
+	str r3,[sp,#3*4]
 	add r3,r3,r0
 	eor r2,r2,r7 @ Ch(e,f,g)
 	add r3,r3,r8
@@ -646,6 +913,9 @@ sha256_block_data_order:
 	eor r8,r8,r9,ror#13
 	add r3,r3,r12
 	eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 19>=15
+	ldr r1,[sp,#5*4] @ from BODY_16_xx
+#endif
 	orr r0,r9,r10
 	and r2,r9,r10
 	and r0,r0,r11
@@ -653,26 +923,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r4,r4,r3
 	add r8,r8,r0
-	ldr r2,[sp,#5*4] @ 20
+	@ ldr r1,[sp,#5*4] @ 20
 	ldr r12,[sp,#2*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#4*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#13*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#13*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r4,ror#6
-	str r3,[sp,#4*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r4,ror#11
 	eor r2,r5,r6
+#if 20>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 20==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r4,ror#25 @ Sigma1(e)
 	and r2,r2,r4
+	str r3,[sp,#4*4]
 	add r3,r3,r0
 	eor r2,r2,r6 @ Ch(e,f,g)
 	add r3,r3,r7
@@ -681,6 +959,9 @@ sha256_block_data_order:
 	eor r7,r7,r8,ror#13
 	add r3,r3,r12
 	eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 20>=15
+	ldr r1,[sp,#6*4] @ from BODY_16_xx
+#endif
 	orr r0,r8,r9
 	and r2,r8,r9
 	and r0,r0,r10
@@ -688,26 +969,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r11,r11,r3
 	add r7,r7,r0
-	ldr r2,[sp,#6*4] @ 21
+	@ ldr r1,[sp,#6*4] @ 21
 	ldr r12,[sp,#3*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#5*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#14*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#14*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r11,ror#6
-	str r3,[sp,#5*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r11,ror#11
 	eor r2,r4,r5
+#if 21>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 21==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r11,ror#25 @ Sigma1(e)
 	and r2,r2,r11
+	str r3,[sp,#5*4]
 	add r3,r3,r0
 	eor r2,r2,r5 @ Ch(e,f,g)
 	add r3,r3,r6
@@ -716,6 +1005,9 @@ sha256_block_data_order:
 	eor r6,r6,r7,ror#13
 	add r3,r3,r12
 	eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 21>=15
+	ldr r1,[sp,#7*4] @ from BODY_16_xx
+#endif
 	orr r0,r7,r8
 	and r2,r7,r8
 	and r0,r0,r9
@@ -723,26 +1015,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r10,r10,r3
 	add r6,r6,r0
-	ldr r2,[sp,#7*4] @ 22
+	@ ldr r1,[sp,#7*4] @ 22
 	ldr r12,[sp,#4*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#6*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#15*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#15*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r10,ror#6
-	str r3,[sp,#6*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r10,ror#11
 	eor r2,r11,r4
+#if 22>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 22==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r10,ror#25 @ Sigma1(e)
 	and r2,r2,r10
+	str r3,[sp,#6*4]
 	add r3,r3,r0
 	eor r2,r2,r4 @ Ch(e,f,g)
 	add r3,r3,r5
@@ -751,6 +1051,9 @@ sha256_block_data_order:
 	eor r5,r5,r6,ror#13
 	add r3,r3,r12
 	eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 22>=15
+	ldr r1,[sp,#8*4] @ from BODY_16_xx
+#endif
 	orr r0,r6,r7
 	and r2,r6,r7
 	and r0,r0,r8
@@ -758,26 +1061,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r9,r9,r3
 	add r5,r5,r0
-	ldr r2,[sp,#8*4] @ 23
+	@ ldr r1,[sp,#8*4] @ 23
 	ldr r12,[sp,#5*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#7*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#0*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#0*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r9,ror#6
-	str r3,[sp,#7*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r9,ror#11
 	eor r2,r10,r11
+#if 23>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 23==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r9,ror#25 @ Sigma1(e)
 	and r2,r2,r9
+	str r3,[sp,#7*4]
 	add r3,r3,r0
 	eor r2,r2,r11 @ Ch(e,f,g)
 	add r3,r3,r4
@@ -786,6 +1097,9 @@ sha256_block_data_order:
 	eor r4,r4,r5,ror#13
 	add r3,r3,r12
 	eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 23>=15
+	ldr r1,[sp,#9*4] @ from BODY_16_xx
+#endif
 	orr r0,r5,r6
 	and r2,r5,r6
 	and r0,r0,r7
@@ -793,26 +1107,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r8,r8,r3
 	add r4,r4,r0
-	ldr r2,[sp,#9*4] @ 24
+	@ ldr r1,[sp,#9*4] @ 24
 	ldr r12,[sp,#6*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#8*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#1*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#1*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r8,ror#6
-	str r3,[sp,#8*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r8,ror#11
 	eor r2,r9,r10
+#if 24>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 24==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r8,ror#25 @ Sigma1(e)
 	and r2,r2,r8
+	str r3,[sp,#8*4]
 	add r3,r3,r0
 	eor r2,r2,r10 @ Ch(e,f,g)
 	add r3,r3,r11
@@ -821,6 +1143,9 @@ sha256_block_data_order:
 	eor r11,r11,r4,ror#13
 	add r3,r3,r12
 	eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 24>=15
+	ldr r1,[sp,#10*4] @ from BODY_16_xx
+#endif
 	orr r0,r4,r5
 	and r2,r4,r5
 	and r0,r0,r6
@@ -828,26 +1153,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r7,r7,r3
 	add r11,r11,r0
-	ldr r2,[sp,#10*4] @ 25
+	@ ldr r1,[sp,#10*4] @ 25
 	ldr r12,[sp,#7*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#9*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#2*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#2*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r7,ror#6
-	str r3,[sp,#9*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r7,ror#11
 	eor r2,r8,r9
+#if 25>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 25==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r7,ror#25 @ Sigma1(e)
 	and r2,r2,r7
+	str r3,[sp,#9*4]
 	add r3,r3,r0
 	eor r2,r2,r9 @ Ch(e,f,g)
 	add r3,r3,r10
@@ -856,6 +1189,9 @@ sha256_block_data_order:
 	eor r10,r10,r11,ror#13
 	add r3,r3,r12
 	eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 25>=15
+	ldr r1,[sp,#11*4] @ from BODY_16_xx
+#endif
 	orr r0,r11,r4
 	and r2,r11,r4
 	and r0,r0,r5
@@ -863,26 +1199,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r6,r6,r3
 	add r10,r10,r0
-	ldr r2,[sp,#11*4] @ 26
+	@ ldr r1,[sp,#11*4] @ 26
 	ldr r12,[sp,#8*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#10*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#3*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#3*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r6,ror#6
-	str r3,[sp,#10*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r6,ror#11
 	eor r2,r7,r8
+#if 26>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 26==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r6,ror#25 @ Sigma1(e)
 	and r2,r2,r6
+	str r3,[sp,#10*4]
 	add r3,r3,r0
 	eor r2,r2,r8 @ Ch(e,f,g)
 	add r3,r3,r9
@@ -891,6 +1235,9 @@ sha256_block_data_order:
 	eor r9,r9,r10,ror#13
 	add r3,r3,r12
 	eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 26>=15
+	ldr r1,[sp,#12*4] @ from BODY_16_xx
+#endif
 	orr r0,r10,r11
 	and r2,r10,r11
 	and r0,r0,r4
@@ -898,26 +1245,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r5,r5,r3
 	add r9,r9,r0
-	ldr r2,[sp,#12*4] @ 27
+	@ ldr r1,[sp,#12*4] @ 27
 	ldr r12,[sp,#9*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#11*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#4*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#4*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r5,ror#6
-	str r3,[sp,#11*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r5,ror#11
 	eor r2,r6,r7
+#if 27>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 27==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r5,ror#25 @ Sigma1(e)
 	and r2,r2,r5
+	str r3,[sp,#11*4]
 	add r3,r3,r0
 	eor r2,r2,r7 @ Ch(e,f,g)
 	add r3,r3,r8
@@ -926,6 +1281,9 @@ sha256_block_data_order:
 	eor r8,r8,r9,ror#13
 	add r3,r3,r12
 	eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 27>=15
+	ldr r1,[sp,#13*4] @ from BODY_16_xx
+#endif
 	orr r0,r9,r10
 	and r2,r9,r10
 	and r0,r0,r11
@@ -933,26 +1291,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r4,r4,r3
 	add r8,r8,r0
-	ldr r2,[sp,#13*4] @ 28
+	@ ldr r1,[sp,#13*4] @ 28
 	ldr r12,[sp,#10*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#12*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#5*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#5*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r4,ror#6
-	str r3,[sp,#12*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r4,ror#11
 	eor r2,r5,r6
+#if 28>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 28==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r4,ror#25 @ Sigma1(e)
 	and r2,r2,r4
+	str r3,[sp,#12*4]
 	add r3,r3,r0
 	eor r2,r2,r6 @ Ch(e,f,g)
 	add r3,r3,r7
@@ -961,6 +1327,9 @@ sha256_block_data_order:
 	eor r7,r7,r8,ror#13
 	add r3,r3,r12
 	eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 28>=15
+	ldr r1,[sp,#14*4] @ from BODY_16_xx
+#endif
 	orr r0,r8,r9
 	and r2,r8,r9
 	and r0,r0,r10
@@ -968,26 +1337,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r11,r11,r3
 	add r7,r7,r0
-	ldr r2,[sp,#14*4] @ 29
+	@ ldr r1,[sp,#14*4] @ 29
 	ldr r12,[sp,#11*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#13*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#6*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#6*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r11,ror#6
-	str r3,[sp,#13*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r11,ror#11
 	eor r2,r4,r5
+#if 29>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 29==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r11,ror#25 @ Sigma1(e)
 	and r2,r2,r11
+	str r3,[sp,#13*4]
 	add r3,r3,r0
 	eor r2,r2,r5 @ Ch(e,f,g)
 	add r3,r3,r6
@@ -996,6 +1373,9 @@ sha256_block_data_order:
 	eor r6,r6,r7,ror#13
 	add r3,r3,r12
 	eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 29>=15
+	ldr r1,[sp,#15*4] @ from BODY_16_xx
+#endif
 	orr r0,r7,r8
 	and r2,r7,r8
 	and r0,r0,r9
@@ -1003,26 +1383,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r10,r10,r3
 	add r6,r6,r0
-	ldr r2,[sp,#15*4] @ 30
+	@ ldr r1,[sp,#15*4] @ 30
 	ldr r12,[sp,#12*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#14*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#7*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#7*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r10,ror#6
-	str r3,[sp,#14*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r10,ror#11
 	eor r2,r11,r4
+#if 30>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 30==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r10,ror#25 @ Sigma1(e)
 	and r2,r2,r10
+	str r3,[sp,#14*4]
 	add r3,r3,r0
 	eor r2,r2,r4 @ Ch(e,f,g)
 	add r3,r3,r5
@@ -1031,6 +1419,9 @@ sha256_block_data_order:
 	eor r5,r5,r6,ror#13
 	add r3,r3,r12
 	eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 30>=15
+	ldr r1,[sp,#0*4] @ from BODY_16_xx
+#endif
 	orr r0,r6,r7
 	and r2,r6,r7
 	and r0,r0,r8
@@ -1038,26 +1429,34 @@ sha256_block_data_order:
 	orr r0,r0,r2 @ Maj(a,b,c)
 	add r9,r9,r3
 	add r5,r5,r0
-	ldr r2,[sp,#0*4] @ 31
+	@ ldr r1,[sp,#0*4] @ 31
 	ldr r12,[sp,#13*4]
+	mov r0,r1,ror#7
 	ldr r3,[sp,#15*4]
-	mov r0,r2,ror#7
-	ldr r1,[sp,#8*4]
-	eor r0,r0,r2,ror#18
-	eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
-	mov r2,r12,ror#17
+	eor r0,r0,r1,ror#18
+	ldr r2,[sp,#8*4]
+	eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+	mov r1,r12,ror#17
 	add r3,r3,r0
-	eor r2,r2,r12,ror#19
-	add r3,r3,r1
-	eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+	eor r1,r1,r12,ror#19
 	add r3,r3,r2
-	ldr r12,[r14],#4 @ *K256++
+	eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+	@ add r3,r3,r1
 	mov r0,r9,ror#6
-	str r3,[sp,#15*4]
+	ldr r12,[r14],#4 @ *K256++
 	eor r0,r0,r9,ror#11
 	eor r2,r10,r11
+#if 31>=16
+	add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+	rev r3,r3
+#endif
+#if 31==15
+	str r1,[sp,#17*4] @ leave room for r1
+#endif
 	eor r0,r0,r9,ror#25 @ Sigma1(e)
 	and r2,r2,r9
+	str r3,[sp,#15*4]
 	add r3,r3,r0
 	eor r2,r2,r11 @ Ch(e,f,g)
 	add r3,r3,r4
@@ -1066,6 +1465,9 @@ sha256_block_data_order:
 	eor r4,r4,r5,ror#13
 	add r3,r3,r12
 	eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 31>=15
+	ldr r1,[sp,#1*4] @ from BODY_16_xx
+#endif
 	orr r0,r5,r6
 	and r2,r5,r6
 	and r0,r0,r7
@@ -1102,10 +1504,14 @@ sha256_block_data_order:
 	bne .Loop
 
 	add sp,sp,#19*4 @ destroy frame
-	ldmia sp!,{r4-r12,lr}
+#if __ARM_ARCH__>=5
+	ldmia sp!,{r4-r11,pc}
+#else
+	ldmia sp!,{r4-r11,lr}
 	tst lr,#1
 	moveq pc,lr @ be binary compatible with V4, yet
 	.word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
 .size sha256_block_data_order,.-sha256_block_data_order
 .asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by "
 .align 2
-- 
cgit v1.2.3
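
Background on the change above: SHA-256 consumes its input as big-endian
32-bit words, and the heart of this assembly update is how each word is
loaded. Pre-ARMv7 cores assemble the word from four ldrb byte loads merged
with orr (safe for any alignment and endianness); when __ARM_ARCH__>=7 and
the target is little-endian, a single ldr followed by rev (byte reverse)
does the same job in two instructions. A minimal C sketch of the two load
paths, for reference only -- the function names are illustrative and
__builtin_bswap32 assumes a GCC/Clang-style compiler:

    #include <stdint.h>
    #include <string.h>

    /* Pre-ARMv7 path: build the big-endian word byte by byte,
     * mirroring the ldrb/orr sequence in the patched assembly. */
    static uint32_t load_be32_bytewise(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    /* ARMv7+ little-endian path: one word load plus a byte swap,
     * mirroring the ldr/rev pair (ARMv7 tolerates unaligned ldr
     * on normal memory). */
    static uint32_t load_be32_rev(const uint8_t *p)
    {
        uint32_t w;
        memcpy(&w, p, sizeof w);      /* the ldr */
        return __builtin_bswap32(w);  /* the rev */
    }

The epilogue is made conditional in the same spirit: on __ARM_ARCH__>=5 the
function returns by popping pc directly (ldmia sp!,{r4-r11,pc}), while the
ARMv4 fallback restores lr and returns through .word 0xe12fff1e, the
encoding of bx lr, keeping the code binary-compatible with ARMv4 yet
interoperable with Thumb callers, as the diff's own comments note.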