| author | Parménides GV <parmegv@sdf.org> | 2014-10-02 18:07:56 +0200 |
|---|---|---|
| committer | Parménides GV <parmegv@sdf.org> | 2014-10-02 18:07:56 +0200 |
| commit | 914c5156b014970dde717b9a27c0c69f11cc7d98 (patch) | |
| tree | cb15666fb01b0f0410327ae7aaa23df444ac3b4c /app/openssl/crypto/bn | |
| parent | 22b7ee4614a2f47d55496de8a9b55040c0f4ba85 (diff) | |
Binaries from r885 of ics-openvpn, built with ndk10b for 32-bit targets.
We don't support 64-bit targets because of https://code.google.com/p/android/issues/detail?id=77004&thanks=77004&ts=1412248443.
Diffstat (limited to 'app/openssl/crypto/bn')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | app/openssl/crypto/bn/asm/armv4-gf2m.S | 106 |
| -rw-r--r-- | app/openssl/crypto/bn/asm/armv4-gf2m.pl | 139 |
| -rw-r--r-- [l---------] | app/openssl/crypto/bn/asm/armv4-mont.S | 580 |
| -rw-r--r-- | app/openssl/crypto/bn/asm/armv4-mont.pl | 483 |
| -rw-r--r-- | app/openssl/crypto/bn/asm/armv4-mont.s | 145 |
| -rw-r--r-- | app/openssl/crypto/bn/asm/mips3.S (renamed from app/openssl/crypto/bn/asm/mips3.s) | 0 |
| -rw-r--r-- | app/openssl/crypto/bn/asm/pa-risc2.S (renamed from app/openssl/crypto/bn/asm/pa-risc2.s) | 0 |
| -rw-r--r-- | app/openssl/crypto/bn/asm/pa-risc2W.S (renamed from app/openssl/crypto/bn/asm/pa-risc2W.s) | 0 |
| -rw-r--r-- | app/openssl/crypto/bn/bn_mont.c | 46 |
9 files changed, 1200 insertions, 299 deletions
diff --git a/app/openssl/crypto/bn/asm/armv4-gf2m.S b/app/openssl/crypto/bn/asm/armv4-gf2m.S index 038f0864..0fa25b26 100644 --- a/app/openssl/crypto/bn/asm/armv4-gf2m.S +++ b/app/openssl/crypto/bn/asm/armv4-gf2m.S @@ -5,31 +5,6 @@ #if __ARM_ARCH__>=7 .fpu neon - -.type mul_1x1_neon,%function -.align 5 -mul_1x1_neon: - vshl.u64 d2,d16,#8 @ q1-q3 are slided - vmull.p8 q0,d16,d17 @ a·bb - vshl.u64 d4,d16,#16 - vmull.p8 q1,d2,d17 @ a<<8·bb - vshl.u64 d6,d16,#24 - vmull.p8 q2,d4,d17 @ a<<16·bb - vshr.u64 d2,#8 - vmull.p8 q3,d6,d17 @ a<<24·bb - vshl.u64 d3,#24 - veor d0,d2 - vshr.u64 d4,#16 - veor d0,d3 - vshl.u64 d5,#16 - veor d0,d4 - vshr.u64 d6,#24 - veor d0,d5 - vshl.u64 d7,#8 - veor d0,d6 - veor d0,d7 - .word 0xe12fff1e -.size mul_1x1_neon,.-mul_1x1_neon #endif .type mul_1x1_ialu,%function .align 5 @@ -120,40 +95,53 @@ bn_GF2m_mul_2x2: tst r12,#1 beq .Lialu - veor d18,d18 - vmov.32 d19,r3,r3 @ two copies of b1 - vmov.32 d18[0],r1 @ a1 - - veor d20,d20 - vld1.32 d21[],[sp,:32] @ two copies of b0 - vmov.32 d20[0],r2 @ a0 - mov r12,lr - - vmov d16,d18 - vmov d17,d19 - bl mul_1x1_neon @ a1·b1 - vmov d22,d0 - - vmov d16,d20 - vmov d17,d21 - bl mul_1x1_neon @ a0·b0 - vmov d23,d0 - - veor d16,d20,d18 - veor d17,d21,d19 - veor d20,d23,d22 - bl mul_1x1_neon @ (a0+a1)·(b0+b1) - - veor d0,d20 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1 - vshl.u64 d1,d0,#32 - vshr.u64 d0,d0,#32 - veor d23,d1 - veor d22,d0 - vst1.32 {d23[0]},[r0,:32]! - vst1.32 {d23[1]},[r0,:32]! - vst1.32 {d22[0]},[r0,:32]! - vst1.32 {d22[1]},[r0,:32] - bx r12 + ldr r12, [sp] @ 5th argument + vmov.32 d26, r2, r1 + vmov.32 d27, r12, r3 + vmov.i64 d28, #0x0000ffffffffffff + vmov.i64 d29, #0x00000000ffffffff + vmov.i64 d30, #0x000000000000ffff + + vext.8 d2, d26, d26, #1 @ A1 + vmull.p8 q1, d2, d27 @ F = A1*B + vext.8 d0, d27, d27, #1 @ B1 + vmull.p8 q0, d26, d0 @ E = A*B1 + vext.8 d4, d26, d26, #2 @ A2 + vmull.p8 q2, d4, d27 @ H = A2*B + vext.8 d16, d27, d27, #2 @ B2 + vmull.p8 q8, d26, d16 @ G = A*B2 + vext.8 d6, d26, d26, #3 @ A3 + veor q1, q1, q0 @ L = E + F + vmull.p8 q3, d6, d27 @ J = A3*B + vext.8 d0, d27, d27, #3 @ B3 + veor q2, q2, q8 @ M = G + H + vmull.p8 q0, d26, d0 @ I = A*B3 + veor d2, d2, d3 @ t0 = (L) (P0 + P1) << 8 + vand d3, d3, d28 + vext.8 d16, d27, d27, #4 @ B4 + veor d4, d4, d5 @ t1 = (M) (P2 + P3) << 16 + vand d5, d5, d29 + vmull.p8 q8, d26, d16 @ K = A*B4 + veor q3, q3, q0 @ N = I + J + veor d2, d2, d3 + veor d4, d4, d5 + veor d6, d6, d7 @ t2 = (N) (P4 + P5) << 24 + vand d7, d7, d30 + vext.8 q1, q1, q1, #15 + veor d16, d16, d17 @ t3 = (K) (P6 + P7) << 32 + vmov.i64 d17, #0 + vext.8 q2, q2, q2, #14 + veor d6, d6, d7 + vmull.p8 q0, d26, d27 @ D = A*B + vext.8 q8, q8, q8, #12 + vext.8 q3, q3, q3, #13 + veor q1, q1, q2 + veor q3, q3, q8 + veor q0, q0, q1 + veor q0, q0, q3 + + vst1.32 {q0}, [r0] + bx lr @ bx lr .align 4 .Lialu: #endif diff --git a/app/openssl/crypto/bn/asm/armv4-gf2m.pl b/app/openssl/crypto/bn/asm/armv4-gf2m.pl index 22ad1f85..3f1f4f67 100644 --- a/app/openssl/crypto/bn/asm/armv4-gf2m.pl +++ b/app/openssl/crypto/bn/asm/armv4-gf2m.pl @@ -20,14 +20,21 @@ # length, more for longer keys. Even though NEON 1x1 multiplication # runs in even less cycles, ~30, improvement is measurable only on # longer keys. One has to optimize code elsewhere to get NEON glow... +# +# April 2014 +# +# Double bn_GF2m_mul_2x2 performance by using algorithm from paper +# referred below, which improves ECDH and ECDSA verify benchmarks +# by 18-40%. +# +# Câmara, D.; Gouvêa, C. P. L.; López, J. 
& Dahab, R.: Fast Software +# Polynomial Multiplication on ARM Processors using the NEON Engine. +# +# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; -sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; } -sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; } -sub Q() { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; } - $code=<<___; #include "arm_arch.h" @@ -36,31 +43,6 @@ $code=<<___; #if __ARM_ARCH__>=7 .fpu neon - -.type mul_1x1_neon,%function -.align 5 -mul_1x1_neon: - vshl.u64 `&Dlo("q1")`,d16,#8 @ q1-q3 are slided $a - vmull.p8 `&Q("d0")`,d16,d17 @ a·bb - vshl.u64 `&Dlo("q2")`,d16,#16 - vmull.p8 q1,`&Dlo("q1")`,d17 @ a<<8·bb - vshl.u64 `&Dlo("q3")`,d16,#24 - vmull.p8 q2,`&Dlo("q2")`,d17 @ a<<16·bb - vshr.u64 `&Dlo("q1")`,#8 - vmull.p8 q3,`&Dlo("q3")`,d17 @ a<<24·bb - vshl.u64 `&Dhi("q1")`,#24 - veor d0,`&Dlo("q1")` - vshr.u64 `&Dlo("q2")`,#16 - veor d0,`&Dhi("q1")` - vshl.u64 `&Dhi("q2")`,#16 - veor d0,`&Dlo("q2")` - vshr.u64 `&Dlo("q3")`,#24 - veor d0,`&Dhi("q2")` - vshl.u64 `&Dhi("q3")`,#8 - veor d0,`&Dlo("q3")` - veor d0,`&Dhi("q3")` - bx lr -.size mul_1x1_neon,.-mul_1x1_neon #endif ___ ################ @@ -159,8 +141,9 @@ ___ # void bn_GF2m_mul_2x2(BN_ULONG *r, # BN_ULONG a1,BN_ULONG a0, # BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0 - -($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23)); +{ +my ($r,$t0,$t1,$t2,$t3)=map("q$_",(0..3,8..12)); +my ($a,$b,$k48,$k32,$k16)=map("d$_",(26..31)); $code.=<<___; .global bn_GF2m_mul_2x2 @@ -173,44 +156,58 @@ bn_GF2m_mul_2x2: tst r12,#1 beq .Lialu - veor $A1,$A1 - vmov.32 $B1,r3,r3 @ two copies of b1 - vmov.32 ${A1}[0],r1 @ a1 - - veor $A0,$A0 - vld1.32 ${B0}[],[sp,:32] @ two copies of b0 - vmov.32 ${A0}[0],r2 @ a0 - mov r12,lr - - vmov d16,$A1 - vmov d17,$B1 - bl mul_1x1_neon @ a1·b1 - vmov $A1B1,d0 - - vmov d16,$A0 - vmov d17,$B0 - bl mul_1x1_neon @ a0·b0 - vmov $A0B0,d0 - - veor d16,$A0,$A1 - veor d17,$B0,$B1 - veor $A0,$A0B0,$A1B1 - bl mul_1x1_neon @ (a0+a1)·(b0+b1) - - veor d0,$A0 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1 - vshl.u64 d1,d0,#32 - vshr.u64 d0,d0,#32 - veor $A0B0,d1 - veor $A1B1,d0 - vst1.32 {${A0B0}[0]},[r0,:32]! - vst1.32 {${A0B0}[1]},[r0,:32]! - vst1.32 {${A1B1}[0]},[r0,:32]! 
- vst1.32 {${A1B1}[1]},[r0,:32] - bx r12 + ldr r12, [sp] @ 5th argument + vmov.32 $a, r2, r1 + vmov.32 $b, r12, r3 + vmov.i64 $k48, #0x0000ffffffffffff + vmov.i64 $k32, #0x00000000ffffffff + vmov.i64 $k16, #0x000000000000ffff + + vext.8 $t0#lo, $a, $a, #1 @ A1 + vmull.p8 $t0, $t0#lo, $b @ F = A1*B + vext.8 $r#lo, $b, $b, #1 @ B1 + vmull.p8 $r, $a, $r#lo @ E = A*B1 + vext.8 $t1#lo, $a, $a, #2 @ A2 + vmull.p8 $t1, $t1#lo, $b @ H = A2*B + vext.8 $t3#lo, $b, $b, #2 @ B2 + vmull.p8 $t3, $a, $t3#lo @ G = A*B2 + vext.8 $t2#lo, $a, $a, #3 @ A3 + veor $t0, $t0, $r @ L = E + F + vmull.p8 $t2, $t2#lo, $b @ J = A3*B + vext.8 $r#lo, $b, $b, #3 @ B3 + veor $t1, $t1, $t3 @ M = G + H + vmull.p8 $r, $a, $r#lo @ I = A*B3 + veor $t0#lo, $t0#lo, $t0#hi @ t0 = (L) (P0 + P1) << 8 + vand $t0#hi, $t0#hi, $k48 + vext.8 $t3#lo, $b, $b, #4 @ B4 + veor $t1#lo, $t1#lo, $t1#hi @ t1 = (M) (P2 + P3) << 16 + vand $t1#hi, $t1#hi, $k32 + vmull.p8 $t3, $a, $t3#lo @ K = A*B4 + veor $t2, $t2, $r @ N = I + J + veor $t0#lo, $t0#lo, $t0#hi + veor $t1#lo, $t1#lo, $t1#hi + veor $t2#lo, $t2#lo, $t2#hi @ t2 = (N) (P4 + P5) << 24 + vand $t2#hi, $t2#hi, $k16 + vext.8 $t0, $t0, $t0, #15 + veor $t3#lo, $t3#lo, $t3#hi @ t3 = (K) (P6 + P7) << 32 + vmov.i64 $t3#hi, #0 + vext.8 $t1, $t1, $t1, #14 + veor $t2#lo, $t2#lo, $t2#hi + vmull.p8 $r, $a, $b @ D = A*B + vext.8 $t3, $t3, $t3, #12 + vext.8 $t2, $t2, $t2, #13 + veor $t0, $t0, $t1 + veor $t2, $t2, $t3 + veor $r, $r, $t0 + veor $r, $r, $t2 + + vst1.32 {$r}, [r0] + ret @ bx lr .align 4 .Lialu: #endif ___ +} $ret="r10"; # reassigned 1st argument $code.=<<___; stmdb sp!,{r4-r10,lr} @@ -272,7 +269,13 @@ $code.=<<___; .comm OPENSSL_armcap_P,4,4 ___ -$code =~ s/\`([^\`]*)\`/eval $1/gem; -$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 -print $code; +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or + s/\bret\b/bx lr/go or + s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4 + + print $_,"\n"; +} close STDOUT; # enforce flush diff --git a/app/openssl/crypto/bn/asm/armv4-mont.S b/app/openssl/crypto/bn/asm/armv4-mont.S index eb5cd951..fecae15e 120000..100644 --- a/app/openssl/crypto/bn/asm/armv4-mont.S +++ b/app/openssl/crypto/bn/asm/armv4-mont.S @@ -1 +1,579 @@ -armv4-mont.s
\ No newline at end of file +#include "arm_arch.h" + +.text +.code 32 + +#if __ARM_ARCH__>=7 +.align 5 +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-bn_mul_mont +#endif + +.global bn_mul_mont +.type bn_mul_mont,%function + +.align 5 +bn_mul_mont: + ldr ip,[sp,#4] @ load num + stmdb sp!,{r0,r2} @ sp points at argument block +#if __ARM_ARCH__>=7 + tst ip,#7 + bne .Lialu + adr r0,bn_mul_mont + ldr r2,.LOPENSSL_armcap + ldr r0,[r0,r2] + tst r0,#1 @ NEON available? + ldmia sp, {r0,r2} + beq .Lialu + add sp,sp,#8 + b bn_mul8x_mont_neon +.align 4 +.Lialu: +#endif + cmp ip,#2 + mov r0,ip @ load num + movlt r0,#0 + addlt sp,sp,#2*4 + blt .Labrt + + stmdb sp!,{r4-r12,lr} @ save 10 registers + + mov r0,r0,lsl#2 @ rescale r0 for byte count + sub sp,sp,r0 @ alloca(4*num) + sub sp,sp,#4 @ +extra dword + sub r0,r0,#4 @ "num=num-1" + add r4,r2,r0 @ &bp[num-1] + + add r0,sp,r0 @ r0 to point at &tp[num-1] + ldr r8,[r0,#14*4] @ &n0 + ldr r2,[r2] @ bp[0] + ldr r5,[r1],#4 @ ap[0],ap++ + ldr r6,[r3],#4 @ np[0],np++ + ldr r8,[r8] @ *n0 + str r4,[r0,#15*4] @ save &bp[num] + + umull r10,r11,r5,r2 @ ap[0]*bp[0] + str r8,[r0,#14*4] @ save n0 value + mul r8,r10,r8 @ "tp[0]"*n0 + mov r12,#0 + umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" + mov r4,sp + +.L1st: + ldr r5,[r1],#4 @ ap[j],ap++ + mov r10,r11 + ldr r6,[r3],#4 @ np[j],np++ + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[j]*bp[0] + mov r14,#0 + umlal r12,r14,r6,r8 @ np[j]*n0 + adds r12,r12,r10 + str r12,[r4],#4 @ tp[j-1]=,tp++ + adc r12,r14,#0 + cmp r4,r0 + bne .L1st + + adds r12,r12,r11 + ldr r4,[r0,#13*4] @ restore bp + mov r14,#0 + ldr r8,[r0,#14*4] @ restore n0 + adc r14,r14,#0 + str r12,[r0] @ tp[num-1]= + str r14,[r0,#4] @ tp[num]= + +.Louter: + sub r7,r0,sp @ "original" r0-1 value + sub r1,r1,r7 @ "rewind" ap to &ap[1] + ldr r2,[r4,#4]! 
@ *(++bp) + sub r3,r3,r7 @ "rewind" np to &np[1] + ldr r5,[r1,#-4] @ ap[0] + ldr r10,[sp] @ tp[0] + ldr r6,[r3,#-4] @ np[0] + ldr r7,[sp,#4] @ tp[1] + + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] + str r4,[r0,#13*4] @ save bp + mul r8,r10,r8 + mov r12,#0 + umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" + mov r4,sp + +.Linner: + ldr r5,[r1],#4 @ ap[j],ap++ + adds r10,r11,r7 @ +=tp[j] + ldr r6,[r3],#4 @ np[j],np++ + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[j]*bp[i] + mov r14,#0 + umlal r12,r14,r6,r8 @ np[j]*n0 + adc r11,r11,#0 + ldr r7,[r4,#8] @ tp[j+1] + adds r12,r12,r10 + str r12,[r4],#4 @ tp[j-1]=,tp++ + adc r12,r14,#0 + cmp r4,r0 + bne .Linner + + adds r12,r12,r11 + mov r14,#0 + ldr r4,[r0,#13*4] @ restore bp + adc r14,r14,#0 + ldr r8,[r0,#14*4] @ restore n0 + adds r12,r12,r7 + ldr r7,[r0,#15*4] @ restore &bp[num] + adc r14,r14,#0 + str r12,[r0] @ tp[num-1]= + str r14,[r0,#4] @ tp[num]= + + cmp r4,r7 + bne .Louter + + ldr r2,[r0,#12*4] @ pull rp + add r0,r0,#4 @ r0 to point at &tp[num] + sub r5,r0,sp @ "original" num value + mov r4,sp @ "rewind" r4 + mov r1,r4 @ "borrow" r1 + sub r3,r3,r5 @ "rewind" r3 to &np[0] + + subs r7,r7,r7 @ "clear" carry flag +.Lsub: ldr r7,[r4],#4 + ldr r6,[r3],#4 + sbcs r7,r7,r6 @ tp[j]-np[j] + str r7,[r2],#4 @ rp[j]= + teq r4,r0 @ preserve carry + bne .Lsub + sbcs r14,r14,#0 @ upmost carry + mov r4,sp @ "rewind" r4 + sub r2,r2,r5 @ "rewind" r2 + + and r1,r4,r14 + bic r3,r2,r14 + orr r1,r1,r3 @ ap=borrow?tp:rp + +.Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh + str sp,[r4],#4 @ zap tp + str r7,[r2],#4 + cmp r4,r0 + bne .Lcopy + + add sp,r0,#4 @ skip over tp[num+1] + ldmia sp!,{r4-r12,lr} @ restore registers + add sp,sp,#2*4 @ skip over {r0,r2} + mov r0,#1 +.Labrt: +#if __ARM_ARCH__>=5 + bx lr @ .word 0xe12fff1e +#else + tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif +.size bn_mul_mont,.-bn_mul_mont +#if __ARM_ARCH__>=7 +.fpu neon + +.type bn_mul8x_mont_neon,%function +.align 5 +bn_mul8x_mont_neon: + mov ip,sp + stmdb sp!,{r4-r11} + vstmdb sp!,{d8-d15} @ ABI specification says so + ldmia ip,{r4-r5} @ load rest of parameter block + + sub r7,sp,#16 + vld1.32 {d28[0]}, [r2,:32]! + sub r7,r7,r5,lsl#4 + vld1.32 {d0-d3}, [r1]! @ can't specify :32 :-( + and r7,r7,#-64 + vld1.32 {d30[0]}, [r4,:32] + mov sp,r7 @ alloca + veor d8,d8,d8 + subs r8,r5,#8 + vzip.16 d28,d8 + + vmull.u32 q6,d28,d0[0] + vmull.u32 q7,d28,d0[1] + vmull.u32 q8,d28,d1[0] + vshl.i64 d10,d13,#16 + vmull.u32 q9,d28,d1[1] + + vadd.u64 d10,d10,d12 + veor d8,d8,d8 + vmul.u32 d29,d10,d30 + + vmull.u32 q10,d28,d2[0] + vld1.32 {d4-d7}, [r3]! + vmull.u32 q11,d28,d2[1] + vmull.u32 q12,d28,d3[0] + vzip.16 d29,d8 + vmull.u32 q13,d28,d3[1] + + bne .LNEON_1st + + @ special case for num=8, everything is in register bank... + + vmlal.u32 q6,d29,d4[0] + sub r9,r5,#1 + vmlal.u32 q7,d29,d4[1] + vmlal.u32 q8,d29,d5[0] + vmlal.u32 q9,d29,d5[1] + + vmlal.u32 q10,d29,d6[0] + vmov q5,q6 + vmlal.u32 q11,d29,d6[1] + vmov q6,q7 + vmlal.u32 q12,d29,d7[0] + vmov q7,q8 + vmlal.u32 q13,d29,d7[1] + vmov q8,q9 + vmov q9,q10 + vshr.u64 d10,d10,#16 + vmov q10,q11 + vmov q11,q12 + vadd.u64 d10,d10,d11 + vmov q12,q13 + veor q13,q13 + vshr.u64 d10,d10,#16 + + b .LNEON_outer8 + +.align 4 +.LNEON_outer8: + vld1.32 {d28[0]}, [r2,:32]! 
+ veor d8,d8,d8 + vzip.16 d28,d8 + vadd.u64 d12,d12,d10 + + vmlal.u32 q6,d28,d0[0] + vmlal.u32 q7,d28,d0[1] + vmlal.u32 q8,d28,d1[0] + vshl.i64 d10,d13,#16 + vmlal.u32 q9,d28,d1[1] + + vadd.u64 d10,d10,d12 + veor d8,d8,d8 + subs r9,r9,#1 + vmul.u32 d29,d10,d30 + + vmlal.u32 q10,d28,d2[0] + vmlal.u32 q11,d28,d2[1] + vmlal.u32 q12,d28,d3[0] + vzip.16 d29,d8 + vmlal.u32 q13,d28,d3[1] + + vmlal.u32 q6,d29,d4[0] + vmlal.u32 q7,d29,d4[1] + vmlal.u32 q8,d29,d5[0] + vmlal.u32 q9,d29,d5[1] + + vmlal.u32 q10,d29,d6[0] + vmov q5,q6 + vmlal.u32 q11,d29,d6[1] + vmov q6,q7 + vmlal.u32 q12,d29,d7[0] + vmov q7,q8 + vmlal.u32 q13,d29,d7[1] + vmov q8,q9 + vmov q9,q10 + vshr.u64 d10,d10,#16 + vmov q10,q11 + vmov q11,q12 + vadd.u64 d10,d10,d11 + vmov q12,q13 + veor q13,q13 + vshr.u64 d10,d10,#16 + + bne .LNEON_outer8 + + vadd.u64 d12,d12,d10 + mov r7,sp + vshr.u64 d10,d12,#16 + mov r8,r5 + vadd.u64 d13,d13,d10 + add r6,sp,#16 + vshr.u64 d10,d13,#16 + vzip.16 d12,d13 + + b .LNEON_tail2 + +.align 4 +.LNEON_1st: + vmlal.u32 q6,d29,d4[0] + vld1.32 {d0-d3}, [r1]! + vmlal.u32 q7,d29,d4[1] + subs r8,r8,#8 + vmlal.u32 q8,d29,d5[0] + vmlal.u32 q9,d29,d5[1] + + vmlal.u32 q10,d29,d6[0] + vld1.32 {d4-d5}, [r3]! + vmlal.u32 q11,d29,d6[1] + vst1.64 {q6-q7}, [r7,:256]! + vmlal.u32 q12,d29,d7[0] + vmlal.u32 q13,d29,d7[1] + vst1.64 {q8-q9}, [r7,:256]! + + vmull.u32 q6,d28,d0[0] + vld1.32 {d6-d7}, [r3]! + vmull.u32 q7,d28,d0[1] + vst1.64 {q10-q11}, [r7,:256]! + vmull.u32 q8,d28,d1[0] + vmull.u32 q9,d28,d1[1] + vst1.64 {q12-q13}, [r7,:256]! + + vmull.u32 q10,d28,d2[0] + vmull.u32 q11,d28,d2[1] + vmull.u32 q12,d28,d3[0] + vmull.u32 q13,d28,d3[1] + + bne .LNEON_1st + + vmlal.u32 q6,d29,d4[0] + add r6,sp,#16 + vmlal.u32 q7,d29,d4[1] + sub r1,r1,r5,lsl#2 @ rewind r1 + vmlal.u32 q8,d29,d5[0] + vld1.64 {q5}, [sp,:128] + vmlal.u32 q9,d29,d5[1] + sub r9,r5,#1 + + vmlal.u32 q10,d29,d6[0] + vst1.64 {q6-q7}, [r7,:256]! + vmlal.u32 q11,d29,d6[1] + vshr.u64 d10,d10,#16 + vld1.64 {q6}, [r6, :128]! + vmlal.u32 q12,d29,d7[0] + vst1.64 {q8-q9}, [r7,:256]! + vmlal.u32 q13,d29,d7[1] + + vst1.64 {q10-q11}, [r7,:256]! + vadd.u64 d10,d10,d11 + veor q4,q4,q4 + vst1.64 {q12-q13}, [r7,:256]! + vld1.64 {q7-q8}, [r6, :256]! + vst1.64 {q4}, [r7,:128] + vshr.u64 d10,d10,#16 + + b .LNEON_outer + +.align 4 +.LNEON_outer: + vld1.32 {d28[0]}, [r2,:32]! + sub r3,r3,r5,lsl#2 @ rewind r3 + vld1.32 {d0-d3}, [r1]! + veor d8,d8,d8 + mov r7,sp + vzip.16 d28,d8 + sub r8,r5,#8 + vadd.u64 d12,d12,d10 + + vmlal.u32 q6,d28,d0[0] + vld1.64 {q9-q10},[r6,:256]! + vmlal.u32 q7,d28,d0[1] + vmlal.u32 q8,d28,d1[0] + vld1.64 {q11-q12},[r6,:256]! + vmlal.u32 q9,d28,d1[1] + + vshl.i64 d10,d13,#16 + veor d8,d8,d8 + vadd.u64 d10,d10,d12 + vld1.64 {q13},[r6,:128]! + vmul.u32 d29,d10,d30 + + vmlal.u32 q10,d28,d2[0] + vld1.32 {d4-d7}, [r3]! + vmlal.u32 q11,d28,d2[1] + vmlal.u32 q12,d28,d3[0] + vzip.16 d29,d8 + vmlal.u32 q13,d28,d3[1] + +.LNEON_inner: + vmlal.u32 q6,d29,d4[0] + vld1.32 {d0-d3}, [r1]! + vmlal.u32 q7,d29,d4[1] + subs r8,r8,#8 + vmlal.u32 q8,d29,d5[0] + vmlal.u32 q9,d29,d5[1] + vst1.64 {q6-q7}, [r7,:256]! + + vmlal.u32 q10,d29,d6[0] + vld1.64 {q6}, [r6, :128]! + vmlal.u32 q11,d29,d6[1] + vst1.64 {q8-q9}, [r7,:256]! + vmlal.u32 q12,d29,d7[0] + vld1.64 {q7-q8}, [r6, :256]! + vmlal.u32 q13,d29,d7[1] + vst1.64 {q10-q11}, [r7,:256]! + + vmlal.u32 q6,d28,d0[0] + vld1.64 {q9-q10}, [r6, :256]! + vmlal.u32 q7,d28,d0[1] + vst1.64 {q12-q13}, [r7,:256]! + vmlal.u32 q8,d28,d1[0] + vld1.64 {q11-q12}, [r6, :256]! + vmlal.u32 q9,d28,d1[1] + vld1.32 {d4-d7}, [r3]! 
+ + vmlal.u32 q10,d28,d2[0] + vld1.64 {q13}, [r6, :128]! + vmlal.u32 q11,d28,d2[1] + vmlal.u32 q12,d28,d3[0] + vmlal.u32 q13,d28,d3[1] + + bne .LNEON_inner + + vmlal.u32 q6,d29,d4[0] + add r6,sp,#16 + vmlal.u32 q7,d29,d4[1] + sub r1,r1,r5,lsl#2 @ rewind r1 + vmlal.u32 q8,d29,d5[0] + vld1.64 {q5}, [sp,:128] + vmlal.u32 q9,d29,d5[1] + subs r9,r9,#1 + + vmlal.u32 q10,d29,d6[0] + vst1.64 {q6-q7}, [r7,:256]! + vmlal.u32 q11,d29,d6[1] + vld1.64 {q6}, [r6, :128]! + vshr.u64 d10,d10,#16 + vst1.64 {q8-q9}, [r7,:256]! + vmlal.u32 q12,d29,d7[0] + vld1.64 {q7-q8}, [r6, :256]! + vmlal.u32 q13,d29,d7[1] + + vst1.64 {q10-q11}, [r7,:256]! + vadd.u64 d10,d10,d11 + vst1.64 {q12-q13}, [r7,:256]! + vshr.u64 d10,d10,#16 + + bne .LNEON_outer + + mov r7,sp + mov r8,r5 + +.LNEON_tail: + vadd.u64 d12,d12,d10 + vld1.64 {q9-q10}, [r6, :256]! + vshr.u64 d10,d12,#16 + vadd.u64 d13,d13,d10 + vld1.64 {q11-q12}, [r6, :256]! + vshr.u64 d10,d13,#16 + vld1.64 {q13}, [r6, :128]! + vzip.16 d12,d13 + +.LNEON_tail2: + vadd.u64 d14,d14,d10 + vst1.32 {d12[0]}, [r7, :32]! + vshr.u64 d10,d14,#16 + vadd.u64 d15,d15,d10 + vshr.u64 d10,d15,#16 + vzip.16 d14,d15 + + vadd.u64 d16,d16,d10 + vst1.32 {d14[0]}, [r7, :32]! + vshr.u64 d10,d16,#16 + vadd.u64 d17,d17,d10 + vshr.u64 d10,d17,#16 + vzip.16 d16,d17 + + vadd.u64 d18,d18,d10 + vst1.32 {d16[0]}, [r7, :32]! + vshr.u64 d10,d18,#16 + vadd.u64 d19,d19,d10 + vshr.u64 d10,d19,#16 + vzip.16 d18,d19 + + vadd.u64 d20,d20,d10 + vst1.32 {d18[0]}, [r7, :32]! + vshr.u64 d10,d20,#16 + vadd.u64 d21,d21,d10 + vshr.u64 d10,d21,#16 + vzip.16 d20,d21 + + vadd.u64 d22,d22,d10 + vst1.32 {d20[0]}, [r7, :32]! + vshr.u64 d10,d22,#16 + vadd.u64 d23,d23,d10 + vshr.u64 d10,d23,#16 + vzip.16 d22,d23 + + vadd.u64 d24,d24,d10 + vst1.32 {d22[0]}, [r7, :32]! + vshr.u64 d10,d24,#16 + vadd.u64 d25,d25,d10 + vld1.64 {q6}, [r6, :128]! + vshr.u64 d10,d25,#16 + vzip.16 d24,d25 + + vadd.u64 d26,d26,d10 + vst1.32 {d24[0]}, [r7, :32]! + vshr.u64 d10,d26,#16 + vadd.u64 d27,d27,d10 + vld1.64 {q7-q8}, [r6, :256]! + vshr.u64 d10,d27,#16 + vzip.16 d26,d27 + subs r8,r8,#8 + vst1.32 {d26[0]}, [r7, :32]! + + bne .LNEON_tail + + vst1.32 {d10[0]}, [r7, :32] @ top-most bit + sub r3,r3,r5,lsl#2 @ rewind r3 + subs r1,sp,#0 @ clear carry flag + add r2,sp,r5,lsl#2 + +.LNEON_sub: + ldmia r1!, {r4-r7} + ldmia r3!, {r8-r11} + sbcs r8, r4,r8 + sbcs r9, r5,r9 + sbcs r10,r6,r10 + sbcs r11,r7,r11 + teq r1,r2 @ preserves carry + stmia r0!, {r8-r11} + bne .LNEON_sub + + ldr r10, [r1] @ load top-most bit + veor q0,q0,q0 + sub r11,r2,sp @ this is num*4 + veor q1,q1,q1 + mov r1,sp + sub r0,r0,r11 @ rewind r0 + mov r3,r2 @ second 3/4th of frame + sbcs r10,r10,#0 @ result is carry flag + +.LNEON_copy_n_zap: + ldmia r1!, {r4-r7} + ldmia r0, {r8-r11} + movcc r8, r4 + vst1.64 {q0-q1}, [r3,:256]! @ wipe + movcc r9, r5 + movcc r10,r6 + vst1.64 {q0-q1}, [r3,:256]! @ wipe + movcc r11,r7 + ldmia r1, {r4-r7} + stmia r0!, {r8-r11} + sub r1,r1,#16 + ldmia r0, {r8-r11} + movcc r8, r4 + vst1.64 {q0-q1}, [r1,:256]! @ wipe + movcc r9, r5 + movcc r10,r6 + vst1.64 {q0-q1}, [r3,:256]! 
@ wipe + movcc r11,r7 + teq r1,r2 @ preserves carry + stmia r0!, {r8-r11} + bne .LNEON_copy_n_zap + + sub sp,ip,#96 + vldmia sp!,{d8-d15} + ldmia sp!,{r4-r11} + bx lr @ .word 0xe12fff1e +.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon +#endif +.asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>" +.align 2 +#if __ARM_ARCH__>=7 +.comm OPENSSL_armcap_P,4,4 +#endif diff --git a/app/openssl/crypto/bn/asm/armv4-mont.pl b/app/openssl/crypto/bn/asm/armv4-mont.pl index f78a8b5f..72bad8e3 100644 --- a/app/openssl/crypto/bn/asm/armv4-mont.pl +++ b/app/openssl/crypto/bn/asm/armv4-mont.pl @@ -1,7 +1,7 @@ #!/usr/bin/env perl # ==================================================================== -# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL # project. The module is, however, dual licensed under OpenSSL and # CRYPTOGAMS licenses depending on where you obtain it. For further # details see http://www.openssl.org/~appro/cryptogams/. @@ -23,6 +23,21 @@ # than 1/2KB. Windows CE port would be trivial, as it's exclusively # about decorations, ABI and instruction syntax are identical. +# November 2013 +# +# Add NEON code path, which handles lengths divisible by 8. RSA/DSA +# performance improvement on Cortex-A8 is ~45-100% depending on key +# length, more for longer keys. On Cortex-A15 the span is ~10-105%. +# On Snapdragon S4 improvement was measured to vary from ~70% to +# incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is +# rather because original integer-only code seems to perform +# suboptimally on S4. Situation on Cortex-A9 is unfortunately +# different. It's being looked into, but the trouble is that +# performance for vectors longer than 256 bits is actually couple +# of percent worse than for integer-only code. The code is chosen +# for execution on all NEON-capable processors, because gain on +# others outweighs the marginal loss on Cortex-A9. + while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; @@ -52,16 +67,40 @@ $_n0="$num,#14*4"; $_num="$num,#15*4"; $_bpend=$_num; $code=<<___; +#include "arm_arch.h" + .text +.code 32 + +#if __ARM_ARCH__>=7 +.align 5 +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-bn_mul_mont +#endif .global bn_mul_mont .type bn_mul_mont,%function -.align 2 +.align 5 bn_mul_mont: + ldr ip,[sp,#4] @ load num stmdb sp!,{r0,r2} @ sp points at argument block - ldr $num,[sp,#3*4] @ load num - cmp $num,#2 +#if __ARM_ARCH__>=7 + tst ip,#7 + bne .Lialu + adr r0,bn_mul_mont + ldr r2,.LOPENSSL_armcap + ldr r0,[r0,r2] + tst r0,#1 @ NEON available? 
+ ldmia sp, {r0,r2} + beq .Lialu + add sp,sp,#8 + b bn_mul8x_mont_neon +.align 4 +.Lialu: +#endif + cmp ip,#2 + mov $num,ip @ load num movlt r0,#0 addlt sp,sp,#2*4 blt .Labrt @@ -191,14 +230,446 @@ bn_mul_mont: ldmia sp!,{r4-r12,lr} @ restore registers add sp,sp,#2*4 @ skip over {r0,r2} mov r0,#1 -.Labrt: tst lr,#1 +.Labrt: +#if __ARM_ARCH__>=5 + ret @ bx lr +#else + tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) +#endif .size bn_mul_mont,.-bn_mul_mont -.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro\@openssl.org>" +___ +{ +sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; } +sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; } + +my ($A0,$A1,$A2,$A3)=map("d$_",(0..3)); +my ($N0,$N1,$N2,$N3)=map("d$_",(4..7)); +my ($Z,$Temp)=("q4","q5"); +my ($A0xB,$A1xB,$A2xB,$A3xB,$A4xB,$A5xB,$A6xB,$A7xB)=map("q$_",(6..13)); +my ($Bi,$Ni,$M0)=map("d$_",(28..31)); +my $zero=&Dlo($Z); +my $temp=&Dlo($Temp); + +my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5)); +my ($tinptr,$toutptr,$inner,$outer)=map("r$_",(6..9)); + +$code.=<<___; +#if __ARM_ARCH__>=7 +.fpu neon + +.type bn_mul8x_mont_neon,%function +.align 5 +bn_mul8x_mont_neon: + mov ip,sp + stmdb sp!,{r4-r11} + vstmdb sp!,{d8-d15} @ ABI specification says so + ldmia ip,{r4-r5} @ load rest of parameter block + + sub $toutptr,sp,#16 + vld1.32 {${Bi}[0]}, [$bptr,:32]! + sub $toutptr,$toutptr,$num,lsl#4 + vld1.32 {$A0-$A3}, [$aptr]! @ can't specify :32 :-( + and $toutptr,$toutptr,#-64 + vld1.32 {${M0}[0]}, [$n0,:32] + mov sp,$toutptr @ alloca + veor $zero,$zero,$zero + subs $inner,$num,#8 + vzip.16 $Bi,$zero + + vmull.u32 $A0xB,$Bi,${A0}[0] + vmull.u32 $A1xB,$Bi,${A0}[1] + vmull.u32 $A2xB,$Bi,${A1}[0] + vshl.i64 $temp,`&Dhi("$A0xB")`,#16 + vmull.u32 $A3xB,$Bi,${A1}[1] + + vadd.u64 $temp,$temp,`&Dlo("$A0xB")` + veor $zero,$zero,$zero + vmul.u32 $Ni,$temp,$M0 + + vmull.u32 $A4xB,$Bi,${A2}[0] + vld1.32 {$N0-$N3}, [$nptr]! + vmull.u32 $A5xB,$Bi,${A2}[1] + vmull.u32 $A6xB,$Bi,${A3}[0] + vzip.16 $Ni,$zero + vmull.u32 $A7xB,$Bi,${A3}[1] + + bne .LNEON_1st + + @ special case for num=8, everything is in register bank... + + vmlal.u32 $A0xB,$Ni,${N0}[0] + sub $outer,$num,#1 + vmlal.u32 $A1xB,$Ni,${N0}[1] + vmlal.u32 $A2xB,$Ni,${N1}[0] + vmlal.u32 $A3xB,$Ni,${N1}[1] + + vmlal.u32 $A4xB,$Ni,${N2}[0] + vmov $Temp,$A0xB + vmlal.u32 $A5xB,$Ni,${N2}[1] + vmov $A0xB,$A1xB + vmlal.u32 $A6xB,$Ni,${N3}[0] + vmov $A1xB,$A2xB + vmlal.u32 $A7xB,$Ni,${N3}[1] + vmov $A2xB,$A3xB + vmov $A3xB,$A4xB + vshr.u64 $temp,$temp,#16 + vmov $A4xB,$A5xB + vmov $A5xB,$A6xB + vadd.u64 $temp,$temp,`&Dhi("$Temp")` + vmov $A6xB,$A7xB + veor $A7xB,$A7xB + vshr.u64 $temp,$temp,#16 + + b .LNEON_outer8 + +.align 4 +.LNEON_outer8: + vld1.32 {${Bi}[0]}, [$bptr,:32]! 
+ veor $zero,$zero,$zero + vzip.16 $Bi,$zero + vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp + + vmlal.u32 $A0xB,$Bi,${A0}[0] + vmlal.u32 $A1xB,$Bi,${A0}[1] + vmlal.u32 $A2xB,$Bi,${A1}[0] + vshl.i64 $temp,`&Dhi("$A0xB")`,#16 + vmlal.u32 $A3xB,$Bi,${A1}[1] + + vadd.u64 $temp,$temp,`&Dlo("$A0xB")` + veor $zero,$zero,$zero + subs $outer,$outer,#1 + vmul.u32 $Ni,$temp,$M0 + + vmlal.u32 $A4xB,$Bi,${A2}[0] + vmlal.u32 $A5xB,$Bi,${A2}[1] + vmlal.u32 $A6xB,$Bi,${A3}[0] + vzip.16 $Ni,$zero + vmlal.u32 $A7xB,$Bi,${A3}[1] + + vmlal.u32 $A0xB,$Ni,${N0}[0] + vmlal.u32 $A1xB,$Ni,${N0}[1] + vmlal.u32 $A2xB,$Ni,${N1}[0] + vmlal.u32 $A3xB,$Ni,${N1}[1] + + vmlal.u32 $A4xB,$Ni,${N2}[0] + vmov $Temp,$A0xB + vmlal.u32 $A5xB,$Ni,${N2}[1] + vmov $A0xB,$A1xB + vmlal.u32 $A6xB,$Ni,${N3}[0] + vmov $A1xB,$A2xB + vmlal.u32 $A7xB,$Ni,${N3}[1] + vmov $A2xB,$A3xB + vmov $A3xB,$A4xB + vshr.u64 $temp,$temp,#16 + vmov $A4xB,$A5xB + vmov $A5xB,$A6xB + vadd.u64 $temp,$temp,`&Dhi("$Temp")` + vmov $A6xB,$A7xB + veor $A7xB,$A7xB + vshr.u64 $temp,$temp,#16 + + bne .LNEON_outer8 + + vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp + mov $toutptr,sp + vshr.u64 $temp,`&Dlo("$A0xB")`,#16 + mov $inner,$num + vadd.u64 `&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp + add $tinptr,sp,#16 + vshr.u64 $temp,`&Dhi("$A0xB")`,#16 + vzip.16 `&Dlo("$A0xB")`,`&Dhi("$A0xB")` + + b .LNEON_tail2 + +.align 4 +.LNEON_1st: + vmlal.u32 $A0xB,$Ni,${N0}[0] + vld1.32 {$A0-$A3}, [$aptr]! + vmlal.u32 $A1xB,$Ni,${N0}[1] + subs $inner,$inner,#8 + vmlal.u32 $A2xB,$Ni,${N1}[0] + vmlal.u32 $A3xB,$Ni,${N1}[1] + + vmlal.u32 $A4xB,$Ni,${N2}[0] + vld1.32 {$N0-$N1}, [$nptr]! + vmlal.u32 $A5xB,$Ni,${N2}[1] + vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]! + vmlal.u32 $A6xB,$Ni,${N3}[0] + vmlal.u32 $A7xB,$Ni,${N3}[1] + vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]! + + vmull.u32 $A0xB,$Bi,${A0}[0] + vld1.32 {$N2-$N3}, [$nptr]! + vmull.u32 $A1xB,$Bi,${A0}[1] + vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]! + vmull.u32 $A2xB,$Bi,${A1}[0] + vmull.u32 $A3xB,$Bi,${A1}[1] + vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]! + + vmull.u32 $A4xB,$Bi,${A2}[0] + vmull.u32 $A5xB,$Bi,${A2}[1] + vmull.u32 $A6xB,$Bi,${A3}[0] + vmull.u32 $A7xB,$Bi,${A3}[1] + + bne .LNEON_1st + + vmlal.u32 $A0xB,$Ni,${N0}[0] + add $tinptr,sp,#16 + vmlal.u32 $A1xB,$Ni,${N0}[1] + sub $aptr,$aptr,$num,lsl#2 @ rewind $aptr + vmlal.u32 $A2xB,$Ni,${N1}[0] + vld1.64 {$Temp}, [sp,:128] + vmlal.u32 $A3xB,$Ni,${N1}[1] + sub $outer,$num,#1 + + vmlal.u32 $A4xB,$Ni,${N2}[0] + vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]! + vmlal.u32 $A5xB,$Ni,${N2}[1] + vshr.u64 $temp,$temp,#16 + vld1.64 {$A0xB}, [$tinptr, :128]! + vmlal.u32 $A6xB,$Ni,${N3}[0] + vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]! + vmlal.u32 $A7xB,$Ni,${N3}[1] + + vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]! + vadd.u64 $temp,$temp,`&Dhi("$Temp")` + veor $Z,$Z,$Z + vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]! + vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]! + vst1.64 {$Z}, [$toutptr,:128] + vshr.u64 $temp,$temp,#16 + + b .LNEON_outer + +.align 4 +.LNEON_outer: + vld1.32 {${Bi}[0]}, [$bptr,:32]! + sub $nptr,$nptr,$num,lsl#2 @ rewind $nptr + vld1.32 {$A0-$A3}, [$aptr]! + veor $zero,$zero,$zero + mov $toutptr,sp + vzip.16 $Bi,$zero + sub $inner,$num,#8 + vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp + + vmlal.u32 $A0xB,$Bi,${A0}[0] + vld1.64 {$A3xB-$A4xB},[$tinptr,:256]! + vmlal.u32 $A1xB,$Bi,${A0}[1] + vmlal.u32 $A2xB,$Bi,${A1}[0] + vld1.64 {$A5xB-$A6xB},[$tinptr,:256]! 
+ vmlal.u32 $A3xB,$Bi,${A1}[1] + + vshl.i64 $temp,`&Dhi("$A0xB")`,#16 + veor $zero,$zero,$zero + vadd.u64 $temp,$temp,`&Dlo("$A0xB")` + vld1.64 {$A7xB},[$tinptr,:128]! + vmul.u32 $Ni,$temp,$M0 + + vmlal.u32 $A4xB,$Bi,${A2}[0] + vld1.32 {$N0-$N3}, [$nptr]! + vmlal.u32 $A5xB,$Bi,${A2}[1] + vmlal.u32 $A6xB,$Bi,${A3}[0] + vzip.16 $Ni,$zero + vmlal.u32 $A7xB,$Bi,${A3}[1] + +.LNEON_inner: + vmlal.u32 $A0xB,$Ni,${N0}[0] + vld1.32 {$A0-$A3}, [$aptr]! + vmlal.u32 $A1xB,$Ni,${N0}[1] + subs $inner,$inner,#8 + vmlal.u32 $A2xB,$Ni,${N1}[0] + vmlal.u32 $A3xB,$Ni,${N1}[1] + vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]! + + vmlal.u32 $A4xB,$Ni,${N2}[0] + vld1.64 {$A0xB}, [$tinptr, :128]! + vmlal.u32 $A5xB,$Ni,${N2}[1] + vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]! + vmlal.u32 $A6xB,$Ni,${N3}[0] + vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]! + vmlal.u32 $A7xB,$Ni,${N3}[1] + vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]! + + vmlal.u32 $A0xB,$Bi,${A0}[0] + vld1.64 {$A3xB-$A4xB}, [$tinptr, :256]! + vmlal.u32 $A1xB,$Bi,${A0}[1] + vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]! + vmlal.u32 $A2xB,$Bi,${A1}[0] + vld1.64 {$A5xB-$A6xB}, [$tinptr, :256]! + vmlal.u32 $A3xB,$Bi,${A1}[1] + vld1.32 {$N0-$N3}, [$nptr]! + + vmlal.u32 $A4xB,$Bi,${A2}[0] + vld1.64 {$A7xB}, [$tinptr, :128]! + vmlal.u32 $A5xB,$Bi,${A2}[1] + vmlal.u32 $A6xB,$Bi,${A3}[0] + vmlal.u32 $A7xB,$Bi,${A3}[1] + + bne .LNEON_inner + + vmlal.u32 $A0xB,$Ni,${N0}[0] + add $tinptr,sp,#16 + vmlal.u32 $A1xB,$Ni,${N0}[1] + sub $aptr,$aptr,$num,lsl#2 @ rewind $aptr + vmlal.u32 $A2xB,$Ni,${N1}[0] + vld1.64 {$Temp}, [sp,:128] + vmlal.u32 $A3xB,$Ni,${N1}[1] + subs $outer,$outer,#1 + + vmlal.u32 $A4xB,$Ni,${N2}[0] + vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]! + vmlal.u32 $A5xB,$Ni,${N2}[1] + vld1.64 {$A0xB}, [$tinptr, :128]! + vshr.u64 $temp,$temp,#16 + vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]! + vmlal.u32 $A6xB,$Ni,${N3}[0] + vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]! + vmlal.u32 $A7xB,$Ni,${N3}[1] + + vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]! + vadd.u64 $temp,$temp,`&Dhi("$Temp")` + vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]! + vshr.u64 $temp,$temp,#16 + + bne .LNEON_outer + + mov $toutptr,sp + mov $inner,$num + +.LNEON_tail: + vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp + vld1.64 {$A3xB-$A4xB}, [$tinptr, :256]! + vshr.u64 $temp,`&Dlo("$A0xB")`,#16 + vadd.u64 `&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp + vld1.64 {$A5xB-$A6xB}, [$tinptr, :256]! + vshr.u64 $temp,`&Dhi("$A0xB")`,#16 + vld1.64 {$A7xB}, [$tinptr, :128]! + vzip.16 `&Dlo("$A0xB")`,`&Dhi("$A0xB")` + +.LNEON_tail2: + vadd.u64 `&Dlo("$A1xB")`,`&Dlo("$A1xB")`,$temp + vst1.32 {`&Dlo("$A0xB")`[0]}, [$toutptr, :32]! + vshr.u64 $temp,`&Dlo("$A1xB")`,#16 + vadd.u64 `&Dhi("$A1xB")`,`&Dhi("$A1xB")`,$temp + vshr.u64 $temp,`&Dhi("$A1xB")`,#16 + vzip.16 `&Dlo("$A1xB")`,`&Dhi("$A1xB")` + + vadd.u64 `&Dlo("$A2xB")`,`&Dlo("$A2xB")`,$temp + vst1.32 {`&Dlo("$A1xB")`[0]}, [$toutptr, :32]! + vshr.u64 $temp,`&Dlo("$A2xB")`,#16 + vadd.u64 `&Dhi("$A2xB")`,`&Dhi("$A2xB")`,$temp + vshr.u64 $temp,`&Dhi("$A2xB")`,#16 + vzip.16 `&Dlo("$A2xB")`,`&Dhi("$A2xB")` + + vadd.u64 `&Dlo("$A3xB")`,`&Dlo("$A3xB")`,$temp + vst1.32 {`&Dlo("$A2xB")`[0]}, [$toutptr, :32]! + vshr.u64 $temp,`&Dlo("$A3xB")`,#16 + vadd.u64 `&Dhi("$A3xB")`,`&Dhi("$A3xB")`,$temp + vshr.u64 $temp,`&Dhi("$A3xB")`,#16 + vzip.16 `&Dlo("$A3xB")`,`&Dhi("$A3xB")` + + vadd.u64 `&Dlo("$A4xB")`,`&Dlo("$A4xB")`,$temp + vst1.32 {`&Dlo("$A3xB")`[0]}, [$toutptr, :32]! 
+ vshr.u64 $temp,`&Dlo("$A4xB")`,#16 + vadd.u64 `&Dhi("$A4xB")`,`&Dhi("$A4xB")`,$temp + vshr.u64 $temp,`&Dhi("$A4xB")`,#16 + vzip.16 `&Dlo("$A4xB")`,`&Dhi("$A4xB")` + + vadd.u64 `&Dlo("$A5xB")`,`&Dlo("$A5xB")`,$temp + vst1.32 {`&Dlo("$A4xB")`[0]}, [$toutptr, :32]! + vshr.u64 $temp,`&Dlo("$A5xB")`,#16 + vadd.u64 `&Dhi("$A5xB")`,`&Dhi("$A5xB")`,$temp + vshr.u64 $temp,`&Dhi("$A5xB")`,#16 + vzip.16 `&Dlo("$A5xB")`,`&Dhi("$A5xB")` + + vadd.u64 `&Dlo("$A6xB")`,`&Dlo("$A6xB")`,$temp + vst1.32 {`&Dlo("$A5xB")`[0]}, [$toutptr, :32]! + vshr.u64 $temp,`&Dlo("$A6xB")`,#16 + vadd.u64 `&Dhi("$A6xB")`,`&Dhi("$A6xB")`,$temp + vld1.64 {$A0xB}, [$tinptr, :128]! + vshr.u64 $temp,`&Dhi("$A6xB")`,#16 + vzip.16 `&Dlo("$A6xB")`,`&Dhi("$A6xB")` + + vadd.u64 `&Dlo("$A7xB")`,`&Dlo("$A7xB")`,$temp + vst1.32 {`&Dlo("$A6xB")`[0]}, [$toutptr, :32]! + vshr.u64 $temp,`&Dlo("$A7xB")`,#16 + vadd.u64 `&Dhi("$A7xB")`,`&Dhi("$A7xB")`,$temp + vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]! + vshr.u64 $temp,`&Dhi("$A7xB")`,#16 + vzip.16 `&Dlo("$A7xB")`,`&Dhi("$A7xB")` + subs $inner,$inner,#8 + vst1.32 {`&Dlo("$A7xB")`[0]}, [$toutptr, :32]! + + bne .LNEON_tail + + vst1.32 {${temp}[0]}, [$toutptr, :32] @ top-most bit + sub $nptr,$nptr,$num,lsl#2 @ rewind $nptr + subs $aptr,sp,#0 @ clear carry flag + add $bptr,sp,$num,lsl#2 + +.LNEON_sub: + ldmia $aptr!, {r4-r7} + ldmia $nptr!, {r8-r11} + sbcs r8, r4,r8 + sbcs r9, r5,r9 + sbcs r10,r6,r10 + sbcs r11,r7,r11 + teq $aptr,$bptr @ preserves carry + stmia $rptr!, {r8-r11} + bne .LNEON_sub + + ldr r10, [$aptr] @ load top-most bit + veor q0,q0,q0 + sub r11,$bptr,sp @ this is num*4 + veor q1,q1,q1 + mov $aptr,sp + sub $rptr,$rptr,r11 @ rewind $rptr + mov $nptr,$bptr @ second 3/4th of frame + sbcs r10,r10,#0 @ result is carry flag + +.LNEON_copy_n_zap: + ldmia $aptr!, {r4-r7} + ldmia $rptr, {r8-r11} + movcc r8, r4 + vst1.64 {q0-q1}, [$nptr,:256]! @ wipe + movcc r9, r5 + movcc r10,r6 + vst1.64 {q0-q1}, [$nptr,:256]! @ wipe + movcc r11,r7 + ldmia $aptr, {r4-r7} + stmia $rptr!, {r8-r11} + sub $aptr,$aptr,#16 + ldmia $rptr, {r8-r11} + movcc r8, r4 + vst1.64 {q0-q1}, [$aptr,:256]! @ wipe + movcc r9, r5 + movcc r10,r6 + vst1.64 {q0-q1}, [$nptr,:256]! 
@ wipe + movcc r11,r7 + teq $aptr,$bptr @ preserves carry + stmia $rptr!, {r8-r11} + bne .LNEON_copy_n_zap + + sub sp,ip,#96 + vldmia sp!,{d8-d15} + ldmia sp!,{r4-r11} + ret @ bx lr +.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon +#endif +___ +} +$code.=<<___; +.asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>" .align 2 +#if __ARM_ARCH__>=7 +.comm OPENSSL_armcap_P,4,4 +#endif ___ +$code =~ s/\`([^\`]*)\`/eval $1/gem; $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 +$code =~ s/\bret\b/bx lr/gm; print $code; close STDOUT; diff --git a/app/openssl/crypto/bn/asm/armv4-mont.s b/app/openssl/crypto/bn/asm/armv4-mont.s deleted file mode 100644 index 64c220b5..00000000 --- a/app/openssl/crypto/bn/asm/armv4-mont.s +++ /dev/null @@ -1,145 +0,0 @@ -.text - -.global bn_mul_mont -.type bn_mul_mont,%function - -.align 2 -bn_mul_mont: - stmdb sp!,{r0,r2} @ sp points at argument block - ldr r0,[sp,#3*4] @ load num - cmp r0,#2 - movlt r0,#0 - addlt sp,sp,#2*4 - blt .Labrt - - stmdb sp!,{r4-r12,lr} @ save 10 registers - - mov r0,r0,lsl#2 @ rescale r0 for byte count - sub sp,sp,r0 @ alloca(4*num) - sub sp,sp,#4 @ +extra dword - sub r0,r0,#4 @ "num=num-1" - add r4,r2,r0 @ &bp[num-1] - - add r0,sp,r0 @ r0 to point at &tp[num-1] - ldr r8,[r0,#14*4] @ &n0 - ldr r2,[r2] @ bp[0] - ldr r5,[r1],#4 @ ap[0],ap++ - ldr r6,[r3],#4 @ np[0],np++ - ldr r8,[r8] @ *n0 - str r4,[r0,#15*4] @ save &bp[num] - - umull r10,r11,r5,r2 @ ap[0]*bp[0] - str r8,[r0,#14*4] @ save n0 value - mul r8,r10,r8 @ "tp[0]"*n0 - mov r12,#0 - umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" - mov r4,sp - -.L1st: - ldr r5,[r1],#4 @ ap[j],ap++ - mov r10,r11 - ldr r6,[r3],#4 @ np[j],np++ - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[j]*bp[0] - mov r14,#0 - umlal r12,r14,r6,r8 @ np[j]*n0 - adds r12,r12,r10 - str r12,[r4],#4 @ tp[j-1]=,tp++ - adc r12,r14,#0 - cmp r4,r0 - bne .L1st - - adds r12,r12,r11 - ldr r4,[r0,#13*4] @ restore bp - mov r14,#0 - ldr r8,[r0,#14*4] @ restore n0 - adc r14,r14,#0 - str r12,[r0] @ tp[num-1]= - str r14,[r0,#4] @ tp[num]= - -.Louter: - sub r7,r0,sp @ "original" r0-1 value - sub r1,r1,r7 @ "rewind" ap to &ap[1] - ldr r2,[r4,#4]! 
@ *(++bp) - sub r3,r3,r7 @ "rewind" np to &np[1] - ldr r5,[r1,#-4] @ ap[0] - ldr r10,[sp] @ tp[0] - ldr r6,[r3,#-4] @ np[0] - ldr r7,[sp,#4] @ tp[1] - - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] - str r4,[r0,#13*4] @ save bp - mul r8,r10,r8 - mov r12,#0 - umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" - mov r4,sp - -.Linner: - ldr r5,[r1],#4 @ ap[j],ap++ - adds r10,r11,r7 @ +=tp[j] - ldr r6,[r3],#4 @ np[j],np++ - mov r11,#0 - umlal r10,r11,r5,r2 @ ap[j]*bp[i] - mov r14,#0 - umlal r12,r14,r6,r8 @ np[j]*n0 - adc r11,r11,#0 - ldr r7,[r4,#8] @ tp[j+1] - adds r12,r12,r10 - str r12,[r4],#4 @ tp[j-1]=,tp++ - adc r12,r14,#0 - cmp r4,r0 - bne .Linner - - adds r12,r12,r11 - mov r14,#0 - ldr r4,[r0,#13*4] @ restore bp - adc r14,r14,#0 - ldr r8,[r0,#14*4] @ restore n0 - adds r12,r12,r7 - ldr r7,[r0,#15*4] @ restore &bp[num] - adc r14,r14,#0 - str r12,[r0] @ tp[num-1]= - str r14,[r0,#4] @ tp[num]= - - cmp r4,r7 - bne .Louter - - ldr r2,[r0,#12*4] @ pull rp - add r0,r0,#4 @ r0 to point at &tp[num] - sub r5,r0,sp @ "original" num value - mov r4,sp @ "rewind" r4 - mov r1,r4 @ "borrow" r1 - sub r3,r3,r5 @ "rewind" r3 to &np[0] - - subs r7,r7,r7 @ "clear" carry flag -.Lsub: ldr r7,[r4],#4 - ldr r6,[r3],#4 - sbcs r7,r7,r6 @ tp[j]-np[j] - str r7,[r2],#4 @ rp[j]= - teq r4,r0 @ preserve carry - bne .Lsub - sbcs r14,r14,#0 @ upmost carry - mov r4,sp @ "rewind" r4 - sub r2,r2,r5 @ "rewind" r2 - - and r1,r4,r14 - bic r3,r2,r14 - orr r1,r1,r3 @ ap=borrow?tp:rp - -.Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh - str sp,[r4],#4 @ zap tp - str r7,[r2],#4 - cmp r4,r0 - bne .Lcopy - - add sp,r0,#4 @ skip over tp[num+1] - ldmia sp!,{r4-r12,lr} @ restore registers - add sp,sp,#2*4 @ skip over {r0,r2} - mov r0,#1 -.Labrt: tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -.size bn_mul_mont,.-bn_mul_mont -.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro@openssl.org>" -.align 2 diff --git a/app/openssl/crypto/bn/asm/mips3.s b/app/openssl/crypto/bn/asm/mips3.S index dca4105c..dca4105c 100644 --- a/app/openssl/crypto/bn/asm/mips3.s +++ b/app/openssl/crypto/bn/asm/mips3.S diff --git a/app/openssl/crypto/bn/asm/pa-risc2.s b/app/openssl/crypto/bn/asm/pa-risc2.S index f3b16290..f3b16290 100644 --- a/app/openssl/crypto/bn/asm/pa-risc2.s +++ b/app/openssl/crypto/bn/asm/pa-risc2.S diff --git a/app/openssl/crypto/bn/asm/pa-risc2W.s b/app/openssl/crypto/bn/asm/pa-risc2W.S index a9954575..a9954575 100644 --- a/app/openssl/crypto/bn/asm/pa-risc2W.s +++ b/app/openssl/crypto/bn/asm/pa-risc2W.S diff --git a/app/openssl/crypto/bn/bn_mont.c b/app/openssl/crypto/bn/bn_mont.c index 427b5cf4..ee8532c7 100644 --- a/app/openssl/crypto/bn/bn_mont.c +++ b/app/openssl/crypto/bn/bn_mont.c @@ -478,32 +478,38 @@ BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, BN_MONT_CTX *from) BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, int lock, const BIGNUM *mod, BN_CTX *ctx) { - int got_write_lock = 0; BN_MONT_CTX *ret; CRYPTO_r_lock(lock); - if (!*pmont) + ret = *pmont; + CRYPTO_r_unlock(lock); + if (ret) + return ret; + + /* We don't want to serialise globally while doing our lazy-init math in + * BN_MONT_CTX_set. That punishes threads that are doing independent + * things. Instead, punish the case where more than one thread tries to + * lazy-init the same 'pmont', by having each do the lazy-init math work + * independently and only use the one from the thread that wins the race + * (the losers throw away the work they've done). 
*/ + ret = BN_MONT_CTX_new(); + if (!ret) + return NULL; + if (!BN_MONT_CTX_set(ret, mod, ctx)) { - CRYPTO_r_unlock(lock); - CRYPTO_w_lock(lock); - got_write_lock = 1; + BN_MONT_CTX_free(ret); + return NULL; + } - if (!*pmont) - { - ret = BN_MONT_CTX_new(); - if (ret && !BN_MONT_CTX_set(ret, mod, ctx)) - BN_MONT_CTX_free(ret); - else - *pmont = ret; - } + /* The locked compare-and-set, after the local work is done. */ + CRYPTO_w_lock(lock); + if (*pmont) + { + BN_MONT_CTX_free(ret); + ret = *pmont; } - - ret = *pmont; - - if (got_write_lock) - CRYPTO_w_unlock(lock); else - CRYPTO_r_unlock(lock); - + *pmont = ret; + CRYPTO_w_unlock(lock); return ret; } |
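A note on the bn_mont.c hunk above: the old BN_MONT_CTX_set_locked upgraded its read lock to a write lock and did the expensive setup while serialised; the new code lets each thread run the lazy-init math with no lock held and then keeps only the value installed by whichever thread wins a short write-locked compare-and-set, with losers freeing their work. A minimal sketch of that pattern, assuming POSIX rwlocks and hypothetical ctx_create()/ctx_free() helpers standing in for BN_MONT_CTX_new()/BN_MONT_CTX_set()/BN_MONT_CTX_free():

```c
#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the BN_MONT_CTX allocation/setup calls. */
typedef struct ctx { int ready; } ctx_t;
static ctx_t *ctx_create(void) { return calloc(1, sizeof(ctx_t)); }
static void   ctx_free(ctx_t *c) { free(c); }

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Lazy init without serialising the expensive work: compute a candidate
 * outside the lock, then install it under a short write-locked
 * compare-and-set; the loser discards its candidate. */
ctx_t *ctx_get_locked(ctx_t **pctx)
{
    ctx_t *ret;

    pthread_rwlock_rdlock(&lock);       /* fast path: already set */
    ret = *pctx;
    pthread_rwlock_unlock(&lock);
    if (ret)
        return ret;

    ret = ctx_create();                 /* slow path, no lock held */
    if (!ret)
        return NULL;

    pthread_rwlock_wrlock(&lock);       /* keep only the race winner */
    if (*pctx) {
        ctx_free(ret);
        ret = *pctx;
    } else {
        *pctx = ret;
    }
    pthread_rwlock_unlock(&lock);
    return ret;
}
```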
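Earlier in the patch, the armv4-gf2m changes drop the Karatsuba-style NEON path for bn_GF2m_mul_2x2 (its removed comments spell out the middle-word identity (a0+a1)·(b0+b1) ⊕ a0·b0 ⊕ a1·b1) in favour of the Câmara–Gouvêa–López–Dahab vmull.p8 algorithm. For orientation, here is a plain C sketch of what the function computes, r[3..0] = a1a0·b1b0 over GF(2)[x], using that same Karatsuba identity; the shift-and-XOR clmul32 helper is illustrative only, not the NEON algorithm from the patch, and not constant-time:

```c
#include <stdint.h>

/* Carry-less 32x32 -> 64-bit multiply over GF(2)[x]: plain shift-and-XOR. */
static uint64_t clmul32(uint32_t a, uint32_t b)
{
    uint64_t r = 0;
    for (int i = 0; i < 32; i++)
        if ((b >> i) & 1)
            r ^= (uint64_t)a << i;
    return r;
}

/* r[3..0] = a1a0 * b1b0 over GF(2)[x].  Karatsuba over 32-bit halves:
 * the middle word is (a0+a1)*(b0+b1) + a0*b0 + a1*b1, where + is XOR. */
static void gf2m_mul_2x2(uint32_t r[4], uint32_t a1, uint32_t a0,
                         uint32_t b1, uint32_t b0)
{
    uint64_t lo  = clmul32(a0, b0);
    uint64_t hi  = clmul32(a1, b1);
    uint64_t mid = clmul32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi;

    r[0] = (uint32_t)lo;
    r[1] = (uint32_t)(lo >> 32) ^ (uint32_t)mid;
    r[2] = (uint32_t)(mid >> 32) ^ (uint32_t)hi;
    r[3] = (uint32_t)(hi >> 32);
}
```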
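The integer path of bn_mul_mont in armv4-mont follows the word-wise Montgomery loop its comments describe: accumulate ap[j]*bp[i] into a temporary tp, add np[j]*m with m = tp[0]*n0 so the low word cancels, shift tp down one word, and finish with the conditional subtraction done by the .Lsub/.Lcopy tail. A plain C sketch of that algorithm, assuming 32-bit words and n0 = -np^-1 mod 2^32 (illustration only, not constant-time):

```c
#include <stdint.h>
#include <string.h>

/* Word-wise Montgomery multiplication: rp = ap * bp * R^-1 mod np,
 * with R = 2^(32*num) and n0 = -np^-1 mod 2^32. */
static void mont_mul(uint32_t *rp, const uint32_t *ap, const uint32_t *bp,
                     const uint32_t *np, uint32_t n0, int num)
{
    uint32_t tp[num + 2];               /* C99 VLA, fine for a sketch */
    memset(tp, 0, sizeof(tp));

    for (int i = 0; i < num; i++) {
        /* tp += ap * bp[i] */
        uint64_t c = 0;
        for (int j = 0; j < num; j++) {
            uint64_t t = (uint64_t)ap[j] * bp[i] + tp[j] + c;
            tp[j] = (uint32_t)t;
            c = t >> 32;
        }
        uint64_t t = (uint64_t)tp[num] + c;
        tp[num]     = (uint32_t)t;
        tp[num + 1] = (uint32_t)(t >> 32);

        /* m is chosen so that tp[0] + m*np[0] == 0 mod 2^32 */
        uint32_t m = tp[0] * n0;
        c = ((uint64_t)np[0] * m + tp[0]) >> 32;
        for (int j = 1; j < num; j++) {
            uint64_t u = (uint64_t)np[j] * m + tp[j] + c;
            tp[j - 1] = (uint32_t)u;    /* shift down one word as we go */
            c = u >> 32;
        }
        t = (uint64_t)tp[num] + c;
        tp[num - 1] = (uint32_t)t;
        tp[num]     = tp[num + 1] + (uint32_t)(t >> 32);
    }

    /* Conditional final subtraction: if tp >= np, return tp - np. */
    uint32_t sub[num];
    uint64_t borrow = 0;
    for (int j = 0; j < num; j++) {
        uint64_t d = (uint64_t)tp[j] - np[j] - borrow;
        sub[j] = (uint32_t)d;
        borrow = d >> 63;               /* 1 if the subtraction wrapped */
    }
    int use_sub = tp[num] != 0 || borrow == 0;
    for (int j = 0; j < num; j++)
        rp[j] = use_sub ? sub[j] : tp[j];
}
```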