Diffstat (limited to 'openssl/crypto/bn/asm')
33 files changed, 20158 insertions, 0 deletions
diff --git a/openssl/crypto/bn/asm/README b/openssl/crypto/bn/asm/README new file mode 100644 index 00000000..b0f3a68a --- /dev/null +++ b/openssl/crypto/bn/asm/README @@ -0,0 +1,27 @@ +<OBSOLETE> + +All the assembler in this directory consists of versions of the file +crypto/bn/bn_asm.c. + +Quite a few of these files are just the assembler output from gcc, since on +quite a few machines they are 2 times faster than the system compiler's output. + +For the x86, I have hand-written assembler because of the bad job all +compilers seem to do on it. This normally gives a 2 times speed-up in the RSA +routines. + +For the DEC Alpha, I also hand-wrote the assembler (except the division, which +is just the output from the C compiler pasted on the end of the file). +On the 2 Alpha C compilers I had access to, it was not possible to do +64b x 64b -> 128b calculations (both the long and the long long data types +were 64 bits). So the hand assembler gives access to the 128-bit result and +a 2 times speedup :-). + +There are 3 versions of assembler for the HP PA-RISC. + +pa-risc.s is the original one, which works fine and was generated using gcc :-) + +pa-risc2W.s and pa-risc2.s are 64- and 32-bit PA-RISC 2.0 implementations +by Chris Ruemmler from HP (with some help from the HP C compiler). + +</OBSOLETE> diff --git a/openssl/crypto/bn/asm/alpha-mont.pl b/openssl/crypto/bn/asm/alpha-mont.pl new file mode 100644 index 00000000..03596e20 --- /dev/null +++ b/openssl/crypto/bn/asm/alpha-mont.pl @@ -0,0 +1,321 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# On 21264 RSA sign performance improves by 70/35/20/15 percent for +# 512/1024/2048/4096 bit key lengths. This is relative to code generated +# by the vendor compiler instructed to '-tune host', with in-line +# assembler. Other benchmarks improve by 15-20%. To anchor it to +# something else, the code provides approximately the same performance +# per GHz as AMD64. I.e. if you compare a 1GHz 21264 and a 2GHz Opteron, +# you'll observe a ~2x difference.
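+#
+# For orientation, here is a rough C sketch of the word-by-word ("CIOS")
+# Montgomery multiplication that this module implements. This is an
+# editorial illustration, not OpenSSL's actual C code; it assumes a
+# 64-bit BN_ULONG and a compiler-provided unsigned __int128 type, which
+# is exactly the 64b x 64b -> 128b product the README above says the
+# Alpha C compilers could not express:
+#
+#	#include <string.h>
+#	typedef unsigned long BN_ULONG;	/* 64 bits on Alpha */
+#
+#	int bn_mul_mont_sketch(BN_ULONG *rp, const BN_ULONG *ap,
+#				const BN_ULONG *bp, const BN_ULONG *np,
+#				const BN_ULONG *n0, int num)
+#	{
+#		BN_ULONG tp[num+2], c, m;	/* C99 VLA scratch */
+#		unsigned __int128 t;
+#		int i, j;
+#
+#		memset(tp, 0, sizeof(tp));
+#		for (i = 0; i < num; i++) {
+#			c = 0;			/* tp += bp[i]*ap[] */
+#			for (j = 0; j < num; j++) {
+#				t = (unsigned __int128)ap[j]*bp[i] + tp[j] + c;
+#				tp[j] = (BN_ULONG)t;
+#				c = (BN_ULONG)(t>>64);
+#			}
+#			t = (unsigned __int128)tp[num] + c;
+#			tp[num] = (BN_ULONG)t;
+#			tp[num+1] = (BN_ULONG)(t>>64);
+#
+#			m = tp[0]*n0[0];	/* tp = (tp + m*np)/2^64 */
+#			t = (unsigned __int128)np[0]*m + tp[0];
+#			c = (BN_ULONG)(t>>64);
+#			for (j = 1; j < num; j++) {
+#				t = (unsigned __int128)np[j]*m + tp[j] + c;
+#				tp[j-1] = (BN_ULONG)t;
+#				c = (BN_ULONG)(t>>64);
+#			}
+#			t = (unsigned __int128)tp[num] + c;
+#			tp[num-1] = (BN_ULONG)t;
+#			tp[num] = tp[num+1] + (BN_ULONG)(t>>64);
+#		}
+#		c = 0;				/* rp = tp - np, with borrow */
+#		for (i = 0; i < num; i++) {
+#			t = (unsigned __int128)tp[i] - np[i] - c;
+#			rp[i] = (BN_ULONG)t;
+#			c = (BN_ULONG)(t>>64) & 1;
+#		}
+#		if (tp[num] < c)		/* tp < np: keep tp as is */
+#			memcpy(rp, tp, num*sizeof(BN_ULONG));
+#		return 1;
+#	}
+#
+# The assembler below performs the same computation, but interleaves the
+# ap[]*bp[i] and np[]*m passes and keeps each 128-bit product in a
+# mulq/umulh register pair.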
+ +# int bn_mul_mont( +$rp="a0"; # BN_ULONG *rp, +$ap="a1"; # const BN_ULONG *ap, +$bp="a2"; # const BN_ULONG *bp, +$np="a3"; # const BN_ULONG *np, +$n0="a4"; # const BN_ULONG *n0, +$num="a5"; # int num); + +$lo0="t0"; +$hi0="t1"; +$lo1="t2"; +$hi1="t3"; +$aj="t4"; +$bi="t5"; +$nj="t6"; +$tp="t7"; +$alo="t8"; +$ahi="t9"; +$nlo="t10"; +$nhi="t11"; +$tj="t12"; +$i="s3"; +$j="s4"; +$m1="s5"; + +$code=<<___; +#ifdef __linux__ +#include <asm/regdef.h> +#else +#include <asm.h> +#include <regdef.h> +#endif + +.text + +.set noat +.set noreorder + +.globl bn_mul_mont +.align 5 +.ent bn_mul_mont +bn_mul_mont: + lda sp,-48(sp) + stq ra,0(sp) + stq s3,8(sp) + stq s4,16(sp) + stq s5,24(sp) + stq fp,32(sp) + mov sp,fp + .mask 0x0400f000,-48 + .frame fp,48,ra + .prologue 0 + + .align 4 + .set reorder + sextl $num,$num + mov 0,v0 + cmplt $num,4,AT + bne AT,.Lexit + + ldq $hi0,0($ap) # ap[0] + s8addq $num,16,AT + ldq $aj,8($ap) + subq sp,AT,sp + ldq $bi,0($bp) # bp[0] + lda AT,-4096(zero) # mov -4096,AT + ldq $n0,0($n0) + and sp,AT,sp + + mulq $hi0,$bi,$lo0 + ldq $hi1,0($np) # np[0] + umulh $hi0,$bi,$hi0 + ldq $nj,8($np) + + mulq $lo0,$n0,$m1 + + mulq $hi1,$m1,$lo1 + umulh $hi1,$m1,$hi1 + + addq $lo1,$lo0,$lo1 + cmpult $lo1,$lo0,AT + addq $hi1,AT,$hi1 + + mulq $aj,$bi,$alo + mov 2,$j + umulh $aj,$bi,$ahi + mov sp,$tp + + mulq $nj,$m1,$nlo + s8addq $j,$ap,$aj + umulh $nj,$m1,$nhi + s8addq $j,$np,$nj +.align 4 +.L1st: + .set noreorder + ldq $aj,0($aj) + addl $j,1,$j + ldq $nj,0($nj) + lda $tp,8($tp) + + addq $alo,$hi0,$lo0 + mulq $aj,$bi,$alo + cmpult $lo0,$hi0,AT + addq $nlo,$hi1,$lo1 + + mulq $nj,$m1,$nlo + addq $ahi,AT,$hi0 + cmpult $lo1,$hi1,v0 + cmplt $j,$num,$tj + + umulh $aj,$bi,$ahi + addq $nhi,v0,$hi1 + addq $lo1,$lo0,$lo1 + s8addq $j,$ap,$aj + + umulh $nj,$m1,$nhi + cmpult $lo1,$lo0,v0 + addq $hi1,v0,$hi1 + s8addq $j,$np,$nj + + stq $lo1,-8($tp) + nop + unop + bne $tj,.L1st + .set reorder + + addq $alo,$hi0,$lo0 + addq $nlo,$hi1,$lo1 + cmpult $lo0,$hi0,AT + cmpult $lo1,$hi1,v0 + addq $ahi,AT,$hi0 + addq $nhi,v0,$hi1 + + addq $lo1,$lo0,$lo1 + cmpult $lo1,$lo0,v0 + addq $hi1,v0,$hi1 + + stq $lo1,0($tp) + + addq $hi1,$hi0,$hi1 + cmpult $hi1,$hi0,AT + stq $hi1,8($tp) + stq AT,16($tp) + + mov 1,$i +.align 4 +.Louter: + s8addq $i,$bp,$bi + ldq $hi0,0($ap) + ldq $aj,8($ap) + ldq $bi,0($bi) + ldq $hi1,0($np) + ldq $nj,8($np) + ldq $tj,0(sp) + + mulq $hi0,$bi,$lo0 + umulh $hi0,$bi,$hi0 + + addq $lo0,$tj,$lo0 + cmpult $lo0,$tj,AT + addq $hi0,AT,$hi0 + + mulq $lo0,$n0,$m1 + + mulq $hi1,$m1,$lo1 + umulh $hi1,$m1,$hi1 + + addq $lo1,$lo0,$lo1 + cmpult $lo1,$lo0,AT + mov 2,$j + addq $hi1,AT,$hi1 + + mulq $aj,$bi,$alo + mov sp,$tp + umulh $aj,$bi,$ahi + + mulq $nj,$m1,$nlo + s8addq $j,$ap,$aj + umulh $nj,$m1,$nhi +.align 4 +.Linner: + .set noreorder + ldq $tj,8($tp) #L0 + nop #U1 + ldq $aj,0($aj) #L1 + s8addq $j,$np,$nj #U0 + + ldq $nj,0($nj) #L0 + nop #U1 + addq $alo,$hi0,$lo0 #L1 + lda $tp,8($tp) + + mulq $aj,$bi,$alo #U1 + cmpult $lo0,$hi0,AT #L0 + addq $nlo,$hi1,$lo1 #L1 + addl $j,1,$j + + mulq $nj,$m1,$nlo #U1 + addq $ahi,AT,$hi0 #L0 + addq $lo0,$tj,$lo0 #L1 + cmpult $lo1,$hi1,v0 #U0 + + umulh $aj,$bi,$ahi #U1 + cmpult $lo0,$tj,AT #L0 + addq $lo1,$lo0,$lo1 #L1 + addq $nhi,v0,$hi1 #U0 + + umulh $nj,$m1,$nhi #U1 + s8addq $j,$ap,$aj #L0 + cmpult $lo1,$lo0,v0 #L1 + cmplt $j,$num,$tj #U0 # borrow $tj + + addq $hi0,AT,$hi0 #L0 + addq $hi1,v0,$hi1 #U1 + stq $lo1,-8($tp) #L1 + bne $tj,.Linner #U0 + .set reorder + + ldq $tj,8($tp) + addq $alo,$hi0,$lo0 + addq $nlo,$hi1,$lo1 + cmpult $lo0,$hi0,AT + cmpult $lo1,$hi1,v0 
+ addq $ahi,AT,$hi0 + addq $nhi,v0,$hi1 + + addq $lo0,$tj,$lo0 + cmpult $lo0,$tj,AT + addq $hi0,AT,$hi0 + + ldq $tj,16($tp) + addq $lo1,$lo0,$j + cmpult $j,$lo0,v0 + addq $hi1,v0,$hi1 + + addq $hi1,$hi0,$lo1 + stq $j,0($tp) + cmpult $lo1,$hi0,$hi1 + addq $lo1,$tj,$lo1 + cmpult $lo1,$tj,AT + addl $i,1,$i + addq $hi1,AT,$hi1 + stq $lo1,8($tp) + cmplt $i,$num,$tj # borrow $tj + stq $hi1,16($tp) + bne $tj,.Louter + + s8addq $num,sp,$tj # &tp[num] + mov $rp,$bp # put rp aside + mov sp,$tp + mov sp,$ap + mov 0,$hi0 # clear borrow bit + +.align 4 +.Lsub: ldq $lo0,0($tp) + ldq $lo1,0($np) + lda $tp,8($tp) + lda $np,8($np) + subq $lo0,$lo1,$lo1 # tp[i]-np[i] + cmpult $lo0,$lo1,AT + subq $lo1,$hi0,$lo0 + cmpult $lo1,$lo0,$hi0 + or $hi0,AT,$hi0 + stq $lo0,0($rp) + cmpult $tp,$tj,v0 + lda $rp,8($rp) + bne v0,.Lsub + + subq $hi1,$hi0,$hi0 # handle upmost overflow bit + mov sp,$tp + mov $bp,$rp # restore rp + + and sp,$hi0,$ap + bic $bp,$hi0,$bp + bis $bp,$ap,$ap # ap=borrow?tp:rp + +.align 4 +.Lcopy: ldq $aj,0($ap) # copy or in-place refresh + lda $tp,8($tp) + lda $rp,8($rp) + lda $ap,8($ap) + stq zero,-8($tp) # zap tp + cmpult $tp,$tj,AT + stq $aj,-8($rp) + bne AT,.Lcopy + mov 1,v0 + +.Lexit: + .set noreorder + mov fp,sp + /*ldq ra,0(sp)*/ + ldq s3,8(sp) + ldq s4,16(sp) + ldq s5,24(sp) + ldq fp,32(sp) + lda sp,48(sp) + ret (ra) +.end bn_mul_mont +.ascii "Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>" +.align 2 +___ + +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/armv4-mont.pl b/openssl/crypto/bn/asm/armv4-mont.pl new file mode 100644 index 00000000..14e0d2d1 --- /dev/null +++ b/openssl/crypto/bn/asm/armv4-mont.pl @@ -0,0 +1,201 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# January 2007. + +# Montgomery multiplication for ARMv4. +# +# Performance improvement naturally varies among CPU implementations +# and compilers. The code was observed to provide +65-35% improvement +# [depending on key length, less for longer keys] on ARM920T, and +# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code +# base and compiler generated code with in-lined umull and even umlal +# instructions. The latter means that this code didn't really have an +# "advantage" of utilizing some "secret" instruction. +# +# The code is interoperable with Thumb ISA and is rather compact, less +# than 1/2KB. Windows CE port would be trivial, as it's exclusively +# about decorations, ABI and instruction syntax are identical. + +$num="r0"; # starts as num argument, but holds &tp[num-1] +$ap="r1"; +$bp="r2"; $bi="r2"; $rp="r2"; +$np="r3"; +$tp="r4"; +$aj="r5"; +$nj="r6"; +$tj="r7"; +$n0="r8"; +########### # r9 is reserved by ELF as platform specific, e.g. TLS pointer +$alo="r10"; # sl, gcc uses it to keep @GOT +$ahi="r11"; # fp +$nlo="r12"; # ip +########### # r13 is stack pointer +$nhi="r14"; # lr +########### # r15 is program counter + +#### argument block layout relative to &tp[num-1], a.k.a. 
$num +$_rp="$num,#12*4"; +# ap permanently resides in r1 +$_bp="$num,#13*4"; +# np permanently resides in r3 +$_n0="$num,#14*4"; +$_num="$num,#15*4"; $_bpend=$_num; + +$code=<<___; +.text + +.global bn_mul_mont +.type bn_mul_mont,%function + +.align 2 +bn_mul_mont: + stmdb sp!,{r0,r2} @ sp points at argument block + ldr $num,[sp,#3*4] @ load num + cmp $num,#2 + movlt r0,#0 + addlt sp,sp,#2*4 + blt .Labrt + + stmdb sp!,{r4-r12,lr} @ save 10 registers + + mov $num,$num,lsl#2 @ rescale $num for byte count + sub sp,sp,$num @ alloca(4*num) + sub sp,sp,#4 @ +extra dword + sub $num,$num,#4 @ "num=num-1" + add $tp,$bp,$num @ &bp[num-1] + + add $num,sp,$num @ $num to point at &tp[num-1] + ldr $n0,[$_n0] @ &n0 + ldr $bi,[$bp] @ bp[0] + ldr $aj,[$ap],#4 @ ap[0],ap++ + ldr $nj,[$np],#4 @ np[0],np++ + ldr $n0,[$n0] @ *n0 + str $tp,[$_bpend] @ save &bp[num] + + umull $alo,$ahi,$aj,$bi @ ap[0]*bp[0] + str $n0,[$_n0] @ save n0 value + mul $n0,$alo,$n0 @ "tp[0]"*n0 + mov $nlo,#0 + umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]" + mov $tp,sp + +.L1st: + ldr $aj,[$ap],#4 @ ap[j],ap++ + mov $alo,$ahi + mov $ahi,#0 + umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0] + ldr $nj,[$np],#4 @ np[j],np++ + mov $nhi,#0 + umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0 + adds $nlo,$nlo,$alo + str $nlo,[$tp],#4 @ tp[j-1]=,tp++ + adc $nlo,$nhi,#0 + cmp $tp,$num + bne .L1st + + adds $nlo,$nlo,$ahi + mov $nhi,#0 + adc $nhi,$nhi,#0 + ldr $tp,[$_bp] @ restore bp + str $nlo,[$num] @ tp[num-1]= + ldr $n0,[$_n0] @ restore n0 + str $nhi,[$num,#4] @ tp[num]= + +.Louter: + sub $tj,$num,sp @ "original" $num-1 value + sub $ap,$ap,$tj @ "rewind" ap to &ap[1] + sub $np,$np,$tj @ "rewind" np to &np[1] + ldr $bi,[$tp,#4]! @ *(++bp) + ldr $aj,[$ap,#-4] @ ap[0] + ldr $nj,[$np,#-4] @ np[0] + ldr $alo,[sp] @ tp[0] + ldr $tj,[sp,#4] @ tp[1] + + mov $ahi,#0 + umlal $alo,$ahi,$aj,$bi @ ap[0]*bp[i]+tp[0] + str $tp,[$_bp] @ save bp + mul $n0,$alo,$n0 + mov $nlo,#0 + umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]" + mov $tp,sp + +.Linner: + ldr $aj,[$ap],#4 @ ap[j],ap++ + adds $alo,$ahi,$tj @ +=tp[j] + mov $ahi,#0 + umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i] + ldr $nj,[$np],#4 @ np[j],np++ + mov $nhi,#0 + umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0 + ldr $tj,[$tp,#8] @ tp[j+1] + adc $ahi,$ahi,#0 + adds $nlo,$nlo,$alo + str $nlo,[$tp],#4 @ tp[j-1]=,tp++ + adc $nlo,$nhi,#0 + cmp $tp,$num + bne .Linner + + adds $nlo,$nlo,$ahi + mov $nhi,#0 + adc $nhi,$nhi,#0 + adds $nlo,$nlo,$tj + adc $nhi,$nhi,#0 + ldr $tp,[$_bp] @ restore bp + ldr $tj,[$_bpend] @ restore &bp[num] + str $nlo,[$num] @ tp[num-1]= + ldr $n0,[$_n0] @ restore n0 + str $nhi,[$num,#4] @ tp[num]= + + cmp $tp,$tj + bne .Louter + + ldr $rp,[$_rp] @ pull rp + add $num,$num,#4 @ $num to point at &tp[num] + sub $aj,$num,sp @ "original" num value + mov $tp,sp @ "rewind" $tp + mov $ap,$tp @ "borrow" $ap + sub $np,$np,$aj @ "rewind" $np to &np[0] + + subs $tj,$tj,$tj @ "clear" carry flag +.Lsub: ldr $tj,[$tp],#4 + ldr $nj,[$np],#4 + sbcs $tj,$tj,$nj @ tp[j]-np[j] + str $tj,[$rp],#4 @ rp[j]= + teq $tp,$num @ preserve carry + bne .Lsub + sbcs $nhi,$nhi,#0 @ upmost carry + mov $tp,sp @ "rewind" $tp + sub $rp,$rp,$aj @ "rewind" $rp + + and $ap,$tp,$nhi + bic $np,$rp,$nhi + orr $ap,$ap,$np @ ap=borrow?tp:rp + +.Lcopy: ldr $tj,[$ap],#4 @ copy or in-place refresh + str sp,[$tp],#4 @ zap tp + str $tj,[$rp],#4 + cmp $tp,$num + bne .Lcopy + + add sp,$num,#4 @ skip over tp[num+1] + ldmia sp!,{r4-r12,lr} @ restore registers + add sp,sp,#2*4 @ skip over {r0,r2} + mov r0,#1 +.Labrt: tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + bx lr @ 
interoperable with Thumb ISA:-) +.size bn_mul_mont,.-bn_mul_mont +.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro\@openssl.org>" +.align 2 +___ + +$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4 +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/armv4-mont.s b/openssl/crypto/bn/asm/armv4-mont.s new file mode 100644 index 00000000..0488455f --- /dev/null +++ b/openssl/crypto/bn/asm/armv4-mont.s @@ -0,0 +1,145 @@ +.text + +.global bn_mul_mont +.type bn_mul_mont,%function + +.align 2 +bn_mul_mont: + stmdb sp!,{r0,r2} @ sp points at argument block + ldr r0,[sp,#3*4] @ load num + cmp r0,#2 + movlt r0,#0 + addlt sp,sp,#2*4 + blt .Labrt + + stmdb sp!,{r4-r12,lr} @ save 10 registers + + mov r0,r0,lsl#2 @ rescale r0 for byte count + sub sp,sp,r0 @ alloca(4*num) + sub sp,sp,#4 @ +extra dword + sub r0,r0,#4 @ "num=num-1" + add r4,r2,r0 @ &bp[num-1] + + add r0,sp,r0 @ r0 to point at &tp[num-1] + ldr r8,[r0,#14*4] @ &n0 + ldr r2,[r2] @ bp[0] + ldr r5,[r1],#4 @ ap[0],ap++ + ldr r6,[r3],#4 @ np[0],np++ + ldr r8,[r8] @ *n0 + str r4,[r0,#15*4] @ save &bp[num] + + umull r10,r11,r5,r2 @ ap[0]*bp[0] + str r8,[r0,#14*4] @ save n0 value + mul r8,r10,r8 @ "tp[0]"*n0 + mov r12,#0 + umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" + mov r4,sp + +.L1st: + ldr r5,[r1],#4 @ ap[j],ap++ + mov r10,r11 + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[j]*bp[0] + ldr r6,[r3],#4 @ np[j],np++ + mov r14,#0 + umlal r12,r14,r6,r8 @ np[j]*n0 + adds r12,r12,r10 + str r12,[r4],#4 @ tp[j-1]=,tp++ + adc r12,r14,#0 + cmp r4,r0 + bne .L1st + + adds r12,r12,r11 + mov r14,#0 + adc r14,r14,#0 + ldr r4,[r0,#13*4] @ restore bp + str r12,[r0] @ tp[num-1]= + ldr r8,[r0,#14*4] @ restore n0 + str r14,[r0,#4] @ tp[num]= + +.Louter: + sub r7,r0,sp @ "original" r0-1 value + sub r1,r1,r7 @ "rewind" ap to &ap[1] + sub r3,r3,r7 @ "rewind" np to &np[1] + ldr r2,[r4,#4]! 
@ *(++bp) + ldr r5,[r1,#-4] @ ap[0] + ldr r6,[r3,#-4] @ np[0] + ldr r10,[sp] @ tp[0] + ldr r7,[sp,#4] @ tp[1] + + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] + str r4,[r0,#13*4] @ save bp + mul r8,r10,r8 + mov r12,#0 + umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" + mov r4,sp + +.Linner: + ldr r5,[r1],#4 @ ap[j],ap++ + adds r10,r11,r7 @ +=tp[j] + mov r11,#0 + umlal r10,r11,r5,r2 @ ap[j]*bp[i] + ldr r6,[r3],#4 @ np[j],np++ + mov r14,#0 + umlal r12,r14,r6,r8 @ np[j]*n0 + ldr r7,[r4,#8] @ tp[j+1] + adc r11,r11,#0 + adds r12,r12,r10 + str r12,[r4],#4 @ tp[j-1]=,tp++ + adc r12,r14,#0 + cmp r4,r0 + bne .Linner + + adds r12,r12,r11 + mov r14,#0 + adc r14,r14,#0 + adds r12,r12,r7 + adc r14,r14,#0 + ldr r4,[r0,#13*4] @ restore bp + ldr r7,[r0,#15*4] @ restore &bp[num] + str r12,[r0] @ tp[num-1]= + ldr r8,[r0,#14*4] @ restore n0 + str r14,[r0,#4] @ tp[num]= + + cmp r4,r7 + bne .Louter + + ldr r2,[r0,#12*4] @ pull rp + add r0,r0,#4 @ r0 to point at &tp[num] + sub r5,r0,sp @ "original" num value + mov r4,sp @ "rewind" r4 + mov r1,r4 @ "borrow" r1 + sub r3,r3,r5 @ "rewind" r3 to &np[0] + + subs r7,r7,r7 @ "clear" carry flag +.Lsub: ldr r7,[r4],#4 + ldr r6,[r3],#4 + sbcs r7,r7,r6 @ tp[j]-np[j] + str r7,[r2],#4 @ rp[j]= + teq r4,r0 @ preserve carry + bne .Lsub + sbcs r14,r14,#0 @ upmost carry + mov r4,sp @ "rewind" r4 + sub r2,r2,r5 @ "rewind" r2 + + and r1,r4,r14 + bic r3,r2,r14 + orr r1,r1,r3 @ ap=borrow?tp:rp + +.Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh + str sp,[r4],#4 @ zap tp + str r7,[r2],#4 + cmp r4,r0 + bne .Lcopy + + add sp,r0,#4 @ skip over tp[num+1] + ldmia sp!,{r4-r12,lr} @ restore registers + add sp,sp,#2*4 @ skip over {r0,r2} + mov r0,#1 +.Labrt: tst lr,#1 + moveq pc,lr @ be binary compatible with V4, yet + .word 0xe12fff1e @ interoperable with Thumb ISA:-) +.size bn_mul_mont,.-bn_mul_mont +.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro@openssl.org>" +.align 2 diff --git a/openssl/crypto/bn/asm/bn-586.pl b/openssl/crypto/bn/asm/bn-586.pl new file mode 100644 index 00000000..332ef3e9 --- /dev/null +++ b/openssl/crypto/bn/asm/bn-586.pl @@ -0,0 +1,774 @@ +#!/usr/local/bin/perl + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +$sse2=0; +for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } + +&external_label("OPENSSL_ia32cap_P") if ($sse2); + +&bn_mul_add_words("bn_mul_add_words"); +&bn_mul_words("bn_mul_words"); +&bn_sqr_words("bn_sqr_words"); +&bn_div_words("bn_div_words"); +&bn_add_words("bn_add_words"); +&bn_sub_words("bn_sub_words"); +&bn_sub_part_words("bn_sub_part_words"); + +&asm_finish(); + +sub bn_mul_add_words + { + local($name)=@_; + + &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":""); + + $r="eax"; + $a="edx"; + $c="ecx"; + + if ($sse2) { + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt(&DWP(0,"eax"),26); + &jnc(&label("maw_non_sse2")); + + &mov($r,&wparam(0)); + &mov($a,&wparam(1)); + &mov($c,&wparam(2)); + &movd("mm0",&wparam(3)); # mm0 = w + &pxor("mm1","mm1"); # mm1 = carry_in + &jmp(&label("maw_sse2_entry")); + + &set_label("maw_sse2_unrolled",16); + &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0] + &paddq("mm1","mm3"); # mm1 = carry_in + r[0] + &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0] + &pmuludq("mm2","mm0"); # mm2 = w*a[0] + &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1] + &pmuludq("mm4","mm0"); # mm4 = w*a[1] + &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2] + &pmuludq("mm6","mm0"); # mm6 = w*a[2] + &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3] + 
&pmuludq("mm7","mm0"); # mm7 = w*a[3] + &paddq("mm1","mm2"); # mm1 = carry_in + r[0] + w*a[0] + &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1] + &paddq("mm3","mm4"); # mm3 = r[1] + w*a[1] + &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2] + &paddq("mm5","mm6"); # mm5 = r[2] + w*a[2] + &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3] + &paddq("mm7","mm4"); # mm7 = r[3] + w*a[3] + &movd(&DWP(0,$r,"",0),"mm1"); + &movd("mm2",&DWP(16,$a,"",0)); # mm2 = a[4] + &pmuludq("mm2","mm0"); # mm2 = w*a[4] + &psrlq("mm1",32); # mm1 = carry0 + &movd("mm4",&DWP(20,$a,"",0)); # mm4 = a[5] + &pmuludq("mm4","mm0"); # mm4 = w*a[5] + &paddq("mm1","mm3"); # mm1 = carry0 + r[1] + w*a[1] + &movd("mm6",&DWP(24,$a,"",0)); # mm6 = a[6] + &pmuludq("mm6","mm0"); # mm6 = w*a[6] + &movd(&DWP(4,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry1 + &movd("mm3",&DWP(28,$a,"",0)); # mm3 = a[7] + &add($a,32); + &pmuludq("mm3","mm0"); # mm3 = w*a[7] + &paddq("mm1","mm5"); # mm1 = carry1 + r[2] + w*a[2] + &movd("mm5",&DWP(16,$r,"",0)); # mm5 = r[4] + &paddq("mm2","mm5"); # mm2 = r[4] + w*a[4] + &movd(&DWP(8,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry2 + &paddq("mm1","mm7"); # mm1 = carry2 + r[3] + w*a[3] + &movd("mm5",&DWP(20,$r,"",0)); # mm5 = r[5] + &paddq("mm4","mm5"); # mm4 = r[5] + w*a[5] + &movd(&DWP(12,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry3 + &paddq("mm1","mm2"); # mm1 = carry3 + r[4] + w*a[4] + &movd("mm5",&DWP(24,$r,"",0)); # mm5 = r[6] + &paddq("mm6","mm5"); # mm6 = r[6] + w*a[6] + &movd(&DWP(16,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry4 + &paddq("mm1","mm4"); # mm1 = carry4 + r[5] + w*a[5] + &movd("mm5",&DWP(28,$r,"",0)); # mm5 = r[7] + &paddq("mm3","mm5"); # mm3 = r[7] + w*a[7] + &movd(&DWP(20,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry5 + &paddq("mm1","mm6"); # mm1 = carry5 + r[6] + w*a[6] + &movd(&DWP(24,$r,"",0),"mm1"); + &psrlq("mm1",32); # mm1 = carry6 + &paddq("mm1","mm3"); # mm1 = carry6 + r[7] + w*a[7] + &movd(&DWP(28,$r,"",0),"mm1"); + &lea($r,&DWP(32,$r)); + &psrlq("mm1",32); # mm1 = carry_out + + &sub($c,8); + &jz(&label("maw_sse2_exit")); + &set_label("maw_sse2_entry"); + &test($c,0xfffffff8); + &jnz(&label("maw_sse2_unrolled")); + + &set_label("maw_sse2_loop",4); + &movd("mm2",&DWP(0,$a)); # mm2 = a[i] + &movd("mm3",&DWP(0,$r)); # mm3 = r[i] + &pmuludq("mm2","mm0"); # a[i] *= w + &lea($a,&DWP(4,$a)); + &paddq("mm1","mm3"); # carry += r[i] + &paddq("mm1","mm2"); # carry += a[i]*w + &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low + &sub($c,1); + &psrlq("mm1",32); # carry = carry_high + &lea($r,&DWP(4,$r)); + &jnz(&label("maw_sse2_loop")); + &set_label("maw_sse2_exit"); + &movd("eax","mm1"); # c = carry_out + &emms(); + &ret(); + + &set_label("maw_non_sse2",16); + } + + # function_begin prologue + &push("ebp"); + &push("ebx"); + &push("esi"); + &push("edi"); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ebp"; + $r="edi"; + $c="esi"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + + &mov("ecx",&wparam(2)); # + &mov($a,&wparam(1)); # + + &and("ecx",0xfffffff8); # num / 8 + &mov($w,&wparam(3)); # + + &push("ecx"); # Up the stack for a tmp variable + + &jz(&label("maw_finish")); + + &set_label("maw_loop",16); + + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+= c + &adc("edx",0); # H(t)+=carry + &add("eax",&DWP($i,$r)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &sub("ecx",8); + 
&lea($a,&DWP(32,$a)); + &lea($r,&DWP(32,$r)); + &jnz(&label("maw_loop")); + + &set_label("maw_finish",0); + &mov("ecx",&wparam(2)); # get num + &and("ecx",7); + &jnz(&label("maw_finish2")); # helps branch prediction + &jmp(&label("maw_end")); + + &set_label("maw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + &adc("edx",0); # H(t)+=carry + &add("eax",&DWP($i*4,$r)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &dec("ecx") if ($i != 7-1); + &mov(&DWP($i*4,$r),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + &jz(&label("maw_end")) if ($i != 7-1); + } + &set_label("maw_end",0); + &mov("eax",$c); + + &pop("ecx"); # clear variable from + + &function_end($name); + } + +sub bn_mul_words + { + local($name)=@_; + + &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":""); + + $r="eax"; + $a="edx"; + $c="ecx"; + + if ($sse2) { + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt(&DWP(0,"eax"),26); + &jnc(&label("mw_non_sse2")); + + &mov($r,&wparam(0)); + &mov($a,&wparam(1)); + &mov($c,&wparam(2)); + &movd("mm0",&wparam(3)); # mm0 = w + &pxor("mm1","mm1"); # mm1 = carry = 0 + + &set_label("mw_sse2_loop",16); + &movd("mm2",&DWP(0,$a)); # mm2 = a[i] + &pmuludq("mm2","mm0"); # a[i] *= w + &lea($a,&DWP(4,$a)); + &paddq("mm1","mm2"); # carry += a[i]*w + &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low + &sub($c,1); + &psrlq("mm1",32); # carry = carry_high + &lea($r,&DWP(4,$r)); + &jnz(&label("mw_sse2_loop")); + + &movd("eax","mm1"); # return carry + &emms(); + &ret(); + &set_label("mw_non_sse2",16); + } + + # function_begin prologue + &push("ebp"); + &push("ebx"); + &push("esi"); + &push("edi"); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ecx"; + $r="edi"; + $c="esi"; + $num="ebp"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + &mov($w,&wparam(3)); # + + &and($num,0xfffffff8); # num / 8 + &jz(&label("mw_finish")); + + &set_label("mw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a,"",0)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t); + + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jz(&label("mw_finish")); + &jmp(&label("mw_loop")); + + &set_label("mw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jnz(&label("mw_finish2")); + &jmp(&label("mw_end")); + + &set_label("mw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0));# *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t); + &mov($c,"edx"); # c= H(t); + &dec($num) if ($i != 7-1); + &jz(&label("mw_end")) if ($i != 7-1); + } + &set_label("mw_end",0); + &mov("eax",$c); + + &function_end($name); + } + +sub bn_sqr_words + { + local($name)=@_; + + &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":""); + + $r="eax"; + $a="edx"; + $c="ecx"; + + if ($sse2) { + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt(&DWP(0,"eax"),26); + &jnc(&label("sqr_non_sse2")); + + &mov($r,&wparam(0)); + &mov($a,&wparam(1)); + &mov($c,&wparam(2)); + + &set_label("sqr_sse2_loop",16); + &movd("mm0",&DWP(0,$a)); # mm0 = a[i] + &pmuludq("mm0","mm0"); # a[i] *= a[i] + &lea($a,&DWP(4,$a)); # a++ + &movq(&QWP(0,$r),"mm0"); # r[i] = a[i]*a[i] + &sub($c,1); + 
&lea($r,&DWP(8,$r)); # r += 2 + &jnz(&label("sqr_sse2_loop")); + + &emms(); + &ret(); + &set_label("sqr_non_sse2",16); + } + + # function_begin prologue + &push("ebp"); + &push("ebx"); + &push("esi"); + &push("edi"); + + &comment(""); + $r="esi"; + $a="edi"; + $num="ebx"; + + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + + &and($num,0xfffffff8); # num / 8 + &jz(&label("sw_finish")); + + &set_label("sw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + &mov("eax",&DWP($i,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*2,$r,"",0),"eax"); # + &mov(&DWP($i*2+4,$r,"",0),"edx");# + } + + &comment(""); + &add($a,32); + &add($r,64); + &sub($num,8); + &jnz(&label("sw_loop")); + + &set_label("sw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jz(&label("sw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*8,$r,"",0),"eax"); # + &dec($num) if ($i != 7-1); + &mov(&DWP($i*8+4,$r,"",0),"edx"); + &jz(&label("sw_end")) if ($i != 7-1); + } + &set_label("sw_end",0); + + &function_end($name); + } + +sub bn_div_words + { + local($name)=@_; + + &function_begin_B($name,""); + &mov("edx",&wparam(0)); # + &mov("eax",&wparam(1)); # + &mov("ecx",&wparam(2)); # + &div("ecx"); + &ret(); + &function_end_B($name); + } + +sub bn_add_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +sub bn_sub_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + 
&mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +sub bn_sub_part_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP(0,$a,"",0)); # *a + &mov($tmp2,&DWP(0,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP(0,$r,"",0),$tmp1); # *r + &add($a, 4); + &add($b, 4); + &add($r, 4); + &dec($num) if ($i != 6); + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + + &cmp(&wparam(4),0); + &je(&label("pw_end")); + + &mov($num,&wparam(4)); # get dl + &cmp($num,0); + &je(&label("pw_end")); + &jge(&label("pw_pos")); + + &comment("pw_neg"); + &mov($tmp2,0); + &sub($tmp2,$num); + &mov($num,$tmp2); + &and($num,0xfffffff8); # num / 8 + &jz(&label("pw_neg_finish")); + + &set_label("pw_neg_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("dl<0 Round $i"); + + &mov($tmp1,0); + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("pw_neg_loop")); + + &set_label("pw_neg_finish",0); + &mov($tmp2,&wparam(4)); # get dl + &mov($num,0); + &sub($num,$tmp2); + &and($num,7); + &jz(&label("pw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("dl<0 Tail Round $i"); + &mov($tmp1,0); + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jz(&label("pw_end")) if ($i != 6); + } + + &jmp(&label("pw_end")); + + &set_label("pw_pos",0); + + &and($num,0xfffffff8); # num / 8 + &jz(&label("pw_pos_finish")); + + &set_label("pw_pos_loop",0); + + for ($i=0; $i<8; $i++) + { + &comment("dl>0 Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &sub($tmp1,$c); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jnc(&label("pw_nc".$i)); + } + + &comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jnz(&label("pw_pos_loop")); + + &set_label("pw_pos_finish",0); + &mov($num,&wparam(4)); # get dl + &and($num,7); + &jz(&label("pw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("dl>0 Tail Round $i"); + 
&mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &sub($tmp1,$c); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &jnc(&label("pw_tail_nc".$i)); + &dec($num) if ($i != 6); + &jz(&label("pw_end")) if ($i != 6); + } + &mov($c,1); + &jmp(&label("pw_end")); + + &set_label("pw_nc_loop",0); + for ($i=0; $i<8; $i++) + { + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &set_label("pw_nc".$i,0); + } + + &comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jnz(&label("pw_nc_loop")); + + &mov($num,&wparam(4)); # get dl + &and($num,7); + &jz(&label("pw_nc_end")); + + for ($i=0; $i<7; $i++) + { + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + &set_label("pw_tail_nc".$i,0); + &dec($num) if ($i != 6); + &jz(&label("pw_nc_end")) if ($i != 6); + } + + &set_label("pw_nc_end",0); + &mov($c,0); + + &set_label("pw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + diff --git a/openssl/crypto/bn/asm/co-586.pl b/openssl/crypto/bn/asm/co-586.pl new file mode 100644 index 00000000..57101a6b --- /dev/null +++ b/openssl/crypto/bn/asm/co-586.pl @@ -0,0 +1,287 @@ +#!/usr/local/bin/perl + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +&bn_mul_comba("bn_mul_comba8",8); +&bn_mul_comba("bn_mul_comba4",4); +&bn_sqr_comba("bn_sqr_comba8",8); +&bn_sqr_comba("bn_sqr_comba4",4); + +&asm_finish(); + +sub mul_add_c + { + local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("mul a[$ai]*b[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + &mul("edx"); + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + &mov("eax",&wparam(0)) if $pos > 0; # load r[] + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0; # load next b + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1; # load next b + ### + &adc($c2,0); + # if pos > 1, it means it is the last loop + &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next a + } + +sub sqr_add_c + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb); + ### + &adc($c2,0); + # if pos > 1, it means it is the last loop + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + } + +sub sqr_add_c2 + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$a,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add("eax","eax"); + ### + &adc("edx","edx"); + ### + &adc($c2,0); + &add($c0,"eax"); + &adc($c1,"edx"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + &adc($c2,0); + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb); + ### + } + +sub bn_mul_comba + { + local($name,$num)=@_; + local($a,$b,$c0,$c1,$c2); + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($tot,$end); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $b="edi"; + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + &push("esi"); + &mov($a,&wparam(1)); + &push("edi"); + &mov($b,&wparam(2)); + &push("ebp"); + &push("ebx"); + + &xor($c0,$c0); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + &xor($c1,$c1); + &mov("edx",&DWP(0,$b,"",0)); # load the first second + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("################## Calculate word $i"); + + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($j+1) == $end) + { + $v=1; + $v=2 if (($i+1) == $tot); + } + else + { $v=0; } + if (($j+1) != $end) + { + $na=($ai-1); + $nb=($bi+1); + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } +#printf STDERR "[$ai,$bi] -> [$na,$nb]\n"; + &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb); + if ($v) + { + &comment("saved r[$i]"); + # &mov("eax",&wparam(0)); + # &mov(&DWP($i*4,"eax","",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &comment("save r[$i]"); + # &mov("eax",&wparam(0)); + &mov(&DWP($i*4,"eax","",0),$c0); + + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + +sub bn_sqr_comba + { + local($name,$num)=@_; + local($r,$a,$c0,$c1,$c2)=@_; + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($b,$tot,$end,$half); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $r="edi"; + + &push("esi"); + &push("edi"); + &push("ebp"); + &push("ebx"); + &mov($r,&wparam(0)); + &mov($a,&wparam(1)); + &xor($c0,$c0); + &xor($c1,$c1); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("############### Calculate word $i"); + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($ai-1) < ($bi+1)) + { + $v=1; + $v=2 if ($i+1) == $tot; + } + else + { $v=0; } + if (!$v) + { + $na=$ai-1; + $nb=$bi+1; + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } + if ($ai == $bi) + { + &sqr_add_c($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + else + { + &sqr_add_c2($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + if ($v) + { + &comment("saved r[$i]"); + #&mov(&DWP($i*4,$r,"",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + last; + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &mov(&DWP($i*4,$r,"",0),$c0); + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } diff --git a/openssl/crypto/bn/asm/ia64.S 
b/openssl/crypto/bn/asm/ia64.S new file mode 100644 index 00000000..951abc53 --- /dev/null +++ b/openssl/crypto/bn/asm/ia64.S @@ -0,0 +1,1555 @@ +.explicit +.text +.ident "ia64.S, Version 2.1" +.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>" + +// +// ==================================================================== +// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +// project. +// +// Rights for redistribution and usage in source and binary forms are +// granted according to the OpenSSL license. Warranty of any kind is +// disclaimed. +// ==================================================================== +// +// Version 2.x is an Itanium2 re-tune. A few words about how Itanium2 is +// different from Itanium from this module's viewpoint. Most notably, is +// it "wider" than Itanium? Can you experience the loop scalability +// discussed in the commentary sections? Not really:-( Itanium2 has 6 +// integer ALU ports, i.e. it's 2 ports wider, but it's not enough to +// spin twice as fast, as I need 8 IALU ports. The amount of floating +// point ports is the same, i.e. 2, while I need 4. In other words, to +// this module Itanium2 remains effectively as "wide" as Itanium. Yet +// it's essentially different in respect to this module, and a re-tune +// was required. Well, because some instruction latencies have changed. +// Most noticeably those intensively used: +// +// Itanium Itanium2 +// ldf8 9 6 L2 hit +// ld8 2 1 L1 hit +// getf 2 5 +// xma[->getf] 7[+1] 4[+0] +// add[->st8] 1[+1] 1[+0] +// +// What does it mean? You might ratiocinate that the original code +// should run just faster... Because the sum of latencies is smaller... +// Wrong! Note that the getf latency increased. This means that if a +// loop is scheduled for lower latency (as they were), then it will +// suffer from a stall condition and the code will therefore turn +// anti-scalable, e.g. the original bn_mul_words spun at 5*n, or 2.5 +// times slower than expected, on Itanium2! What to do? Reschedule loops +// for Itanium2? But then Itanium would exhibit anti-scalability. So +// I've chosen to reschedule for the worst latency of every instruction, +// aiming for the best *all-round* performance. + +// Q. How much faster does it get? +// A. Here is the output from 'openssl speed rsa dsa' for vanilla +// 0.9.6a compiled with gcc version 2.96 20000731 (Red Hat +// Linux 7.1 2.96-81): +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0036s 0.0003s 275.3 2999.2 +// rsa 1024 bits 0.0203s 0.0011s 49.3 894.1 +// rsa 2048 bits 0.1331s 0.0040s 7.5 250.9 +// rsa 4096 bits 0.9270s 0.0147s 1.1 68.1 +// sign verify sign/s verify/s +// dsa 512 bits 0.0035s 0.0043s 288.3 234.8 +// dsa 1024 bits 0.0111s 0.0135s 90.0 74.2 +// +// And here is similar output but for this assembler +// implementation:-) +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0021s 0.0001s 549.4 9638.5 +// rsa 1024 bits 0.0055s 0.0002s 183.8 4481.1 +// rsa 2048 bits 0.0244s 0.0006s 41.4 1726.3 +// rsa 4096 bits 0.1295s 0.0018s 7.7 561.5 +// sign verify sign/s verify/s +// dsa 512 bits 0.0012s 0.0013s 891.9 756.6 +// dsa 1024 bits 0.0023s 0.0028s 440.4 376.2 +// +// Yes, you may argue that it's not a fair comparison, as it's +// possible to craft the C implementation with the BN_UMULT_HIGH +// inline assembler macro. But of course!
Here is the output +// with the macro: +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0020s 0.0002s 495.0 6561.0 +// rsa 1024 bits 0.0086s 0.0004s 116.2 2235.7 +// rsa 2048 bits 0.0519s 0.0015s 19.3 667.3 +// rsa 4096 bits 0.3464s 0.0053s 2.9 187.7 +// sign verify sign/s verify/s +// dsa 512 bits 0.0016s 0.0020s 613.1 510.5 +// dsa 1024 bits 0.0045s 0.0054s 221.0 183.9 +// +// My code is still way faster, huh:-) And I believe that even +// higher performance can be achieved. Note that as keys get +// longer, the performance gain is larger. Why? According to the +// profiler there is another player in the field, namely +// BN_from_montgomery, consuming a larger and larger portion of CPU +// time as keysize decreases. I therefore consider putting effort +// into an assembler implementation of the following routine: +// +// void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0) +// { +// int i,j; +// BN_ULONG v,*nrp=&rp[nl]; +// +// for (i=0; i<nl; i++) +// { +// v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2); +// nrp++; +// rp++; +// if (((nrp[-1]+=v)&BN_MASK2) < v) +// for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ; +// } +// } +// +// It might as well be beneficial to implement even combaX +// variants, as it appears it can literally unleash the +// performance (see the comment section of bn_mul_comba8 below). +// +// And finally, for your reference, the output for 0.9.6a compiled +// with SGIcc version 0.01.0-12 (keep in mind that at the moment +// of this writing it's not possible to convince SGIcc to use the +// BN_UMULT_HIGH inline assembler macro, yet the code is fast, +// i.e. for a compiler-generated one:-): +// +// sign verify sign/s verify/s +// rsa 512 bits 0.0022s 0.0002s 452.7 5894.3 +// rsa 1024 bits 0.0097s 0.0005s 102.7 2002.9 +// rsa 2048 bits 0.0578s 0.0017s 17.3 600.2 +// rsa 4096 bits 0.3838s 0.0061s 2.6 164.5 +// sign verify sign/s verify/s +// dsa 512 bits 0.0018s 0.0022s 547.3 459.6 +// dsa 1024 bits 0.0051s 0.0062s 196.6 161.3 +// +// Oh! Benchmarks were performed on a 733MHz Lion-class Itanium +// system running Redhat Linux 7.1 (very special thanks to Ray +// McCaffity of Williams Communications for providing an account). +// +// Q. What the heck is with 'rum 1<<5' at the end of every function? +// A. Well, by clearing the "upper FP registers written" bit of the +// User Mask I want to excuse the kernel from preserving the upper +// (f32-f127) FP register bank over a process context switch, thus +// minimizing bus bandwidth consumption during the switch (i.e. +// after the PKI operation completes and the program is off doing +// something else like bulk symmetric encryption). Having said +// this, I also want to point out that it might be a good idea +// to compile the whole toolkit (as well as the majority of the +// programs for that matter) with the -mfixed-range=f32-f127 command +// line option. No, it doesn't prevent the compiler from writing +// to the upper bank, but it at least discourages it from doing so. +// If you don't like the idea you have the option to compile the +// module with -Drum=nop.m on the command line. +// + +#if defined(_HPUX_SOURCE) && !defined(_LP64) +#define ADDP addp4 +#else +#define ADDP add +#endif + +#if 1 +// +// bn_[add|sub]_words routines. +// +// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the +// data reside in L1 cache, i.e. 2 ticks away).
It's possible to +// compress the epilogue and get down to 2*n+6, but at the cost of +// scalability (the neat feature of this implementation is that it +// shall automagically spin in n+5 on "wider" IA-64 implementations:-) +// I consider that the epilogue is short enough as it is to trade tiny +// performance loss on Itanium for scalability. +// +// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num) +// +.global bn_add_words# +.proc bn_add_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_add_words: + .prologue + .save ar.pfs,r2 +{ .mii; alloc r2=ar.pfs,4,12,0,16 + cmp4.le p6,p0=r35,r0 };; +{ .mfb; mov r8=r0 // return value +(p6) br.ret.spnt.many b0 };; + +{ .mib; sub r10=r35,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16 + } +{ .mib; ADDP r14=0,r32 // rp + .save pr,r9 + mov r9=pr };; + .body +{ .mii; ADDP r15=0,r33 // ap + mov ar.lc=r10 + mov ar.ec=6 } +{ .mib; ADDP r16=0,r34 // bp + mov pr.rot=1<<16 };; + +.L_bn_add_words_ctop: +{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++) + (p18) add r39=r37,r34 + (p19) cmp.ltu.unc p56,p0=r40,r38 } +{ .mfb; (p0) nop.m 0x0 + (p0) nop.f 0x0 + (p0) nop.b 0x0 } +{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++) + (p58) cmp.eq.or p57,p0=-1,r41 // (p20) + (p58) add r41=1,r41 } // (p20) +{ .mfb; (p21) st8 [r14]=r42,8 // *(rp++)=r + (p0) nop.f 0x0 + br.ctop.sptk .L_bn_add_words_ctop };; +.L_bn_add_words_cend: + +{ .mii; +(p59) add r8=1,r8 // return value + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mbb; nop.b 0x0 + br.ret.sptk.many b0 };; +.endp bn_add_words# + +// +// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num) +// +.global bn_sub_words# +.proc bn_sub_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_sub_words: + .prologue + .save ar.pfs,r2 +{ .mii; alloc r2=ar.pfs,4,12,0,16 + cmp4.le p6,p0=r35,r0 };; +{ .mfb; mov r8=r0 // return value +(p6) br.ret.spnt.many b0 };; + +{ .mib; sub r10=r35,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16 + } +{ .mib; ADDP r14=0,r32 // rp + .save pr,r9 + mov r9=pr };; + .body +{ .mii; ADDP r15=0,r33 // ap + mov ar.lc=r10 + mov ar.ec=6 } +{ .mib; ADDP r16=0,r34 // bp + mov pr.rot=1<<16 };; + +.L_bn_sub_words_ctop: +{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++) + (p18) sub r39=r37,r34 + (p19) cmp.gtu.unc p56,p0=r40,r38 } +{ .mfb; (p0) nop.m 0x0 + (p0) nop.f 0x0 + (p0) nop.b 0x0 } +{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++) + (p58) cmp.eq.or p57,p0=0,r41 // (p20) + (p58) add r41=-1,r41 } // (p20) +{ .mbb; (p21) st8 [r14]=r42,8 // *(rp++)=r + (p0) nop.b 0x0 + br.ctop.sptk .L_bn_sub_words_ctop };; +.L_bn_sub_words_cend: + +{ .mii; +(p59) add r8=1,r8 // return value + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mbb; nop.b 0x0 + br.ret.sptk.many b0 };; +.endp bn_sub_words# +#endif + +#if 0 +#define XMA_TEMPTATION +#endif + +#if 1 +// +// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +// +.global bn_mul_words# +.proc bn_mul_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_mul_words: + .prologue + .save ar.pfs,r2 +#ifdef XMA_TEMPTATION +{ .mfi; alloc r2=ar.pfs,4,0,0,0 };; +#else +{ .mfi; alloc r2=ar.pfs,4,12,0,16 };; +#endif +{ .mib; mov r8=r0 // return value + cmp4.le p6,p0=r34,r0 +(p6) br.ret.spnt.many b0 };; + +{ .mii; sub r10=r34,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + .save pr,r9 + mov r9=pr };; + + .body +{ .mib; setf.sig f8=r35 // w + mov pr.rot=0x800001<<16 + // ------^----- serves 
as (p50) at first (p27) + brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16 + } + +#ifndef XMA_TEMPTATION + +{ .mmi; ADDP r14=0,r32 // rp + ADDP r15=0,r33 // ap + mov ar.lc=r10 } +{ .mmi; mov r40=0 // serves as r35 at first (p27) + mov ar.ec=13 };; + +// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium +// L2 cache (i.e. 9 ticks away) as floating point load/store instructions +// bypass the L1 cache and L2 latency is actually the best-case scenario +// for ldf8. The loop is not scalable and shall run in 2*(n+12) even on +// "wider" IA-64 implementations. It's a trade-off here. An n+24 loop +// would give us ~5% in *overall* performance improvement on "wider" +// IA-64, but would hurt Itanium by about the same because of the longer +// epilogue. As it's a matter of a few percent in either case, I've +// chosen to trade the scalability for development time (you can see +// this very instruction sequence in the bn_mul_add_words loop, which in +// turn is scalable). +.L_bn_mul_words_ctop: +{ .mfi; (p25) getf.sig r36=f52 // low + (p21) xmpy.lu f48=f37,f8 + (p28) cmp.ltu p54,p50=r41,r39 } +{ .mfi; (p16) ldf8 f32=[r15],8 + (p21) xmpy.hu f40=f37,f8 + (p0) nop.i 0x0 };; +{ .mii; (p25) getf.sig r32=f44 // high + .pred.rel "mutex",p50,p54 + (p50) add r40=r38,r35 // (p27) + (p54) add r40=r38,r35,1 } // (p27) +{ .mfb; (p28) st8 [r14]=r41,8 + (p0) nop.f 0x0 + br.ctop.sptk .L_bn_mul_words_ctop };; +.L_bn_mul_words_cend: + +{ .mii; nop.m 0x0 +.pred.rel "mutex",p51,p55 +(p51) add r8=r36,r0 +(p55) add r8=r36,r0,1 } +{ .mfb; nop.m 0x0 + nop.f 0x0 + nop.b 0x0 } + +#else // XMA_TEMPTATION + + setf.sig f37=r0 // serves as carry at (p18) tick + mov ar.lc=r10 + mov ar.ec=5;; + +// Most of you examining this code very likely wonder why, in the name +// of Intel, the following loop is commented out. Indeed, it looks so +// neat that you find it hard to believe that there's something wrong +// with it, right? The catch is that every iteration depends on the +// result from the previous one, and the latter isn't available +// instantly. The loop therefore spins at the latency of xma minus 1, +// or in other words at 6*(n+4) ticks:-( Compare to the "production" +// loop above that runs in 2*(n+11), where the low latency problem is +// worked around by moving the dependency to the one-tick-latency +// integer ALU. Note that the "distance" between ldf8 and xma is not +// the latency of ldf8, but the *difference* between the xma and ldf8 +// latencies.
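+//
+// To make the recurrence explicit, here is an editorial C model of
+// bn_mul_words (a sketch, not OpenSSL's bn_asm.c code; it assumes a
+// 64-bit BN_ULONG and a compiler __int128 type). Every iteration
+// consumes the carry produced by the previous one, and it's this
+// serial chain that pins the neat xma-only loop below at the xma
+// latency:
+//
+//	BN_ULONG bn_mul_words_model(BN_ULONG *rp, const BN_ULONG *ap,
+//				int num, BN_ULONG w)
+//	{
+//	BN_ULONG carry = 0;
+//	unsigned __int128 t;
+//
+//	while (num--) {
+//		t = (unsigned __int128)(*ap++)*w + carry;
+//		*rp++ = (BN_ULONG)t;		/* low 64 bits */
+//		carry = (BN_ULONG)(t>>64);	/* next iteration waits here */
+//	}
+//	return carry;
+//	}
+//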
+.L_bn_mul_words_ctop: +{ .mfi; (p16) ldf8 f32=[r33],8 + (p18) xma.hu f38=f34,f8,f39 } +{ .mfb; (p20) stf8 [r32]=f37,8 + (p18) xma.lu f35=f34,f8,f39 + br.ctop.sptk .L_bn_mul_words_ctop };; +.L_bn_mul_words_cend: + + getf.sig r8=f41 // the return value + +#endif // XMA_TEMPTATION + +{ .mii; nop.m 0x0 + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mfb; rum 1<<5 // clear um.mfh + nop.f 0x0 + br.ret.sptk.many b0 };; +.endp bn_mul_words# +#endif + +#if 1 +// +// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +// +.global bn_mul_add_words# +.proc bn_mul_add_words# +.align 64 +.skip 48 // makes the loop body aligned at 64-byte boundary +bn_mul_add_words: + .prologue + .save ar.pfs,r2 +{ .mmi; alloc r2=ar.pfs,4,4,0,8 + cmp4.le p6,p0=r34,r0 + .save ar.lc,r3 + mov r3=ar.lc };; +{ .mib; mov r8=r0 // return value + sub r10=r34,r0,1 +(p6) br.ret.spnt.many b0 };; + +{ .mib; setf.sig f8=r35 // w + .save pr,r9 + mov r9=pr + brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16 + } + .body +{ .mmi; ADDP r14=0,r32 // rp + ADDP r15=0,r33 // ap + mov ar.lc=r10 } +{ .mii; ADDP r16=0,r32 // rp copy + mov pr.rot=0x2001<<16 + // ------^----- serves as (p40) at first (p27) + mov ar.ec=11 };; + +// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on +// Itanium 2. Yes, unlike previous versions it scales:-) The previous +// version was performing *all* additions in the IALU and was starving +// for those even on Itanium 2. In this version one addition is +// moved to the FPU and is folded with the multiplication. This is at +// the cost of propagating the result from the previous call to this +// subroutine to L2 cache... In other words, negligible even for +// shorter keys. *Overall* performance improvement [over the previous +// version] varies from 11 to 22 percent depending on key length. +.L_bn_mul_add_words_ctop: +.pred.rel "mutex",p40,p42 +{ .mfi; (p23) getf.sig r36=f45 // low + (p20) xma.lu f42=f36,f8,f50 // low + (p40) add r39=r39,r35 } // (p27) +{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++) + (p20) xma.hu f36=f36,f8,f50 // high + (p42) add r39=r39,r35,1 };; // (p27) +{ .mmi; (p24) getf.sig r32=f40 // high + (p16) ldf8 f46=[r16],8 // *(rp1++) + (p40) cmp.ltu p41,p39=r39,r35 } // (p27) +{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++) + (p42) cmp.leu p41,p39=r39,r35 // (p27) + br.ctop.sptk .L_bn_mul_add_words_ctop};; +.L_bn_mul_add_words_cend: + +{ .mmi; .pred.rel "mutex",p40,p42 +(p40) add r8=r35,r0 +(p42) add r8=r35,r0,1 + mov pr=r9,0x1ffff } +{ .mib; rum 1<<5 // clear um.mfh + mov ar.lc=r3 + br.ret.sptk.many b0 };; +.endp bn_mul_add_words# +#endif + +#if 1 +// +// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num) +// +.global bn_sqr_words# +.proc bn_sqr_words# +.align 64 +.skip 32 // makes the loop body aligned at 64-byte boundary +bn_sqr_words: + .prologue + .save ar.pfs,r2 +{ .mii; alloc r2=ar.pfs,3,0,0,0 + sxt4 r34=r34 };; +{ .mii; cmp.le p6,p0=r34,r0 + mov r8=r0 } // return value +{ .mfb; ADDP r32=0,r32 + nop.f 0x0 +(p6) br.ret.spnt.many b0 };; + +{ .mii; sub r10=r34,r0,1 + .save ar.lc,r3 + mov r3=ar.lc + .save pr,r9 + mov r9=pr };; + + .body +{ .mib; ADDP r33=0,r33 + mov pr.rot=1<<16 + brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16 + } +{ .mii; add r34=8,r32 + mov ar.lc=r10 + mov ar.ec=18 };; + +// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's +// possible to compress the epilogue (I'm getting tired of writing this +// comment over and over) and get down to 2*n+16 at the cost of +// scalability.
The decision will very likely be reconsidered after the +// benchmark program is profiled. I.e. if the performance gain on Itanium +// appears larger than the loss on "wider" IA-64, then the loop should +// be explicitly split and the epilogue compressed. +.L_bn_sqr_words_ctop: +{ .mfi; (p16) ldf8 f32=[r33],8 + (p25) xmpy.lu f42=f41,f41 + (p0) nop.i 0x0 } +{ .mib; (p33) stf8 [r32]=f50,16 + (p0) nop.i 0x0 + (p0) nop.b 0x0 } +{ .mfi; (p0) nop.m 0x0 + (p25) xmpy.hu f52=f41,f41 + (p0) nop.i 0x0 } +{ .mib; (p33) stf8 [r34]=f60,16 + (p0) nop.i 0x0 + br.ctop.sptk .L_bn_sqr_words_ctop };; +.L_bn_sqr_words_cend: + +{ .mii; nop.m 0x0 + mov pr=r9,0x1ffff + mov ar.lc=r3 } +{ .mfb; rum 1<<5 // clear um.mfh + nop.f 0x0 + br.ret.sptk.many b0 };; +.endp bn_sqr_words# +#endif + +#if 1 +// Apparently we win nothing by implementing a special bn_sqr_comba8. +// Yes, it is possible to reduce the number of multiplications by +// almost a factor of two, but then the amount of additions would +// increase by a factor of two (as we would have to perform those +// otherwise performed by xma ourselves). Normally we would trade +// anyway, as multiplications are way more expensive, but not this +// time... The multiplication kernel is fully pipelined, and as we drain +// one 128-bit multiplication result per clock cycle, multiplications +// are effectively as inexpensive as additions. A special implementation +// might become of interest for "wider" IA-64 implementations, as you'll +// be able to get through the multiplication phase faster (there won't +// be any stall issues as discussed in the commentary section below, and +// you therefore will be able to employ all 4 FP units)... But in these +// Itanium days it's simply too hard to justify the effort, so I just +// drop down to the bn_mul_comba8 code:-) +// +// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a) +// +.global bn_sqr_comba8# +.proc bn_sqr_comba8# +.align 64 +bn_sqr_comba8: + .prologue + .save ar.pfs,r2 +#if defined(_HPUX_SOURCE) && !defined(_LP64) +{ .mii; alloc r2=ar.pfs,2,1,0,0 + addp4 r33=0,r33 + addp4 r32=0,r32 };; +{ .mii; +#else +{ .mii; alloc r2=ar.pfs,2,1,0,0 +#endif + mov r34=r33 + add r14=8,r33 };; + .body +{ .mii; add r17=8,r34 + add r15=16,r33 + add r18=16,r34 } +{ .mfb; add r16=24,r33 + br .L_cheat_entry_point8 };; +.endp bn_sqr_comba8# +#endif + +#if 1 +// I've estimated this routine to run in ~120 ticks, but in reality +// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra +// cycles consumed by instruction fetch? Or did I misinterpret some +// clause in the Itanium µ-architecture manual? Comments are welcome and +// highly appreciated. +// +// On Itanium 2 it takes ~190 ticks. This is because of stalls on the +// result from getf.sig. I do nothing about it at this point for +// reasons described below. +// +// However! It should be noted that even 160 ticks is a darn good result, +// as it's over 10 (yes, ten, spelled as t-e-n) times faster than the +// C version (compiled with gcc with inline assembler). I really +// kicked the compiler's butt here, didn't I? Yeah! This brings us to the +// following statement. It's a damn shame that this routine isn't called +// very often nowadays! According to the profiler most CPU time is +// consumed by bn_mul_add_words called from BN_from_montgomery. In +// order to estimate what we're missing, I've compared the performance +// of this routine against a "traditional" implementation, i.e.
against +// following routine: +// +// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]); +// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]); +// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]); +// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]); +// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]); +// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]); +// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]); +// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]); +// } +// +// The one below is over 8 times faster than the one above:-( Even +// more reasons to "combafy" bn_mul_add_mont... +// +// And yes, this routine really made me wish there were an optimizing +// assembler! It also feels like it deserves a dedication. +// +// To my wife for being there and to my kids... +// +// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +// +#define carry1 r14 +#define carry2 r15 +#define carry3 r34 +.global bn_mul_comba8# +.proc bn_mul_comba8# +.align 64 +bn_mul_comba8: + .prologue + .save ar.pfs,r2 +#if defined(_HPUX_SOURCE) && !defined(_LP64) +{ .mii; alloc r2=ar.pfs,3,0,0,0 + addp4 r33=0,r33 + addp4 r34=0,r34 };; +{ .mii; addp4 r32=0,r32 +#else +{ .mii; alloc r2=ar.pfs,3,0,0,0 +#endif + add r14=8,r33 + add r17=8,r34 } + .body +{ .mii; add r15=16,r33 + add r18=16,r34 + add r16=24,r33 } +.L_cheat_entry_point8: +{ .mmi; add r19=24,r34 + + ldf8 f32=[r33],32 };; + +{ .mmi; ldf8 f120=[r34],32 + ldf8 f121=[r17],32 } +{ .mmi; ldf8 f122=[r18],32 + ldf8 f123=[r19],32 };; +{ .mmi; ldf8 f124=[r34] + ldf8 f125=[r17] } +{ .mmi; ldf8 f126=[r18] + ldf8 f127=[r19] } + +{ .mmi; ldf8 f33=[r14],32 + ldf8 f34=[r15],32 } +{ .mmi; ldf8 f35=[r16],32;; + ldf8 f36=[r33] } +{ .mmi; ldf8 f37=[r14] + ldf8 f38=[r15] } +{ .mfi; ldf8 f39=[r16] +// -------\ Entering multiplier's heaven /------- +// ------------\ /------------ +// -----------------\ /----------------- +// ----------------------\/---------------------- + xma.hu f41=f32,f120,f0 } +{ .mfi; xma.lu f40=f32,f120,f0 };; // (*) +{ .mfi; xma.hu f51=f32,f121,f0 } +{ .mfi; xma.lu f50=f32,f121,f0 };; +{ .mfi; xma.hu f61=f32,f122,f0 } +{ .mfi; xma.lu f60=f32,f122,f0 };; +{ .mfi; xma.hu f71=f32,f123,f0 } +{ .mfi; xma.lu f70=f32,f123,f0 };; +{ .mfi; xma.hu f81=f32,f124,f0 } +{ .mfi; xma.lu f80=f32,f124,f0 };; +{ .mfi; xma.hu f91=f32,f125,f0 } +{ .mfi; xma.lu f90=f32,f125,f0 };; +{ .mfi; xma.hu f101=f32,f126,f0 } +{ .mfi; xma.lu f100=f32,f126,f0 };; +{ .mfi; xma.hu f111=f32,f127,f0 } +{ .mfi; xma.lu f110=f32,f127,f0 };;// +// (*) You can argue that splitting at every second bundle would +// prevent "wider" IA-64 implementations from achieving the peak +// performance. Well, not really... The catch is that if you +// intend to keep 4 FP units busy by splitting at every fourth +// bundle and thus perform these 16 multiplications in 4 ticks, +// the first bundle *below* would stall because the result from +// the first xma bundle *above* won't be available for another 3 +// ticks (if not more, being an optimist, I assume that "wider" +// implementation will have same latency:-). This stall will hold +// you back and the performance would be as if every second bundle +// were split *anyway*... 
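+//
+// In case the cascade above reads cryptically: each xma.lu/xma.hu pair
+// with identical operands yields the two halves of one 128-bit
+// multiply-accumulate. A C model of one pair (a sketch only, assuming
+// gcc-style unsigned __int128; f0 reads as zero):
+//
+//	unsigned __int128 t = (unsigned __int128)a*b + c;
+//	lo = (BN_ULONG)t;		// e.g. xma.lu	f40=f32,f120,f0
+//	hi = (BN_ULONG)(t>>64);		// e.g. xma.hu	f41=f32,f120,f0
+//
+// so the eight pairs above produce the eight 128-bit partial products
+// of a[0] against b[0..7], one per clock once the pipeline is full.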
+{ .mfi; getf.sig r16=f40 + xma.hu f42=f33,f120,f41 + add r33=8,r32 } +{ .mfi; xma.lu f41=f33,f120,f41 };; +{ .mfi; getf.sig r24=f50 + xma.hu f52=f33,f121,f51 } +{ .mfi; xma.lu f51=f33,f121,f51 };; +{ .mfi; st8 [r32]=r16,16 + xma.hu f62=f33,f122,f61 } +{ .mfi; xma.lu f61=f33,f122,f61 };; +{ .mfi; xma.hu f72=f33,f123,f71 } +{ .mfi; xma.lu f71=f33,f123,f71 };; +{ .mfi; xma.hu f82=f33,f124,f81 } +{ .mfi; xma.lu f81=f33,f124,f81 };; +{ .mfi; xma.hu f92=f33,f125,f91 } +{ .mfi; xma.lu f91=f33,f125,f91 };; +{ .mfi; xma.hu f102=f33,f126,f101 } +{ .mfi; xma.lu f101=f33,f126,f101 };; +{ .mfi; xma.hu f112=f33,f127,f111 } +{ .mfi; xma.lu f111=f33,f127,f111 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r25=f41 + xma.hu f43=f34,f120,f42 } +{ .mfi; xma.lu f42=f34,f120,f42 };; +{ .mfi; getf.sig r16=f60 + xma.hu f53=f34,f121,f52 } +{ .mfi; xma.lu f52=f34,f121,f52 };; +{ .mfi; getf.sig r17=f51 + xma.hu f63=f34,f122,f62 + add r25=r25,r24 } +{ .mfi; xma.lu f62=f34,f122,f62 + mov carry1=0 };; +{ .mfi; cmp.ltu p6,p0=r25,r24 + xma.hu f73=f34,f123,f72 } +{ .mfi; xma.lu f72=f34,f123,f72 };; +{ .mfi; st8 [r33]=r25,16 + xma.hu f83=f34,f124,f82 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f82=f34,f124,f82 };; +{ .mfi; xma.hu f93=f34,f125,f92 } +{ .mfi; xma.lu f92=f34,f125,f92 };; +{ .mfi; xma.hu f103=f34,f126,f102 } +{ .mfi; xma.lu f102=f34,f126,f102 };; +{ .mfi; xma.hu f113=f34,f127,f112 } +{ .mfi; xma.lu f112=f34,f127,f112 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r18=f42 + xma.hu f44=f35,f120,f43 + add r17=r17,r16 } +{ .mfi; xma.lu f43=f35,f120,f43 };; +{ .mfi; getf.sig r24=f70 + xma.hu f54=f35,f121,f53 } +{ .mfi; mov carry2=0 + xma.lu f53=f35,f121,f53 };; +{ .mfi; getf.sig r25=f61 + xma.hu f64=f35,f122,f63 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; add r18=r18,r17 + xma.lu f63=f35,f122,f63 };; +{ .mfi; getf.sig r26=f52 + xma.hu f74=f35,f123,f73 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,r17 + xma.lu f73=f35,f123,f73 + add r18=r18,carry1 };; +{ .mfi; + xma.hu f84=f35,f124,f83 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,carry1 + xma.lu f83=f35,f124,f83 };; +{ .mfi; st8 [r32]=r18,16 + xma.hu f94=f35,f125,f93 +(p7) add carry2=1,carry2 } +{ .mfi; xma.lu f93=f35,f125,f93 };; +{ .mfi; xma.hu f104=f35,f126,f103 } +{ .mfi; xma.lu f103=f35,f126,f103 };; +{ .mfi; xma.hu f114=f35,f127,f113 } +{ .mfi; mov carry1=0 + xma.lu f113=f35,f127,f113 + add r25=r25,r24 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r27=f43 + xma.hu f45=f36,f120,f44 + cmp.ltu p6,p0=r25,r24 } +{ .mfi; xma.lu f44=f36,f120,f44 + add r26=r26,r25 };; +{ .mfi; getf.sig r16=f80 + xma.hu f55=f36,f121,f54 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f54=f36,f121,f54 };; +{ .mfi; getf.sig r17=f71 + xma.hu f65=f36,f122,f64 + cmp.ltu p6,p0=r26,r25 } +{ .mfi; xma.lu f64=f36,f122,f64 + add r27=r27,r26 };; +{ .mfi; getf.sig r18=f62 + xma.hu f75=f36,f123,f74 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r27,r26 + xma.lu f74=f36,f123,f74 + add r27=r27,carry2 };; +{ .mfi; getf.sig r19=f53 + xma.hu f85=f36,f124,f84 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f84=f36,f124,f84 + cmp.ltu p6,p0=r27,carry2 };; +{ .mfi; st8 [r33]=r27,16 + xma.hu f95=f36,f125,f94 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f94=f36,f125,f94 };; +{ .mfi; xma.hu f105=f36,f126,f104 } +{ .mfi; mov carry2=0 + xma.lu f104=f36,f126,f104 + add r17=r17,r16 };; +{ .mfi; xma.hu f115=f36,f127,f114 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; xma.lu f114=f36,f127,f114 + add r18=r18,r17 };;// 
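+//
+// The integer side woven into the bundles above is the usual carry
+// chain: after an unsigned add, cmp.ltu recovers the carry-out, since
+// the sum wrapped around iff it compares below either addend. One link
+// of the chain as a C sketch (names are illustrative only):
+//
+//	sum = x + y;			// add	rS=rX,rY
+//	if (sum < x) carry1++;		// cmp.ltu p6,p0=rS,rX
+//					// (p6) add carry1=1,carry1
+//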
+//-------------------------------------------------// +{ .mfi; getf.sig r20=f44 + xma.hu f46=f37,f120,f45 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,r17 + xma.lu f45=f37,f120,f45 + add r19=r19,r18 };; +{ .mfi; getf.sig r24=f90 + xma.hu f56=f37,f121,f55 } +{ .mfi; xma.lu f55=f37,f121,f55 };; +{ .mfi; getf.sig r25=f81 + xma.hu f66=f37,f122,f65 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r19,r18 + xma.lu f65=f37,f122,f65 + add r20=r20,r19 };; +{ .mfi; getf.sig r26=f72 + xma.hu f76=f37,f123,f75 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r20,r19 + xma.lu f75=f37,f123,f75 + add r20=r20,carry1 };; +{ .mfi; getf.sig r27=f63 + xma.hu f86=f37,f124,f85 +(p7) add carry2=1,carry2 } +{ .mfi; xma.lu f85=f37,f124,f85 + cmp.ltu p7,p0=r20,carry1 };; +{ .mfi; getf.sig r28=f54 + xma.hu f96=f37,f125,f95 +(p7) add carry2=1,carry2 } +{ .mfi; st8 [r32]=r20,16 + xma.lu f95=f37,f125,f95 };; +{ .mfi; xma.hu f106=f37,f126,f105 } +{ .mfi; mov carry1=0 + xma.lu f105=f37,f126,f105 + add r25=r25,r24 };; +{ .mfi; xma.hu f116=f37,f127,f115 + cmp.ltu p6,p0=r25,r24 } +{ .mfi; xma.lu f115=f37,f127,f115 + add r26=r26,r25 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r29=f45 + xma.hu f47=f38,f120,f46 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r26,r25 + xma.lu f46=f38,f120,f46 + add r27=r27,r26 };; +{ .mfi; getf.sig r16=f100 + xma.hu f57=f38,f121,f56 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r27,r26 + xma.lu f56=f38,f121,f56 + add r28=r28,r27 };; +{ .mfi; getf.sig r17=f91 + xma.hu f67=f38,f122,f66 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r28,r27 + xma.lu f66=f38,f122,f66 + add r29=r29,r28 };; +{ .mfi; getf.sig r18=f82 + xma.hu f77=f38,f123,f76 +(p6) add carry1=1,carry1 } +{ .mfi; cmp.ltu p6,p0=r29,r28 + xma.lu f76=f38,f123,f76 + add r29=r29,carry2 };; +{ .mfi; getf.sig r19=f73 + xma.hu f87=f38,f124,f86 +(p6) add carry1=1,carry1 } +{ .mfi; xma.lu f86=f38,f124,f86 + cmp.ltu p6,p0=r29,carry2 };; +{ .mfi; getf.sig r20=f64 + xma.hu f97=f38,f125,f96 +(p6) add carry1=1,carry1 } +{ .mfi; st8 [r33]=r29,16 + xma.lu f96=f38,f125,f96 };; +{ .mfi; getf.sig r21=f55 + xma.hu f107=f38,f126,f106 } +{ .mfi; mov carry2=0 + xma.lu f106=f38,f126,f106 + add r17=r17,r16 };; +{ .mfi; xma.hu f117=f38,f127,f116 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; xma.lu f116=f38,f127,f116 + add r18=r18,r17 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r22=f46 + xma.hu f48=f39,f120,f47 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r18,r17 + xma.lu f47=f39,f120,f47 + add r19=r19,r18 };; +{ .mfi; getf.sig r24=f110 + xma.hu f58=f39,f121,f57 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r19,r18 + xma.lu f57=f39,f121,f57 + add r20=r20,r19 };; +{ .mfi; getf.sig r25=f101 + xma.hu f68=f39,f122,f67 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r20,r19 + xma.lu f67=f39,f122,f67 + add r21=r21,r20 };; +{ .mfi; getf.sig r26=f92 + xma.hu f78=f39,f123,f77 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r21,r20 + xma.lu f77=f39,f123,f77 + add r22=r22,r21 };; +{ .mfi; getf.sig r27=f83 + xma.hu f88=f39,f124,f87 +(p7) add carry2=1,carry2 } +{ .mfi; cmp.ltu p7,p0=r22,r21 + xma.lu f87=f39,f124,f87 + add r22=r22,carry1 };; +{ .mfi; getf.sig r28=f74 + xma.hu f98=f39,f125,f97 +(p7) add carry2=1,carry2 } +{ .mfi; xma.lu f97=f39,f125,f97 + cmp.ltu p7,p0=r22,carry1 };; +{ .mfi; getf.sig r29=f65 + xma.hu f108=f39,f126,f107 +(p7) add carry2=1,carry2 } +{ .mfi; st8 [r32]=r22,16 + xma.lu f107=f39,f126,f107 };; +{ .mfi; getf.sig r30=f56 + xma.hu f118=f39,f127,f117 } 
+{ .mfi; xma.lu f117=f39,f127,f117 };;// +//-------------------------------------------------// +// Leaving muliplier's heaven... Quite a ride, huh? + +{ .mii; getf.sig r31=f47 + add r25=r25,r24 + mov carry1=0 };; +{ .mii; getf.sig r16=f111 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mfb; getf.sig r17=f102 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r27=r27,r26 };; +{ .mfb; nop.m 0x0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,r26 + add r28=r28,r27 };; +{ .mii; getf.sig r18=f93 + add r17=r17,r16 + mov carry3=0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r28,r27 + add r29=r29,r28 };; +{ .mii; getf.sig r19=f84 + cmp.ltu p7,p0=r17,r16 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r29,r28 + add r30=r30,r29 };; +{ .mii; getf.sig r20=f75 + add r18=r18,r17 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r30,r29 + add r31=r31,r30 };; +{ .mfb; getf.sig r21=f66 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r18,r17 + add r19=r19,r18 } +{ .mfb; nop.m 0x0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r31,r30 + add r31=r31,carry2 };; +{ .mfb; getf.sig r22=f57 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r19,r18 + add r20=r20,r19 } +{ .mfb; nop.m 0x0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r31,carry2 };; +{ .mfb; getf.sig r23=f48 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r20,r19 + add r21=r21,r20 } +{ .mii; +(p6) add carry1=1,carry1 } +{ .mfb; st8 [r33]=r31,16 };; + +{ .mfb; getf.sig r24=f112 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r21,r20 + add r22=r22,r21 };; +{ .mfb; getf.sig r25=f103 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r22,r21 + add r23=r23,r22 };; +{ .mfb; getf.sig r26=f94 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r23,r22 + add r23=r23,carry1 };; +{ .mfb; getf.sig r27=f85 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p8=r23,carry1};; +{ .mii; getf.sig r28=f76 + add r25=r25,r24 + mov carry1=0 } +{ .mii; st8 [r32]=r23,16 + (p7) add carry2=1,carry3 + (p8) add carry2=0,carry3 };; + +{ .mfb; nop.m 0x0 } +{ .mii; getf.sig r29=f67 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mfb; getf.sig r30=f58 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r27=r27,r26 };; +{ .mfb; getf.sig r16=f113 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,r26 + add r28=r28,r27 };; +{ .mfb; getf.sig r17=f104 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r28,r27 + add r29=r29,r28 };; +{ .mfb; getf.sig r18=f95 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r29,r28 + add r30=r30,r29 };; +{ .mii; getf.sig r19=f86 + add r17=r17,r16 + mov carry3=0 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r30,r29 + add r30=r30,carry2 };; +{ .mii; getf.sig r20=f77 + cmp.ltu p7,p0=r17,r16 + add r18=r18,r17 } +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r30,carry2 };; +{ .mfb; getf.sig r21=f68 } +{ .mii; st8 [r33]=r30,16 +(p6) add carry1=1,carry1 };; + +{ .mfb; getf.sig r24=f114 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r18,r17 + add r19=r19,r18 };; +{ .mfb; getf.sig r25=f105 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r19,r18 + add r20=r20,r19 };; +{ .mfb; getf.sig r26=f96 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r20,r19 + add r21=r21,r20 };; +{ .mfb; getf.sig r27=f87 } +{ .mii; (p7) add carry3=1,carry3 + cmp.ltu p7,p0=r21,r20 + add r21=r21,carry1 };; +{ .mib; getf.sig r28=f78 + add r25=r25,r24 } +{ .mib; (p7) add carry3=1,carry3 + cmp.ltu p7,p8=r21,carry1};; +{ .mii; st8 [r32]=r21,16 + (p7) add carry2=1,carry3 + (p8) 
	add	carry2=0,carry3	}
+
+{ .mii;	mov	carry1=0
+	cmp.ltu	p6,p0=r25,r24
+	add	r26=r26,r25	};;
+{ .mfb;	getf.sig	r16=f115	}
+{ .mii;
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r26,r25
+	add	r27=r27,r26	};;
+{ .mfb;	getf.sig	r17=f106	}
+{ .mii;
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r27,r26
+	add	r28=r28,r27	};;
+{ .mfb;	getf.sig	r18=f97	}
+{ .mii;
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r28,r27
+	add	r28=r28,carry2	};;
+{ .mib;	getf.sig	r19=f88
+	add	r17=r17,r16	}
+{ .mib;
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r28,carry2	};;
+{ .mii;	st8	[r33]=r28,16
+(p6)	add	carry1=1,carry1	}
+
+{ .mii;	mov	carry2=0
+	cmp.ltu	p7,p0=r17,r16
+	add	r18=r18,r17	};;
+{ .mfb;	getf.sig	r24=f116	}
+{ .mii;	(p7)	add	carry2=1,carry2
+	cmp.ltu	p7,p0=r18,r17
+	add	r19=r19,r18	};;
+{ .mfb;	getf.sig	r25=f107	}
+{ .mii;	(p7)	add	carry2=1,carry2
+	cmp.ltu	p7,p0=r19,r18
+	add	r19=r19,carry1	};;
+{ .mfb;	getf.sig	r26=f98	}
+{ .mii;	(p7)	add	carry2=1,carry2
+	cmp.ltu	p7,p0=r19,carry1};;
+{ .mii;	st8	[r32]=r19,16
+	(p7)	add	carry2=1,carry2	}
+
+{ .mfb;	add	r25=r25,r24	};;
+
+{ .mfb;	getf.sig	r16=f117	}
+{ .mii;	mov	carry1=0
+	cmp.ltu	p6,p0=r25,r24
+	add	r26=r26,r25	};;
+{ .mfb;	getf.sig	r17=f108	}
+{ .mii;
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r26,r25
+	add	r26=r26,carry2	};;
+{ .mfb;	nop.m	0x0	}
+{ .mii;
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r26,carry2	};;
+{ .mii;	st8	[r33]=r26,16
+(p6)	add	carry1=1,carry1	}
+
+{ .mfb;	add	r17=r17,r16	};;
+{ .mfb;	getf.sig	r24=f118	}
+{ .mii;	mov	carry2=0
+	cmp.ltu	p7,p0=r17,r16
+	add	r17=r17,carry1	};;
+{ .mii;	(p7)	add	carry2=1,carry2
+	cmp.ltu	p7,p0=r17,carry1};;
+{ .mii;	st8	[r32]=r17
+	(p7)	add	carry2=1,carry2	};;
+{ .mfb;	add	r24=r24,carry2	};;
+{ .mib;	st8	[r33]=r24	}
+
+{ .mib;	rum	1<<5		// clear um.mfh
+	br.ret.sptk.many	b0	};;
+.endp	bn_mul_comba8#
+#undef	carry3
+#undef	carry2
+#undef	carry1
+#endif
+
+#if 1
+// It's possible to make it faster (see comment to bn_sqr_comba8), but
+// I reckon it isn't worth the effort. Basically because the routine
+// (actually both of them) is practically never called... So I just
+// play the same trick as with bn_sqr_comba8.
+//
+// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+//
+.global	bn_sqr_comba4#
+.proc	bn_sqr_comba4#
+.align	64
+bn_sqr_comba4:
+	.prologue
+	.save	ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii;	alloc	r2=ar.pfs,2,1,0,0
+	addp4	r32=0,r32
+	addp4	r33=0,r33	};;
+{ .mii;
+#else
+{ .mii;	alloc	r2=ar.pfs,2,1,0,0
+#endif
+	mov	r34=r33
+	add	r14=8,r33	};;
+	.body
+{ .mii;	add	r17=8,r34
+	add	r15=16,r33
+	add	r18=16,r34	}
+{ .mfb;	add	r16=24,r33
+	br	.L_cheat_entry_point4	};;
+.endp	bn_sqr_comba4#
+#endif
+
+#if 1
+// Runs in ~115 cycles, ~4.5 times faster than C. Well, whatever...
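+//
+// In C terms the cheat entry amounts to delegating squaring to the
+// multiplication code with both operands aliased (a sketch, not part
+// of the original source):
+//
+//	void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+//	{	bn_mul_comba4(r,a,a);	}
+//
+// bn_sqr_comba8 plays the same trick via .L_cheat_entry_point8.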
+// +// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +// +#define carry1 r14 +#define carry2 r15 +.global bn_mul_comba4# +.proc bn_mul_comba4# +.align 64 +bn_mul_comba4: + .prologue + .save ar.pfs,r2 +#if defined(_HPUX_SOURCE) && !defined(_LP64) +{ .mii; alloc r2=ar.pfs,3,0,0,0 + addp4 r33=0,r33 + addp4 r34=0,r34 };; +{ .mii; addp4 r32=0,r32 +#else +{ .mii; alloc r2=ar.pfs,3,0,0,0 +#endif + add r14=8,r33 + add r17=8,r34 } + .body +{ .mii; add r15=16,r33 + add r18=16,r34 + add r16=24,r33 };; +.L_cheat_entry_point4: +{ .mmi; add r19=24,r34 + + ldf8 f32=[r33] } + +{ .mmi; ldf8 f120=[r34] + ldf8 f121=[r17] };; +{ .mmi; ldf8 f122=[r18] + ldf8 f123=[r19] } + +{ .mmi; ldf8 f33=[r14] + ldf8 f34=[r15] } +{ .mfi; ldf8 f35=[r16] + + xma.hu f41=f32,f120,f0 } +{ .mfi; xma.lu f40=f32,f120,f0 };; +{ .mfi; xma.hu f51=f32,f121,f0 } +{ .mfi; xma.lu f50=f32,f121,f0 };; +{ .mfi; xma.hu f61=f32,f122,f0 } +{ .mfi; xma.lu f60=f32,f122,f0 };; +{ .mfi; xma.hu f71=f32,f123,f0 } +{ .mfi; xma.lu f70=f32,f123,f0 };;// +// Major stall takes place here, and 3 more places below. Result from +// first xma is not available for another 3 ticks. +{ .mfi; getf.sig r16=f40 + xma.hu f42=f33,f120,f41 + add r33=8,r32 } +{ .mfi; xma.lu f41=f33,f120,f41 };; +{ .mfi; getf.sig r24=f50 + xma.hu f52=f33,f121,f51 } +{ .mfi; xma.lu f51=f33,f121,f51 };; +{ .mfi; st8 [r32]=r16,16 + xma.hu f62=f33,f122,f61 } +{ .mfi; xma.lu f61=f33,f122,f61 };; +{ .mfi; xma.hu f72=f33,f123,f71 } +{ .mfi; xma.lu f71=f33,f123,f71 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r25=f41 + xma.hu f43=f34,f120,f42 } +{ .mfi; xma.lu f42=f34,f120,f42 };; +{ .mfi; getf.sig r16=f60 + xma.hu f53=f34,f121,f52 } +{ .mfi; xma.lu f52=f34,f121,f52 };; +{ .mfi; getf.sig r17=f51 + xma.hu f63=f34,f122,f62 + add r25=r25,r24 } +{ .mfi; mov carry1=0 + xma.lu f62=f34,f122,f62 };; +{ .mfi; st8 [r33]=r25,16 + xma.hu f73=f34,f123,f72 + cmp.ltu p6,p0=r25,r24 } +{ .mfi; xma.lu f72=f34,f123,f72 };;// +//-------------------------------------------------// +{ .mfi; getf.sig r18=f42 + xma.hu f44=f35,f120,f43 +(p6) add carry1=1,carry1 } +{ .mfi; add r17=r17,r16 + xma.lu f43=f35,f120,f43 + mov carry2=0 };; +{ .mfi; getf.sig r24=f70 + xma.hu f54=f35,f121,f53 + cmp.ltu p7,p0=r17,r16 } +{ .mfi; xma.lu f53=f35,f121,f53 };; +{ .mfi; getf.sig r25=f61 + xma.hu f64=f35,f122,f63 + add r18=r18,r17 } +{ .mfi; xma.lu f63=f35,f122,f63 +(p7) add carry2=1,carry2 };; +{ .mfi; getf.sig r26=f52 + xma.hu f74=f35,f123,f73 + cmp.ltu p7,p0=r18,r17 } +{ .mfi; xma.lu f73=f35,f123,f73 + add r18=r18,carry1 };; +//-------------------------------------------------// +{ .mii; st8 [r32]=r18,16 +(p7) add carry2=1,carry2 + cmp.ltu p7,p0=r18,carry1 };; + +{ .mfi; getf.sig r27=f43 // last major stall +(p7) add carry2=1,carry2 };; +{ .mii; getf.sig r16=f71 + add r25=r25,r24 + mov carry1=0 };; +{ .mii; getf.sig r17=f62 + cmp.ltu p6,p0=r25,r24 + add r26=r26,r25 };; +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r26,r25 + add r27=r27,r26 };; +{ .mii; +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,r26 + add r27=r27,carry2 };; +{ .mii; getf.sig r18=f53 +(p6) add carry1=1,carry1 + cmp.ltu p6,p0=r27,carry2 };; +{ .mfi; st8 [r33]=r27,16 +(p6) add carry1=1,carry1 } + +{ .mii; getf.sig r19=f44 + add r17=r17,r16 + mov carry2=0 };; +{ .mii; getf.sig r24=f72 + cmp.ltu p7,p0=r17,r16 + add r18=r18,r17 };; +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r18,r17 + add r19=r19,r18 };; +{ .mii; (p7) add carry2=1,carry2 + cmp.ltu p7,p0=r19,r18 + add r19=r19,carry1 };; +{ .mii; getf.sig r25=f63 + 
(p7)	add	carry2=1,carry2
+	cmp.ltu	p7,p0=r19,carry1};;
+{ .mii;	st8	[r32]=r19,16
+	(p7)	add	carry2=1,carry2	}
+
+{ .mii;	getf.sig	r26=f54
+	add	r25=r25,r24
+	mov	carry1=0	};;
+{ .mii;	getf.sig	r16=f73
+	cmp.ltu	p6,p0=r25,r24
+	add	r26=r26,r25	};;
+{ .mii;
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r26,r25
+	add	r26=r26,carry2	};;
+{ .mii;	getf.sig	r17=f64
+(p6)	add	carry1=1,carry1
+	cmp.ltu	p6,p0=r26,carry2	};;
+{ .mii;	st8	[r33]=r26,16
+(p6)	add	carry1=1,carry1	}
+
+{ .mii;	getf.sig	r24=f74
+	add	r17=r17,r16
+	mov	carry2=0	};;
+{ .mii;	cmp.ltu	p7,p0=r17,r16
+	add	r17=r17,carry1	};;
+
+{ .mii;	(p7)	add	carry2=1,carry2
+	cmp.ltu	p7,p0=r17,carry1};;
+{ .mii;	st8	[r32]=r17,16
+	(p7)	add	carry2=1,carry2	};;
+
+{ .mii;	add	r24=r24,carry2	};;
+{ .mii;	st8	[r33]=r24	}
+
+{ .mib;	rum	1<<5		// clear um.mfh
+	br.ret.sptk.many	b0	};;
+.endp	bn_mul_comba4#
+#undef	carry2
+#undef	carry1
+#endif
+
+#if 1
+//
+// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+//
+// In a nutshell it's a port of my MIPS III/IV implementation.
+//
+#define	AT	r14
+#define	H	r16
+#define	HH	r20
+#define	L	r17
+#define	D	r18
+#define	DH	r22
+#define	I	r21
+
+#if 0
+// Some preprocessors (most notably HP-UX) appear to be allergic to
+// macros enclosed in parentheses [as these three were].
+#define	cont	p16
+#define	break	p0	// p20
+#define	equ	p24
+#else
+cont=p16
+break=p0
+equ=p24
+#endif
+
+.global	abort#
+.global	bn_div_words#
+.proc	bn_div_words#
+.align	64
+bn_div_words:
+	.prologue
+	.save	ar.pfs,r2
+{ .mii;	alloc	r2=ar.pfs,3,5,0,8
+	.save	b0,r3
+	mov	r3=b0
+	.save	pr,r10
+	mov	r10=pr	};;
+{ .mmb;	cmp.eq	p6,p0=r34,r0
+	mov	r8=-1
+(p6)	br.ret.spnt.many	b0	};;
+
+	.body
+{ .mii;	mov	H=r32		// save h
+	mov	ar.ec=0		// don't rotate at exit
+	mov	pr.rot=0	}
+{ .mii;	mov	L=r33		// save l
+	mov	r36=r0	};;
+
+.L_divw_shift:	// -vv- note signed comparison
+{ .mfi;	(p0)	cmp.lt	p16,p0=r0,r34	// d
+	(p0)	shladd	r34=r34,1,r0	}
+{ .mfb;	(p0)	add	r36=1,r36
+	(p0)	nop.f	0x0
+(p16)	br.wtop.dpnt	.L_divw_shift	};;
+
+{ .mii;	mov	D=r34
+	shr.u	DH=r34,32
+	sub	r35=64,r36	};;
+{ .mii;	setf.sig	f7=DH
+	shr.u	AT=H,r35
+	mov	I=r36	};;
+{ .mib;	cmp.ne	p6,p0=r0,AT
+	shl	H=H,r36
+(p6)	br.call.spnt.clr	b0=abort	};;	// overflow, die...
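+
+// State at this point, as a C sketch (illustrative only, ignoring the
+// I==0 edge case; BN_BITS2 is 64 here): I holds the normalization
+// shift, D = d<<I has its top bit set, and h:l gets shifted left by I
+// as well, with abort() taken when significant bits of h would be
+// lost, i.e. when the quotient could not fit in BN_BITS2 bits:
+//
+//	for (I = 0; !(d >> (BN_BITS2-1)); I++) d <<= 1;
+//	if (h >> (BN_BITS2-I)) abort();		// overflow, die...
+//	h = h<<I | l>>(BN_BITS2-I);  l <<= I;	// shl/or continue below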
+
+{ .mfi;	fcvt.xuf.s1	f7=f7
+	shr.u	AT=L,r35	};;
+{ .mii;	shl	L=L,r36
+	or	H=H,AT	};;
+
+{ .mii;	nop.m	0x0
+	cmp.leu	p6,p0=D,H;;
+(p6)	sub	H=H,D	}
+
+{ .mlx;	setf.sig	f14=D
+	movl	AT=0xffffffff	};;
+///////////////////////////////////////////////////////////
+{ .mii;	setf.sig	f6=H
+	shr.u	HH=H,32;;
+	cmp.eq	p6,p7=HH,DH	};;
+{ .mfb;
+(p6)	setf.sig	f8=AT
+(p7)	fcvt.xuf.s1	f6=f6
+(p7)	br.call.sptk	b6=.L_udiv64_32_b6	};;
+
+{ .mfi;	getf.sig	r33=f8		// q
+	xmpy.lu	f9=f8,f14	}
+{ .mfi;	xmpy.hu	f10=f8,f14
+	shrp	H=H,L,32	};;
+
+{ .mmi;	getf.sig	r35=f9		// tl
+	getf.sig	r31=f10	};;	// th
+
+.L_divw_1st_iter:
+{ .mii;	(p0)	add	r32=-1,r33
+	(p0)	cmp.eq	equ,cont=HH,r31	};;
+{ .mii;	(p0)	cmp.ltu	p8,p0=r35,D
+	(p0)	sub	r34=r35,D
+	(equ)	cmp.leu	break,cont=r35,H	};;
+{ .mib;	(cont)	cmp.leu	cont,break=HH,r31
+	(p8)	add	r31=-1,r31
+(cont)	br.wtop.spnt	.L_divw_1st_iter	};;
+///////////////////////////////////////////////////////////
+{ .mii;	sub	H=H,r35
+	shl	r8=r33,32
+	shl	L=L,32	};;
+///////////////////////////////////////////////////////////
+{ .mii;	setf.sig	f6=H
+	shr.u	HH=H,32;;
+	cmp.eq	p6,p7=HH,DH	};;
+{ .mfb;
+(p6)	setf.sig	f8=AT
+(p7)	fcvt.xuf.s1	f6=f6
+(p7)	br.call.sptk	b6=.L_udiv64_32_b6	};;
+
+{ .mfi;	getf.sig	r33=f8		// q
+	xmpy.lu	f9=f8,f14	}
+{ .mfi;	xmpy.hu	f10=f8,f14
+	shrp	H=H,L,32	};;
+
+{ .mmi;	getf.sig	r35=f9		// tl
+	getf.sig	r31=f10	};;	// th
+
+.L_divw_2nd_iter:
+{ .mii;	(p0)	add	r32=-1,r33
+	(p0)	cmp.eq	equ,cont=HH,r31	};;
+{ .mii;	(p0)	cmp.ltu	p8,p0=r35,D
+	(p0)	sub	r34=r35,D
+	(equ)	cmp.leu	break,cont=r35,H	};;
+{ .mib;	(cont)	cmp.leu	cont,break=HH,r31
+	(p8)	add	r31=-1,r31
+(cont)	br.wtop.spnt	.L_divw_2nd_iter	};;
+///////////////////////////////////////////////////////////
+{ .mii;	sub	H=H,r35
+	or	r8=r8,r33
+	mov	ar.pfs=r2	};;
+{ .mii;	shr.u	r9=H,I		// remainder if anybody wants it
+	mov	pr=r10,0x1ffff	}
+{ .mfb;	br.ret.sptk.many	b0	};;
+
+// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
+// procedure.
+//
+// inputs:	f6 = (double)a, f7 = (double)b
+// output:	f8 = (int)(a/b)
+// clobbered:	f8,f9,f10,f11,pred
+pred=p15
+// One can argue that this snippet is copyrighted by Intel
+// Corporation, as it's essentially identical to one of those
+// found in the "Divide, Square Root and Remainder" section at
+// http://www.intel.com/software/products/opensource/libraries/num.htm.
+// Yes, I admit that the referred code was used as a template, but only
+// after I realized that there is hardly any other instruction sequence
+// that would perform this operation. I mean I figure that any
+// independent attempt to implement high-performance division will
+// result in code virtually identical to the Intel code. It should be
+// noted though that the division kernel below is 1 cycle faster than
+// the Intel one (note commented splits:-), not to mention the original
+// prologue (rather, the lack of one) and epilogue.
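+//
+// Why the kernel below converges (a back-of-the-envelope note): frcpa
+// delivers y0 with roughly 8.9 valid bits of 1/b. Writing y = (1-e)/b,
+// one refinement y' = y + (1 - b*y)*y = (1 - e^2)/b squares the
+// relative error, so the valid bits about double per step:
+// ~9 -> ~18 -> ~35 -> ~71, ample for a 64-bit quotient once the final
+// r2*y2 correction is applied.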
+.align 32 +.skip 16 +.L_udiv64_32_b6: + frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b + +(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0 +(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0 +(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0 +(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0 +(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0 +(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1 +(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1 +(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2 +(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2 + + fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3) + br.ret.sptk.many b6;; +.endp bn_div_words# +#endif diff --git a/openssl/crypto/bn/asm/mips3-mont.pl b/openssl/crypto/bn/asm/mips3-mont.pl new file mode 100644 index 00000000..8f9156e0 --- /dev/null +++ b/openssl/crypto/bn/asm/mips3-mont.pl @@ -0,0 +1,327 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# This module doesn't present direct interest for OpenSSL, because it +# doesn't provide better performance for longer keys. While 512-bit +# RSA private key operations are 40% faster, 1024-bit ones are hardly +# faster at all, while longer key operations are slower by up to 20%. +# It might be of interest to embedded system developers though, as +# it's smaller than 1KB, yet offers ~3x improvement over compiler +# generated code. +# +# The module targets N32 and N64 MIPS ABIs and currently is a bit +# IRIX-centric, i.e. is likely to require adaptation for other OSes. 
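+#
+# For orientation, the word-serial Montgomery multiplication performed
+# below, as a rough C sketch (illustrative only; tp[] stands for the
+# num+2-word scratch area the code carves off the stack, and the
+# vector-looking lines compress the .L1st/.Linner loops):
+#
+#	bn_mul_mont(rp,ap,bp,np,n0p,num)
+#	{	BN_ULONG n0 = *n0p;	/* -np[0]^-1 mod 2^64 */
+#		for (i=0; i<num; i++) {		/* .L1st, .Louter */
+#			BN_ULONG m1 = (tp[0] + ap[0]*bp[i]) * n0;
+#			tp = (tp + ap*bp[i] + np*m1) >> 64;	/* exact */
+#		}
+#		if (tp >= np) tp -= np;		/* .Lsub */
+#		copy tp to rp;			/* .Lcopy */
+#	}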
+ +# int bn_mul_mont( +$rp="a0"; # BN_ULONG *rp, +$ap="a1"; # const BN_ULONG *ap, +$bp="a2"; # const BN_ULONG *bp, +$np="a3"; # const BN_ULONG *np, +$n0="a4"; # const BN_ULONG *n0, +$num="a5"; # int num); + +$lo0="a6"; +$hi0="a7"; +$lo1="v0"; +$hi1="v1"; +$aj="t0"; +$bi="t1"; +$nj="t2"; +$tp="t3"; +$alo="s0"; +$ahi="s1"; +$nlo="s2"; +$nhi="s3"; +$tj="s4"; +$i="s5"; +$j="s6"; +$fp="t8"; +$m1="t9"; + +$FRAME=8*(2+8); + +$code=<<___; +#include <asm.h> +#include <regdef.h> + +.text + +.set noat +.set reorder + +.align 5 +.globl bn_mul_mont +.ent bn_mul_mont +bn_mul_mont: + .set noreorder + PTR_SUB sp,64 + move $fp,sp + .frame $fp,64,ra + slt AT,$num,4 + li v0,0 + beqzl AT,.Lproceed + nop + jr ra + PTR_ADD sp,$fp,64 + .set reorder +.align 5 +.Lproceed: + ld $n0,0($n0) + ld $bi,0($bp) # bp[0] + ld $aj,0($ap) # ap[0] + ld $nj,0($np) # np[0] + PTR_SUB sp,16 # place for two extra words + sll $num,3 + li AT,-4096 + PTR_SUB sp,$num + and sp,AT + + sd s0,0($fp) + sd s1,8($fp) + sd s2,16($fp) + sd s3,24($fp) + sd s4,32($fp) + sd s5,40($fp) + sd s6,48($fp) + sd s7,56($fp) + + dmultu $aj,$bi + ld $alo,8($ap) + ld $nlo,8($np) + mflo $lo0 + mfhi $hi0 + dmultu $lo0,$n0 + mflo $m1 + + dmultu $alo,$bi + mflo $alo + mfhi $ahi + + dmultu $nj,$m1 + mflo $lo1 + mfhi $hi1 + dmultu $nlo,$m1 + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + daddu $hi1,AT + mflo $nlo + mfhi $nhi + + move $tp,sp + li $j,16 +.align 4 +.L1st: + .set noreorder + PTR_ADD $aj,$ap,$j + ld $aj,($aj) + PTR_ADD $nj,$np,$j + ld $nj,($nj) + + dmultu $aj,$bi + daddu $lo0,$alo,$hi0 + daddu $lo1,$nlo,$hi1 + sltu AT,$lo0,$hi0 + sltu s7,$lo1,$hi1 + daddu $hi0,$ahi,AT + daddu $hi1,$nhi,s7 + mflo $alo + mfhi $ahi + + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + dmultu $nj,$m1 + daddu $hi1,AT + addu $j,8 + sd $lo1,($tp) + sltu s7,$j,$num + mflo $nlo + mfhi $nhi + + bnez s7,.L1st + PTR_ADD $tp,8 + .set reorder + + daddu $lo0,$alo,$hi0 + sltu AT,$lo0,$hi0 + daddu $hi0,$ahi,AT + + daddu $lo1,$nlo,$hi1 + sltu s7,$lo1,$hi1 + daddu $hi1,$nhi,s7 + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + daddu $hi1,AT + + sd $lo1,($tp) + + daddu $hi1,$hi0 + sltu AT,$hi1,$hi0 + sd $hi1,8($tp) + sd AT,16($tp) + + li $i,8 +.align 4 +.Louter: + PTR_ADD $bi,$bp,$i + ld $bi,($bi) + ld $aj,($ap) + ld $alo,8($ap) + ld $tj,(sp) + + dmultu $aj,$bi + ld $nj,($np) + ld $nlo,8($np) + mflo $lo0 + mfhi $hi0 + daddu $lo0,$tj + dmultu $lo0,$n0 + sltu AT,$lo0,$tj + daddu $hi0,AT + mflo $m1 + + dmultu $alo,$bi + mflo $alo + mfhi $ahi + + dmultu $nj,$m1 + mflo $lo1 + mfhi $hi1 + + dmultu $nlo,$m1 + daddu $lo1,$lo0 + sltu AT,$lo1,$lo0 + daddu $hi1,AT + mflo $nlo + mfhi $nhi + + move $tp,sp + li $j,16 + ld $tj,8($tp) +.align 4 +.Linner: + .set noreorder + PTR_ADD $aj,$ap,$j + ld $aj,($aj) + PTR_ADD $nj,$np,$j + ld $nj,($nj) + + dmultu $aj,$bi + daddu $lo0,$alo,$hi0 + daddu $lo1,$nlo,$hi1 + sltu AT,$lo0,$hi0 + sltu s7,$lo1,$hi1 + daddu $hi0,$ahi,AT + daddu $hi1,$nhi,s7 + mflo $alo + mfhi $ahi + + daddu $lo0,$tj + addu $j,8 + dmultu $nj,$m1 + sltu AT,$lo0,$tj + daddu $lo1,$lo0 + daddu $hi0,AT + sltu s7,$lo1,$lo0 + ld $tj,16($tp) + daddu $hi1,s7 + sltu AT,$j,$num + mflo $nlo + mfhi $nhi + sd $lo1,($tp) + bnez AT,.Linner + PTR_ADD $tp,8 + .set reorder + + daddu $lo0,$alo,$hi0 + sltu AT,$lo0,$hi0 + daddu $hi0,$ahi,AT + daddu $lo0,$tj + sltu s7,$lo0,$tj + daddu $hi0,s7 + + ld $tj,16($tp) + daddu $lo1,$nlo,$hi1 + sltu AT,$lo1,$hi1 + daddu $hi1,$nhi,AT + daddu $lo1,$lo0 + sltu s7,$lo1,$lo0 + daddu $hi1,s7 + sd $lo1,($tp) + + daddu $lo1,$hi1,$hi0 + sltu $hi1,$lo1,$hi0 + daddu $lo1,$tj + sltu AT,$lo1,$tj + daddu $hi1,AT + sd 
$lo1,8($tp) + sd $hi1,16($tp) + + addu $i,8 + sltu s7,$i,$num + bnez s7,.Louter + + .set noreorder + PTR_ADD $tj,sp,$num # &tp[num] + move $tp,sp + move $ap,sp + li $hi0,0 # clear borrow bit + +.align 4 +.Lsub: ld $lo0,($tp) + ld $lo1,($np) + PTR_ADD $tp,8 + PTR_ADD $np,8 + dsubu $lo1,$lo0,$lo1 # tp[i]-np[i] + sgtu AT,$lo1,$lo0 + dsubu $lo0,$lo1,$hi0 + sgtu $hi0,$lo0,$lo1 + sd $lo0,($rp) + or $hi0,AT + sltu AT,$tp,$tj + bnez AT,.Lsub + PTR_ADD $rp,8 + + dsubu $hi0,$hi1,$hi0 # handle upmost overflow bit + move $tp,sp + PTR_SUB $rp,$num # restore rp + not $hi1,$hi0 + + and $ap,$hi0,sp + and $bp,$hi1,$rp + or $ap,$ap,$bp # ap=borrow?tp:rp + +.align 4 +.Lcopy: ld $aj,($ap) + PTR_ADD $ap,8 + PTR_ADD $tp,8 + sd zero,-8($tp) + sltu AT,$tp,$tj + sd $aj,($rp) + bnez AT,.Lcopy + PTR_ADD $rp,8 + + ld s0,0($fp) + ld s1,8($fp) + ld s2,16($fp) + ld s3,24($fp) + ld s4,32($fp) + ld s5,40($fp) + ld s6,48($fp) + ld s7,56($fp) + li v0,1 + jr ra + PTR_ADD sp,$fp,64 + .set reorder +END(bn_mul_mont) +.rdata +.asciiz "Montgomery Multiplication for MIPS III/IV, CRYPTOGAMS by <appro\@openssl.org>" +___ + +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/mips3.s b/openssl/crypto/bn/asm/mips3.s new file mode 100644 index 00000000..dca4105c --- /dev/null +++ b/openssl/crypto/bn/asm/mips3.s @@ -0,0 +1,2201 @@ +.rdata +.asciiz "mips3.s, Version 1.1" +.asciiz "MIPS III/IV ISA artwork by Andy Polyakov <appro@fy.chalmers.se>" + +/* + * ==================================================================== + * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL + * project. + * + * Rights for redistribution and usage in source and binary forms are + * granted according to the OpenSSL license. Warranty of any kind is + * disclaimed. + * ==================================================================== + */ + +/* + * This is my modest contributon to the OpenSSL project (see + * http://www.openssl.org/ for more information about it) and is + * a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c + * module. For updates see http://fy.chalmers.se/~appro/hpe/. + * + * The module is designed to work with either of the "new" MIPS ABI(5), + * namely N32 or N64, offered by IRIX 6.x. It's not ment to work under + * IRIX 5.x not only because it doesn't support new ABIs but also + * because 5.x kernels put R4x00 CPU into 32-bit mode and all those + * 64-bit instructions (daddu, dmultu, etc.) found below gonna only + * cause illegal instruction exception:-( + * + * In addition the code depends on preprocessor flags set up by MIPSpro + * compiler driver (either as or cc) and therefore (probably?) can't be + * compiled by the GNU assembler. GNU C driver manages fine though... + * I mean as long as -mmips-as is specified or is the default option, + * because then it simply invokes /usr/bin/as which in turn takes + * perfect care of the preprocessor definitions. Another neat feature + * offered by the MIPSpro assembler is an optimization pass. This gave + * me the opportunity to have the code looking more regular as all those + * architecture dependent instruction rescheduling details were left to + * the assembler. Cool, huh? + * + * Performance improvement is astonishing! 'apps/openssl speed rsa dsa' + * goes way over 3 times faster! 
+ * + * <appro@fy.chalmers.se> + */ +#include <asm.h> +#include <regdef.h> + +#if _MIPS_ISA>=4 +#define MOVNZ(cond,dst,src) \ + movn dst,src,cond +#else +#define MOVNZ(cond,dst,src) \ + .set noreorder; \ + bnezl cond,.+8; \ + move dst,src; \ + .set reorder +#endif + +.text + +.set noat +.set reorder + +#define MINUS4 v1 + +.align 5 +LEAF(bn_mul_add_words) + .set noreorder + bgtzl a2,.L_bn_mul_add_words_proceed + ld t0,0(a1) + jr ra + move v0,zero + .set reorder + +.L_bn_mul_add_words_proceed: + li MINUS4,-4 + and ta0,a2,MINUS4 + move v0,zero + beqz ta0,.L_bn_mul_add_words_tail + +.L_bn_mul_add_words_loop: + dmultu t0,a3 + ld t1,0(a0) + ld t2,8(a1) + ld t3,8(a0) + ld ta0,16(a1) + ld ta1,16(a0) + daddu t1,v0 + sltu v0,t1,v0 /* All manuals say it "compares 32-bit + * values", but it seems to work fine + * even on 64-bit registers. */ + mflo AT + mfhi t0 + daddu t1,AT + daddu v0,t0 + sltu AT,t1,AT + sd t1,0(a0) + daddu v0,AT + + dmultu t2,a3 + ld ta2,24(a1) + ld ta3,24(a0) + daddu t3,v0 + sltu v0,t3,v0 + mflo AT + mfhi t2 + daddu t3,AT + daddu v0,t2 + sltu AT,t3,AT + sd t3,8(a0) + daddu v0,AT + + dmultu ta0,a3 + subu a2,4 + PTR_ADD a0,32 + PTR_ADD a1,32 + daddu ta1,v0 + sltu v0,ta1,v0 + mflo AT + mfhi ta0 + daddu ta1,AT + daddu v0,ta0 + sltu AT,ta1,AT + sd ta1,-16(a0) + daddu v0,AT + + + dmultu ta2,a3 + and ta0,a2,MINUS4 + daddu ta3,v0 + sltu v0,ta3,v0 + mflo AT + mfhi ta2 + daddu ta3,AT + daddu v0,ta2 + sltu AT,ta3,AT + sd ta3,-8(a0) + daddu v0,AT + .set noreorder + bgtzl ta0,.L_bn_mul_add_words_loop + ld t0,0(a1) + + bnezl a2,.L_bn_mul_add_words_tail + ld t0,0(a1) + .set reorder + +.L_bn_mul_add_words_return: + jr ra + +.L_bn_mul_add_words_tail: + dmultu t0,a3 + ld t1,0(a0) + subu a2,1 + daddu t1,v0 + sltu v0,t1,v0 + mflo AT + mfhi t0 + daddu t1,AT + daddu v0,t0 + sltu AT,t1,AT + sd t1,0(a0) + daddu v0,AT + beqz a2,.L_bn_mul_add_words_return + + ld t0,8(a1) + dmultu t0,a3 + ld t1,8(a0) + subu a2,1 + daddu t1,v0 + sltu v0,t1,v0 + mflo AT + mfhi t0 + daddu t1,AT + daddu v0,t0 + sltu AT,t1,AT + sd t1,8(a0) + daddu v0,AT + beqz a2,.L_bn_mul_add_words_return + + ld t0,16(a1) + dmultu t0,a3 + ld t1,16(a0) + daddu t1,v0 + sltu v0,t1,v0 + mflo AT + mfhi t0 + daddu t1,AT + daddu v0,t0 + sltu AT,t1,AT + sd t1,16(a0) + daddu v0,AT + jr ra +END(bn_mul_add_words) + +.align 5 +LEAF(bn_mul_words) + .set noreorder + bgtzl a2,.L_bn_mul_words_proceed + ld t0,0(a1) + jr ra + move v0,zero + .set reorder + +.L_bn_mul_words_proceed: + li MINUS4,-4 + and ta0,a2,MINUS4 + move v0,zero + beqz ta0,.L_bn_mul_words_tail + +.L_bn_mul_words_loop: + dmultu t0,a3 + ld t2,8(a1) + ld ta0,16(a1) + ld ta2,24(a1) + mflo AT + mfhi t0 + daddu v0,AT + sltu t1,v0,AT + sd v0,0(a0) + daddu v0,t1,t0 + + dmultu t2,a3 + subu a2,4 + PTR_ADD a0,32 + PTR_ADD a1,32 + mflo AT + mfhi t2 + daddu v0,AT + sltu t3,v0,AT + sd v0,-24(a0) + daddu v0,t3,t2 + + dmultu ta0,a3 + mflo AT + mfhi ta0 + daddu v0,AT + sltu ta1,v0,AT + sd v0,-16(a0) + daddu v0,ta1,ta0 + + + dmultu ta2,a3 + and ta0,a2,MINUS4 + mflo AT + mfhi ta2 + daddu v0,AT + sltu ta3,v0,AT + sd v0,-8(a0) + daddu v0,ta3,ta2 + .set noreorder + bgtzl ta0,.L_bn_mul_words_loop + ld t0,0(a1) + + bnezl a2,.L_bn_mul_words_tail + ld t0,0(a1) + .set reorder + +.L_bn_mul_words_return: + jr ra + +.L_bn_mul_words_tail: + dmultu t0,a3 + subu a2,1 + mflo AT + mfhi t0 + daddu v0,AT + sltu t1,v0,AT + sd v0,0(a0) + daddu v0,t1,t0 + beqz a2,.L_bn_mul_words_return + + ld t0,8(a1) + dmultu t0,a3 + subu a2,1 + mflo AT + mfhi t0 + daddu v0,AT + sltu t1,v0,AT + sd v0,8(a0) + daddu v0,t1,t0 + beqz 
a2,.L_bn_mul_words_return + + ld t0,16(a1) + dmultu t0,a3 + mflo AT + mfhi t0 + daddu v0,AT + sltu t1,v0,AT + sd v0,16(a0) + daddu v0,t1,t0 + jr ra +END(bn_mul_words) + +.align 5 +LEAF(bn_sqr_words) + .set noreorder + bgtzl a2,.L_bn_sqr_words_proceed + ld t0,0(a1) + jr ra + move v0,zero + .set reorder + +.L_bn_sqr_words_proceed: + li MINUS4,-4 + and ta0,a2,MINUS4 + move v0,zero + beqz ta0,.L_bn_sqr_words_tail + +.L_bn_sqr_words_loop: + dmultu t0,t0 + ld t2,8(a1) + ld ta0,16(a1) + ld ta2,24(a1) + mflo t1 + mfhi t0 + sd t1,0(a0) + sd t0,8(a0) + + dmultu t2,t2 + subu a2,4 + PTR_ADD a0,64 + PTR_ADD a1,32 + mflo t3 + mfhi t2 + sd t3,-48(a0) + sd t2,-40(a0) + + dmultu ta0,ta0 + mflo ta1 + mfhi ta0 + sd ta1,-32(a0) + sd ta0,-24(a0) + + + dmultu ta2,ta2 + and ta0,a2,MINUS4 + mflo ta3 + mfhi ta2 + sd ta3,-16(a0) + sd ta2,-8(a0) + + .set noreorder + bgtzl ta0,.L_bn_sqr_words_loop + ld t0,0(a1) + + bnezl a2,.L_bn_sqr_words_tail + ld t0,0(a1) + .set reorder + +.L_bn_sqr_words_return: + move v0,zero + jr ra + +.L_bn_sqr_words_tail: + dmultu t0,t0 + subu a2,1 + mflo t1 + mfhi t0 + sd t1,0(a0) + sd t0,8(a0) + beqz a2,.L_bn_sqr_words_return + + ld t0,8(a1) + dmultu t0,t0 + subu a2,1 + mflo t1 + mfhi t0 + sd t1,16(a0) + sd t0,24(a0) + beqz a2,.L_bn_sqr_words_return + + ld t0,16(a1) + dmultu t0,t0 + mflo t1 + mfhi t0 + sd t1,32(a0) + sd t0,40(a0) + jr ra +END(bn_sqr_words) + +.align 5 +LEAF(bn_add_words) + .set noreorder + bgtzl a3,.L_bn_add_words_proceed + ld t0,0(a1) + jr ra + move v0,zero + .set reorder + +.L_bn_add_words_proceed: + li MINUS4,-4 + and AT,a3,MINUS4 + move v0,zero + beqz AT,.L_bn_add_words_tail + +.L_bn_add_words_loop: + ld ta0,0(a2) + subu a3,4 + ld t1,8(a1) + and AT,a3,MINUS4 + ld t2,16(a1) + PTR_ADD a2,32 + ld t3,24(a1) + PTR_ADD a0,32 + ld ta1,-24(a2) + PTR_ADD a1,32 + ld ta2,-16(a2) + ld ta3,-8(a2) + daddu ta0,t0 + sltu t8,ta0,t0 + daddu t0,ta0,v0 + sltu v0,t0,ta0 + sd t0,-32(a0) + daddu v0,t8 + + daddu ta1,t1 + sltu t9,ta1,t1 + daddu t1,ta1,v0 + sltu v0,t1,ta1 + sd t1,-24(a0) + daddu v0,t9 + + daddu ta2,t2 + sltu t8,ta2,t2 + daddu t2,ta2,v0 + sltu v0,t2,ta2 + sd t2,-16(a0) + daddu v0,t8 + + daddu ta3,t3 + sltu t9,ta3,t3 + daddu t3,ta3,v0 + sltu v0,t3,ta3 + sd t3,-8(a0) + daddu v0,t9 + + .set noreorder + bgtzl AT,.L_bn_add_words_loop + ld t0,0(a1) + + bnezl a3,.L_bn_add_words_tail + ld t0,0(a1) + .set reorder + +.L_bn_add_words_return: + jr ra + +.L_bn_add_words_tail: + ld ta0,0(a2) + daddu ta0,t0 + subu a3,1 + sltu t8,ta0,t0 + daddu t0,ta0,v0 + sltu v0,t0,ta0 + sd t0,0(a0) + daddu v0,t8 + beqz a3,.L_bn_add_words_return + + ld t1,8(a1) + ld ta1,8(a2) + daddu ta1,t1 + subu a3,1 + sltu t9,ta1,t1 + daddu t1,ta1,v0 + sltu v0,t1,ta1 + sd t1,8(a0) + daddu v0,t9 + beqz a3,.L_bn_add_words_return + + ld t2,16(a1) + ld ta2,16(a2) + daddu ta2,t2 + sltu t8,ta2,t2 + daddu t2,ta2,v0 + sltu v0,t2,ta2 + sd t2,16(a0) + daddu v0,t8 + jr ra +END(bn_add_words) + +.align 5 +LEAF(bn_sub_words) + .set noreorder + bgtzl a3,.L_bn_sub_words_proceed + ld t0,0(a1) + jr ra + move v0,zero + .set reorder + +.L_bn_sub_words_proceed: + li MINUS4,-4 + and AT,a3,MINUS4 + move v0,zero + beqz AT,.L_bn_sub_words_tail + +.L_bn_sub_words_loop: + ld ta0,0(a2) + subu a3,4 + ld t1,8(a1) + and AT,a3,MINUS4 + ld t2,16(a1) + PTR_ADD a2,32 + ld t3,24(a1) + PTR_ADD a0,32 + ld ta1,-24(a2) + PTR_ADD a1,32 + ld ta2,-16(a2) + ld ta3,-8(a2) + sltu t8,t0,ta0 + dsubu t0,ta0 + dsubu ta0,t0,v0 + sd ta0,-32(a0) + MOVNZ (t0,v0,t8) + + sltu t9,t1,ta1 + dsubu t1,ta1 + dsubu ta1,t1,v0 + sd ta1,-24(a0) + MOVNZ (t1,v0,t9) + + + sltu t8,t2,ta2 + 
dsubu t2,ta2 + dsubu ta2,t2,v0 + sd ta2,-16(a0) + MOVNZ (t2,v0,t8) + + sltu t9,t3,ta3 + dsubu t3,ta3 + dsubu ta3,t3,v0 + sd ta3,-8(a0) + MOVNZ (t3,v0,t9) + + .set noreorder + bgtzl AT,.L_bn_sub_words_loop + ld t0,0(a1) + + bnezl a3,.L_bn_sub_words_tail + ld t0,0(a1) + .set reorder + +.L_bn_sub_words_return: + jr ra + +.L_bn_sub_words_tail: + ld ta0,0(a2) + subu a3,1 + sltu t8,t0,ta0 + dsubu t0,ta0 + dsubu ta0,t0,v0 + MOVNZ (t0,v0,t8) + sd ta0,0(a0) + beqz a3,.L_bn_sub_words_return + + ld t1,8(a1) + subu a3,1 + ld ta1,8(a2) + sltu t9,t1,ta1 + dsubu t1,ta1 + dsubu ta1,t1,v0 + MOVNZ (t1,v0,t9) + sd ta1,8(a0) + beqz a3,.L_bn_sub_words_return + + ld t2,16(a1) + ld ta2,16(a2) + sltu t8,t2,ta2 + dsubu t2,ta2 + dsubu ta2,t2,v0 + MOVNZ (t2,v0,t8) + sd ta2,16(a0) + jr ra +END(bn_sub_words) + +#undef MINUS4 + +.align 5 +LEAF(bn_div_3_words) + .set reorder + move a3,a0 /* we know that bn_div_words doesn't + * touch a3, ta2, ta3 and preserves a2 + * so that we can save two arguments + * and return address in registers + * instead of stack:-) + */ + ld a0,(a3) + move ta2,a1 + ld a1,-8(a3) + bne a0,a2,.L_bn_div_3_words_proceed + li v0,-1 + jr ra +.L_bn_div_3_words_proceed: + move ta3,ra + bal bn_div_words + move ra,ta3 + dmultu ta2,v0 + ld t2,-16(a3) + move ta0,zero + mfhi t1 + mflo t0 + sltu t8,t1,v1 +.L_bn_div_3_words_inner_loop: + bnez t8,.L_bn_div_3_words_inner_loop_done + sgeu AT,t2,t0 + seq t9,t1,v1 + and AT,t9 + sltu t3,t0,ta2 + daddu v1,a2 + dsubu t1,t3 + dsubu t0,ta2 + sltu t8,t1,v1 + sltu ta0,v1,a2 + or t8,ta0 + .set noreorder + beqzl AT,.L_bn_div_3_words_inner_loop + dsubu v0,1 + .set reorder +.L_bn_div_3_words_inner_loop_done: + jr ra +END(bn_div_3_words) + +.align 5 +LEAF(bn_div_words) + .set noreorder + bnezl a2,.L_bn_div_words_proceed + move v1,zero + jr ra + li v0,-1 /* I'd rather signal div-by-zero + * which can be done with 'break 7' */ + +.L_bn_div_words_proceed: + bltz a2,.L_bn_div_words_body + move t9,v1 + dsll a2,1 + bgtz a2,.-4 + addu t9,1 + + .set reorder + negu t1,t9 + li t2,-1 + dsll t2,t1 + and t2,a0 + dsrl AT,a1,t1 + .set noreorder + bnezl t2,.+8 + break 6 /* signal overflow */ + .set reorder + dsll a0,t9 + dsll a1,t9 + or a0,AT + +#define QT ta0 +#define HH ta1 +#define DH v1 +.L_bn_div_words_body: + dsrl DH,a2,32 + sgeu AT,a0,a2 + .set noreorder + bnezl AT,.+8 + dsubu a0,a2 + .set reorder + + li QT,-1 + dsrl HH,a0,32 + dsrl QT,32 /* q=0xffffffff */ + beq DH,HH,.L_bn_div_words_skip_div1 + ddivu zero,a0,DH + mflo QT +.L_bn_div_words_skip_div1: + dmultu a2,QT + dsll t3,a0,32 + dsrl AT,a1,32 + or t3,AT + mflo t0 + mfhi t1 +.L_bn_div_words_inner_loop1: + sltu t2,t3,t0 + seq t8,HH,t1 + sltu AT,HH,t1 + and t2,t8 + sltu v0,t0,a2 + or AT,t2 + .set noreorder + beqz AT,.L_bn_div_words_inner_loop1_done + dsubu t1,v0 + dsubu t0,a2 + b .L_bn_div_words_inner_loop1 + dsubu QT,1 + .set reorder +.L_bn_div_words_inner_loop1_done: + + dsll a1,32 + dsubu a0,t3,t0 + dsll v0,QT,32 + + li QT,-1 + dsrl HH,a0,32 + dsrl QT,32 /* q=0xffffffff */ + beq DH,HH,.L_bn_div_words_skip_div2 + ddivu zero,a0,DH + mflo QT +.L_bn_div_words_skip_div2: +#undef DH + dmultu a2,QT + dsll t3,a0,32 + dsrl AT,a1,32 + or t3,AT + mflo t0 + mfhi t1 +.L_bn_div_words_inner_loop2: + sltu t2,t3,t0 + seq t8,HH,t1 + sltu AT,HH,t1 + and t2,t8 + sltu v1,t0,a2 + or AT,t2 + .set noreorder + beqz AT,.L_bn_div_words_inner_loop2_done + dsubu t1,v1 + dsubu t0,a2 + b .L_bn_div_words_inner_loop2 + dsubu QT,1 + .set reorder +.L_bn_div_words_inner_loop2_done: +#undef HH + + dsubu a0,t3,t0 + or v0,QT + dsrl v1,a0,t9 /* v1 contains remainder if 
anybody wants it */ + dsrl a2,t9 /* restore a2 */ + jr ra +#undef QT +END(bn_div_words) + +#define a_0 t0 +#define a_1 t1 +#define a_2 t2 +#define a_3 t3 +#define b_0 ta0 +#define b_1 ta1 +#define b_2 ta2 +#define b_3 ta3 + +#define a_4 s0 +#define a_5 s2 +#define a_6 s4 +#define a_7 a1 /* once we load a[7] we don't need a anymore */ +#define b_4 s1 +#define b_5 s3 +#define b_6 s5 +#define b_7 a2 /* once we load b[7] we don't need b anymore */ + +#define t_1 t8 +#define t_2 t9 + +#define c_1 v0 +#define c_2 v1 +#define c_3 a3 + +#define FRAME_SIZE 48 + +.align 5 +LEAF(bn_mul_comba8) + .set noreorder + PTR_SUB sp,FRAME_SIZE + .frame sp,64,ra + .set reorder + ld a_0,0(a1) /* If compiled with -mips3 option on + * R5000 box assembler barks on this + * line with "shouldn't have mult/div + * as last instruction in bb (R10K + * bug)" warning. If anybody out there + * has a clue about how to circumvent + * this do send me a note. + * <appro@fy.chalmers.se> + */ + ld b_0,0(a2) + ld a_1,8(a1) + ld a_2,16(a1) + ld a_3,24(a1) + ld b_1,8(a2) + ld b_2,16(a2) + ld b_3,24(a2) + dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */ + sd s0,0(sp) + sd s1,8(sp) + sd s2,16(sp) + sd s3,24(sp) + sd s4,32(sp) + sd s5,40(sp) + mflo c_1 + mfhi c_2 + + dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */ + ld a_4,32(a1) + ld a_5,40(a1) + ld a_6,48(a1) + ld a_7,56(a1) + ld b_4,32(a2) + ld b_5,40(a2) + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu c_3,t_2,AT + dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */ + ld b_6,48(a2) + ld b_7,56(a2) + sd c_1,0(a0) /* r[0]=c1; */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu c_1,c_3,t_2 + sd c_2,8(a0) /* r[1]=c2; */ + + dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu c_2,c_1,t_2 + dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,16(a0) /* r[2]=c3; */ + + dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu c_3,c_2,t_2 + dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,24(a0) /* r[3]=c1; */ + + dmultu a_4,b_0 /* mul_add_c(a[4],b[0],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu c_1,c_3,t_2 + dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu 
AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_0,b_4 /* mul_add_c(a[0],b[4],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,32(a0) /* r[4]=c2; */ + + dmultu a_0,b_5 /* mul_add_c(a[0],b[5],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu c_2,c_1,t_2 + dmultu a_1,b_4 /* mul_add_c(a[1],b[4],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_4,b_1 /* mul_add_c(a[4],b[1],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_5,b_0 /* mul_add_c(a[5],b[0],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,40(a0) /* r[5]=c3; */ + + dmultu a_6,b_0 /* mul_add_c(a[6],b[0],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu c_3,c_2,t_2 + dmultu a_5,b_1 /* mul_add_c(a[5],b[1],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_4,b_2 /* mul_add_c(a[4],b[2],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_2,b_4 /* mul_add_c(a[2],b[4],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_1,b_5 /* mul_add_c(a[1],b[5],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_0,b_6 /* mul_add_c(a[0],b[6],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,48(a0) /* r[6]=c1; */ + + dmultu a_0,b_7 /* mul_add_c(a[0],b[7],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu c_1,c_3,t_2 + dmultu a_1,b_6 /* mul_add_c(a[1],b[6],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_2,b_5 /* mul_add_c(a[2],b[5],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_3,b_4 /* mul_add_c(a[3],b[4],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_4,b_3 /* mul_add_c(a[4],b[3],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_5,b_2 /* mul_add_c(a[5],b[2],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + 
daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_6,b_1 /* mul_add_c(a[6],b[1],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_7,b_0 /* mul_add_c(a[7],b[0],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,56(a0) /* r[7]=c2; */ + + dmultu a_7,b_1 /* mul_add_c(a[7],b[1],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu c_2,c_1,t_2 + dmultu a_6,b_2 /* mul_add_c(a[6],b[2],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_5,b_3 /* mul_add_c(a[5],b[3],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_4,b_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_3,b_5 /* mul_add_c(a[3],b[5],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_2,b_6 /* mul_add_c(a[2],b[6],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_1,b_7 /* mul_add_c(a[1],b[7],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,64(a0) /* r[8]=c3; */ + + dmultu a_2,b_7 /* mul_add_c(a[2],b[7],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu c_3,c_2,t_2 + dmultu a_3,b_6 /* mul_add_c(a[3],b[6],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_4,b_5 /* mul_add_c(a[4],b[5],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_5,b_4 /* mul_add_c(a[5],b[4],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_6,b_3 /* mul_add_c(a[6],b[3],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_7,b_2 /* mul_add_c(a[7],b[2],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,72(a0) /* r[9]=c1; */ + + dmultu a_7,b_3 /* mul_add_c(a[7],b[3],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu c_1,c_3,t_2 + dmultu a_6,b_4 /* mul_add_c(a[6],b[4],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_5,b_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_4,b_6 /* mul_add_c(a[4],b[6],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_3,b_7 /* mul_add_c(a[3],b[7],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + 
daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,80(a0) /* r[10]=c2; */ + + dmultu a_4,b_7 /* mul_add_c(a[4],b[7],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu c_2,c_1,t_2 + dmultu a_5,b_6 /* mul_add_c(a[5],b[6],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_6,b_5 /* mul_add_c(a[6],b[5],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_7,b_4 /* mul_add_c(a[7],b[4],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,88(a0) /* r[11]=c3; */ + + dmultu a_7,b_5 /* mul_add_c(a[7],b[5],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu c_3,c_2,t_2 + dmultu a_6,b_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_5,b_7 /* mul_add_c(a[5],b[7],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,96(a0) /* r[12]=c1; */ + + dmultu a_6,b_7 /* mul_add_c(a[6],b[7],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu c_1,c_3,t_2 + dmultu a_7,b_6 /* mul_add_c(a[7],b[6],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,104(a0) /* r[13]=c2; */ + + dmultu a_7,b_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */ + ld s0,0(sp) + ld s1,8(sp) + ld s2,16(sp) + ld s3,24(sp) + ld s4,32(sp) + ld s5,40(sp) + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sd c_3,112(a0) /* r[14]=c3; */ + sd c_1,120(a0) /* r[15]=c1; */ + + PTR_ADD sp,FRAME_SIZE + + jr ra +END(bn_mul_comba8) + +.align 5 +LEAF(bn_mul_comba4) + .set reorder + ld a_0,0(a1) + ld b_0,0(a2) + ld a_1,8(a1) + ld a_2,16(a1) + dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */ + ld a_3,24(a1) + ld b_1,8(a2) + ld b_2,16(a2) + ld b_3,24(a2) + mflo c_1 + mfhi c_2 + sd c_1,0(a0) + + dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu c_3,t_2,AT + dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu c_1,c_3,t_2 + sd c_2,8(a0) + + dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu c_2,c_1,t_2 + dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,16(a0) + + dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu c_3,c_2,t_2 + dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + 
sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,24(a0) + + dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu c_1,c_3,t_2 + dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,32(a0) + + dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu c_2,c_1,t_2 + dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,40(a0) + + dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sd c_1,48(a0) + sd c_2,56(a0) + + jr ra +END(bn_mul_comba4) + +#undef a_4 +#undef a_5 +#undef a_6 +#undef a_7 +#define a_4 b_0 +#define a_5 b_1 +#define a_6 b_2 +#define a_7 b_3 + +.align 5 +LEAF(bn_sqr_comba8) + .set reorder + ld a_0,0(a1) + ld a_1,8(a1) + ld a_2,16(a1) + ld a_3,24(a1) + + dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */ + ld a_4,32(a1) + ld a_5,40(a1) + ld a_6,48(a1) + ld a_7,56(a1) + mflo c_1 + mfhi c_2 + sd c_1,0(a0) + + dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt c_1,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu c_3,t_2,AT + sd c_2,8(a0) + + dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt c_2,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,16(a0) + + dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt c_3,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_3,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,24(a0) + + dmultu a_4,a_0 /* mul_add_c2(a[4],b[0],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt c_1,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_1,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu 
c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,32(a0) + + dmultu a_0,a_5 /* mul_add_c2(a[0],b[5],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt c_2,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_1,a_4 /* mul_add_c2(a[1],b[4],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_2,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_2,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,40(a0) + + dmultu a_6,a_0 /* mul_add_c2(a[6],b[0],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt c_3,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_5,a_1 /* mul_add_c2(a[5],b[1],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_3,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_4,a_2 /* mul_add_c2(a[4],b[2],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_3,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,48(a0) + + dmultu a_0,a_7 /* mul_add_c2(a[0],b[7],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt c_1,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_1,a_6 /* mul_add_c2(a[1],b[6],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_1,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_2,a_5 /* mul_add_c2(a[2],b[5],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_1,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_3,a_4 /* mul_add_c2(a[3],b[4],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_1,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,56(a0) + + dmultu a_7,a_1 /* mul_add_c2(a[7],b[1],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt c_2,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_6,a_2 /* mul_add_c2(a[6],b[2],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_2,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_5,a_3 /* mul_add_c2(a[5],b[3],c3,c1,c2); */ + mflo t_1 + 
mfhi t_2 + slt AT,t_2,zero + daddu c_2,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_4,a_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,64(a0) + + dmultu a_2,a_7 /* mul_add_c2(a[2],b[7],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt c_3,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_3,a_6 /* mul_add_c2(a[3],b[6],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_3,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_4,a_5 /* mul_add_c2(a[4],b[5],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_3,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,72(a0) + + dmultu a_7,a_3 /* mul_add_c2(a[7],b[3],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt c_1,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_6,a_4 /* mul_add_c2(a[6],b[4],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_1,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + dmultu a_5,a_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,80(a0) + + dmultu a_4,a_7 /* mul_add_c2(a[4],b[7],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt c_2,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + dmultu a_5,a_6 /* mul_add_c2(a[5],b[6],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + slt AT,t_2,zero + daddu c_2,AT + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sltu AT,c_1,t_2 + daddu c_2,AT + sd c_3,88(a0) + + dmultu a_7,a_5 /* mul_add_c2(a[7],b[5],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + slt c_3,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + dmultu a_6,a_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */ + mflo t_1 + mfhi t_2 + daddu c_1,t_1 + sltu AT,c_1,t_1 + daddu t_2,AT + daddu c_2,t_2 + sltu AT,c_2,t_2 + daddu c_3,AT + sd c_1,96(a0) + + dmultu a_6,a_7 /* mul_add_c2(a[6],b[7],c2,c3,c1); */ + mflo t_1 + mfhi t_2 + slt c_1,t_2,zero + dsll t_2,1 + slt a2,t_1,zero + daddu t_2,a2 + dsll t_1,1 + daddu c_2,t_1 + sltu AT,c_2,t_1 + daddu t_2,AT + daddu c_3,t_2 + sltu AT,c_3,t_2 + daddu c_1,AT + sd c_2,104(a0) + + dmultu a_7,a_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */ + mflo t_1 + mfhi t_2 + daddu c_3,t_1 + sltu AT,c_3,t_1 + daddu t_2,AT + daddu c_1,t_2 + sd c_3,112(a0) + sd c_1,120(a0) + + jr ra +END(bn_sqr_comba8) + +.align 5 +LEAF(bn_sqr_comba4) + .set reorder + ld a_0,0(a1) + ld a_1,8(a1) + ld a_2,16(a1) + ld a_3,24(a1) + dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */ + mflo c_1 + mfhi c_2 
+	sd	c_1,0(a0)
+
+	dmultu	a_0,a_1		/* mul_add_c2(a[0],b[1],c2,c3,c1); */
+	mflo	t_1
+	mfhi	t_2
+	slt	c_1,t_2,zero
+	dsll	t_2,1
+	slt	a2,t_1,zero
+	daddu	t_2,a2
+	dsll	t_1,1
+	daddu	c_2,t_1
+	sltu	AT,c_2,t_1
+	daddu	c_3,t_2,AT
+	sd	c_2,8(a0)
+
+	dmultu	a_2,a_0		/* mul_add_c2(a[2],b[0],c3,c1,c2); */
+	mflo	t_1
+	mfhi	t_2
+	slt	c_2,t_2,zero
+	dsll	t_2,1
+	slt	a2,t_1,zero
+	daddu	t_2,a2
+	dsll	t_1,1
+	daddu	c_3,t_1
+	sltu	AT,c_3,t_1
+	daddu	t_2,AT
+	daddu	c_1,t_2
+	sltu	AT,c_1,t_2
+	daddu	c_2,AT
+	dmultu	a_1,a_1		/* mul_add_c(a[1],b[1],c3,c1,c2); */
+	mflo	t_1
+	mfhi	t_2
+	daddu	c_3,t_1
+	sltu	AT,c_3,t_1
+	daddu	t_2,AT
+	daddu	c_1,t_2
+	sltu	AT,c_1,t_2
+	daddu	c_2,AT
+	sd	c_3,16(a0)
+
+	dmultu	a_0,a_3		/* mul_add_c2(a[0],b[3],c1,c2,c3); */
+	mflo	t_1
+	mfhi	t_2
+	slt	c_3,t_2,zero
+	dsll	t_2,1
+	slt	a2,t_1,zero
+	daddu	t_2,a2
+	dsll	t_1,1
+	daddu	c_1,t_1
+	sltu	AT,c_1,t_1
+	daddu	t_2,AT
+	daddu	c_2,t_2
+	sltu	AT,c_2,t_2
+	daddu	c_3,AT
+	dmultu	a_1,a_2		/* mul_add_c2(a[1],b[2],c1,c2,c3); */
+	mflo	t_1
+	mfhi	t_2
+	slt	AT,t_2,zero
+	daddu	c_3,AT
+	dsll	t_2,1
+	slt	a2,t_1,zero
+	daddu	t_2,a2
+	dsll	t_1,1
+	daddu	c_1,t_1
+	sltu	AT,c_1,t_1
+	daddu	t_2,AT
+	daddu	c_2,t_2
+	sltu	AT,c_2,t_2
+	daddu	c_3,AT
+	sd	c_1,24(a0)
+
+	dmultu	a_3,a_1		/* mul_add_c2(a[3],b[1],c2,c3,c1); */
+	mflo	t_1
+	mfhi	t_2
+	slt	c_1,t_2,zero
+	dsll	t_2,1
+	slt	a2,t_1,zero
+	daddu	t_2,a2
+	dsll	t_1,1
+	daddu	c_2,t_1
+	sltu	AT,c_2,t_1
+	daddu	t_2,AT
+	daddu	c_3,t_2
+	sltu	AT,c_3,t_2
+	daddu	c_1,AT
+	dmultu	a_2,a_2		/* mul_add_c(a[2],b[2],c2,c3,c1); */
+	mflo	t_1
+	mfhi	t_2
+	daddu	c_2,t_1
+	sltu	AT,c_2,t_1
+	daddu	t_2,AT
+	daddu	c_3,t_2
+	sltu	AT,c_3,t_2
+	daddu	c_1,AT
+	sd	c_2,32(a0)
+
+	dmultu	a_2,a_3		/* mul_add_c2(a[2],b[3],c3,c1,c2); */
+	mflo	t_1
+	mfhi	t_2
+	slt	c_2,t_2,zero
+	dsll	t_2,1
+	slt	a2,t_1,zero
+	daddu	t_2,a2
+	dsll	t_1,1
+	daddu	c_3,t_1
+	sltu	AT,c_3,t_1
+	daddu	t_2,AT
+	daddu	c_1,t_2
+	sltu	AT,c_1,t_2
+	daddu	c_2,AT
+	sd	c_3,40(a0)
+
+	dmultu	a_3,a_3		/* mul_add_c(a[3],b[3],c1,c2,c3); */
+	mflo	t_1
+	mfhi	t_2
+	daddu	c_1,t_1
+	sltu	AT,c_1,t_1
+	daddu	t_2,AT
+	daddu	c_2,t_2
+	sd	c_1,48(a0)
+	sd	c_2,56(a0)
+
+	jr	ra
+END(bn_sqr_comba4)
diff --git a/openssl/crypto/bn/asm/pa-risc2.s b/openssl/crypto/bn/asm/pa-risc2.s
new file mode 100644
index 00000000..f3b16290
--- /dev/null
+++ b/openssl/crypto/bn/asm/pa-risc2.s
@@ -0,0 +1,1618 @@
+;
+; PA-RISC 2.0 implementation of bn_asm code, based on the
+; 64-bit version of the code. This code is effectively the
+; same as the 64-bit version except that the register model is
+; slightly different, since all values must be 32-bit between
+; function calls. Thus the 64-bit return values are returned
+; in %ret0 and %ret1 instead of just %ret0 as is done in the
+; 64-bit version.
+;
+; This code is approximately 2x faster than the C version
+; for RSA/DSA.
+;
+; See http://devresource.hp.com/ for more details on the PA-RISC
+; architecture. Also see the book "PA-RISC 2.0 Architecture"
+; by Gerry Kane for information on the instruction set architecture.
+;
+; Code written by Chris Ruemmler (with some help from the HP C
+; compiler).
+;
+; The code compiles with HP's assembler.
+;
+
+	.level	2.0N
+	.space	$TEXT$
+	.subspa	$CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
+
+;
+; Global Register definitions used for the routines.
+;
+; Some information about HP's runtime architecture for 32-bits.
+;
+; "Caller save" means the calling function must save the register
+; if it wants the register to be preserved.
+; "Callee save" means if a function uses the register, it must save
+; the value before using it.
+;
+; For the floating point registers
+;
+; "caller save" registers: fr4-fr11, fr22-fr31
+; "callee save" registers: fr12-fr21
+; "special" registers: fr0-fr3 (status and exception registers)
+;
+; For the integer registers
+; value zero		: r0
+; "caller save" registers: r1,r19-r26
+; "callee save" registers: r3-r18
+; return register	: r2  (rp)
+; return values		: r28,r29  (ret0,ret1)
+; Stack pointer		: r30  (sp)
+; millicode return ptr	: r31  (also a caller save register)
+
+
+;
+; Arguments to the routines
+;
+r_ptr	.reg	%r26
+a_ptr	.reg	%r25
+b_ptr	.reg	%r24
+num	.reg	%r24
+n	.reg	%r23
+
+;
+; Note that the "w" argument for bn_mul_add_words and bn_mul_words
+; is passed on the stack at a delta of -56 from the top of stack
+; as the routine is entered.
+;
+
+;
+; Globals used in some routines
+;
+
+top_overflow	.reg	%r23
+high_mask	.reg	%r22	; value 0xffffffff80000000L
+
+
+;------------------------------------------------------------------------------
+;
+; bn_mul_add_words
+;
+;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
+;	int num, BN_ULONG w)
+;
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = num
+; -56(sp) = w
+;
+; Local register definitions
+;
+
+fm1	.reg	%fr22
+fm	.reg	%fr23
+ht_temp	.reg	%fr24
+ht_temp_1	.reg	%fr25
+lt_temp	.reg	%fr26
+lt_temp_1	.reg	%fr27
+fm1_1	.reg	%fr28
+fm_1	.reg	%fr29
+
+fw_h	.reg	%fr7L
+fw_l	.reg	%fr7R
+fw	.reg	%fr7
+
+fht_0	.reg	%fr8L
+flt_0	.reg	%fr8R
+t_float_0	.reg	%fr8
+
+fht_1	.reg	%fr9L
+flt_1	.reg	%fr9R
+t_float_1	.reg	%fr9
+
+tmp_0	.reg	%r31
+tmp_1	.reg	%r21
+m_0	.reg	%r20
+m_1	.reg	%r19
+ht_0	.reg	%r1
+ht_1	.reg	%r3
+lt_0	.reg	%r4
+lt_1	.reg	%r5
+m1_0	.reg	%r6
+m1_1	.reg	%r7
+rp_val	.reg	%r8
+rp_val_1	.reg	%r9
+
+bn_mul_add_words
+	.export	bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
+	.proc
+	.callinfo frame=128
+	.entry
+	.align 64
+
+	STD	%r3,0(%sp)	; save r3
+	STD	%r4,8(%sp)	; save r4
+	NOP			; Needed to make the loop 16-byte aligned
+	NOP			; Needed to make the loop 16-byte aligned
+
+	STD	%r5,16(%sp)	; save r5
+	NOP
+	STD	%r6,24(%sp)	; save r6
+	STD	%r7,32(%sp)	; save r7
+
+	STD	%r8,40(%sp)	; save r8
+	STD	%r9,48(%sp)	; save r9
+	COPY	%r0,%ret1	; return 0 by default
+	DEPDI,Z	1,31,1,top_overflow	; top_overflow = 1 << 32
+
+	CMPIB,>=	0,num,bn_mul_add_words_exit	; if (num <= 0) then exit
+	LDO	128(%sp),%sp	; bump stack
+
+	;
+	; The loop is unrolled twice, so if there is only 1 number
+	; then go straight to the cleanup code.
+	;
+	CMPIB,=	1,num,bn_mul_add_words_single_top
+	FLDD	-184(%sp),fw	; (-56-128) load up w into fw (fw_h/fw_l)
+
+	;
+	; This loop is unrolled 2 times (64-byte aligned as well)
+	;
+	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
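+	;
+	; A rough C sketch of one product (an illustrative annotation,
+	; assuming 64-bit BN_ULONG): each 64x64->128 multiply below is
+	; built from four 32x32 XMPYU products, with a = ht:lt and
+	; w = wh:wl split into 32-bit halves:
+	;
+	;	m  = lt*wh;  m1 = ht*wl;		/* cross products */
+	;	ht = ht*wh;  lt = lt*wl;		/* outer products */
+	;	m += m1;
+	;	if (m < m1) ht += (BN_ULONG)1 << 32;	/* carry from m+m1 */
+	;	ht += m >> 32;
+	;	lt += m << 32;
+	;	if (lt < (m << 32)) ht++;		/* product is now ht:lt */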
+ ; +bn_mul_add_words_unroll2 + + FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R) + FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R) + LDD 0(r_ptr),rp_val ; rp[0] + LDD 8(r_ptr),rp_val_1 ; rp[1] + + XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l + XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l + FSTD fm1,-16(%sp) ; -16(sp) = m1[0] + FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1] + + XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h + XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h + FSTD fm,-8(%sp) ; -8(sp) = m[0] + FSTD fm_1,-40(%sp) ; -40(sp) = m[1] + + XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h + XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h + FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp + FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1 + + XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l + XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l + FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp + FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1 + + LDD -8(%sp),m_0 ; m[0] + LDD -40(%sp),m_1 ; m[1] + LDD -16(%sp),m1_0 ; m1[0] + LDD -48(%sp),m1_1 ; m1[1] + + LDD -24(%sp),ht_0 ; ht[0] + LDD -56(%sp),ht_1 ; ht[1] + ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0]; + ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1]; + + LDD -32(%sp),lt_0 + LDD -64(%sp),lt_1 + CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0]) + ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32) + + CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1]) + ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32) + EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32 + DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32 + + EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32 + DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32 + ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32) + ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32) + + ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0]; + ADD,DC ht_0,%r0,ht_0 ; ht[0]++ + ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1]; + ADD,DC ht_1,%r0,ht_1 ; ht[1]++ + + ADD %ret1,lt_0,lt_0 ; lt[0] = lt[0] + c; + ADD,DC ht_0,%r0,ht_0 ; ht[0]++ + ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0] + ADD,DC ht_0,%r0,ht_0 ; ht[0]++ + + LDO -2(num),num ; num = num - 2; + ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c); + ADD,DC ht_1,%r0,ht_1 ; ht[1]++ + STD lt_0,0(r_ptr) ; rp[0] = lt[0] + + ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1] + ADD,DC ht_1,%r0,%ret1 ; ht[1]++ + LDO 16(a_ptr),a_ptr ; a_ptr += 2 + + STD lt_1,8(r_ptr) ; rp[1] = lt[1] + CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do + LDO 16(r_ptr),r_ptr ; r_ptr += 2 + + CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one + + ; + ; Top of loop aligned on 64-byte boundary + ; +bn_mul_add_words_single_top + FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R) + LDD 0(r_ptr),rp_val ; rp[0] + LDO 8(a_ptr),a_ptr ; a_ptr++ + XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l + FSTD fm1,-16(%sp) ; -16(sp) = m1 + XMPYU flt_0,fw_h,fm ; m = lt*fw_h + FSTD fm,-8(%sp) ; -8(sp) = m + XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h + FSTD ht_temp,-24(%sp) ; -24(sp) = ht + XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l + FSTD lt_temp,-32(%sp) ; -32(sp) = lt + + LDD -8(%sp),m_0 + LDD -16(%sp),m1_0 ; m1 = temp1 + ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1; + LDD -24(%sp),ht_0 + LDD -32(%sp),lt_0 + + CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1) + ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32) + + EXTRD,U tmp_0,31,32,m_0 ; m>>32 + DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32 + + ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32) + ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1; + ADD,DC ht_0,%r0,ht_0 ; ht++ + ADD %ret1,tmp_0,lt_0 ; lt = lt + c; + ADD,DC ht_0,%r0,ht_0 ; ht++ + ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0] + ADD,DC 
ht_0,%r0,%ret1	; ht++
+	STD	lt_0,0(r_ptr)	; rp[0] = lt
+
+bn_mul_add_words_exit
+	.EXIT
+
+	EXTRD,U	%ret1,31,32,%ret0	; for 32-bit, return in ret0/ret1
+	LDD	-80(%sp),%r9	; restore r9
+	LDD	-88(%sp),%r8	; restore r8
+	LDD	-96(%sp),%r7	; restore r7
+	LDD	-104(%sp),%r6	; restore r6
+	LDD	-112(%sp),%r5	; restore r5
+	LDD	-120(%sp),%r4	; restore r4
+	BVE	(%rp)
+	LDD,MB	-128(%sp),%r3	; restore r3
+	.PROCEND	;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+; w on stack at -56(sp)
+
+bn_mul_words
+	.proc
+	.callinfo frame=128
+	.entry
+	.EXPORT	bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+	.align 64
+
+	STD	%r3,0(%sp)	; save r3
+	STD	%r4,8(%sp)	; save r4
+	NOP
+	STD	%r5,16(%sp)	; save r5
+
+	STD	%r6,24(%sp)	; save r6
+	STD	%r7,32(%sp)	; save r7
+	COPY	%r0,%ret1	; return 0 by default
+	DEPDI,Z	1,31,1,top_overflow	; top_overflow = 1 << 32
+
+	CMPIB,>=	0,num,bn_mul_words_exit
+	LDO	128(%sp),%sp	; bump stack
+
+	;
+	; If there is only 1 word to do, just do the cleanup
+	;
+	CMPIB,=	1,num,bn_mul_words_single_top
+	FLDD	-184(%sp),fw	; (-56-128) load up w into fw (fw_h/fw_l)
+
+	;
+	; This loop is unrolled 2 times (64-byte aligned as well)
+	;
+	; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+	;
+bn_mul_words_unroll2
+
+	FLDD	0(a_ptr),t_float_0	; load up 64-bit value (fr8L) ht(L)/lt(R)
+	FLDD	8(a_ptr),t_float_1	; load up 64-bit value (fr8L) ht(L)/lt(R)
+	XMPYU	fht_0,fw_l,fm1	; m1[0] = fht_0*fw_l
+	XMPYU	fht_1,fw_l,fm1_1	; m1[1] = ht*fw_l
+
+	FSTD	fm1,-16(%sp)	; -16(sp) = m1
+	FSTD	fm1_1,-48(%sp)	; -48(sp) = m1
+	XMPYU	flt_0,fw_h,fm	; m = lt*fw_h
+	XMPYU	flt_1,fw_h,fm_1	; m = lt*fw_h
+
+	FSTD	fm,-8(%sp)	; -8(sp) = m
+	FSTD	fm_1,-40(%sp)	; -40(sp) = m
+	XMPYU	fht_0,fw_h,ht_temp	; ht_temp = fht_0*fw_h
+	XMPYU	fht_1,fw_h,ht_temp_1	; ht_temp = ht*fw_h
+
+	FSTD	ht_temp,-24(%sp)	; -24(sp) = ht
+	FSTD	ht_temp_1,-56(%sp)	; -56(sp) = ht
+	XMPYU	flt_0,fw_l,lt_temp	; lt_temp = lt*fw_l
+	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp = lt*fw_l
+
+	FSTD	lt_temp,-32(%sp)	; -32(sp) = lt
+	FSTD	lt_temp_1,-64(%sp)	; -64(sp) = lt
+	LDD	-8(%sp),m_0
+	LDD	-40(%sp),m_1
+
+	LDD	-16(%sp),m1_0
+	LDD	-48(%sp),m1_1
+	LDD	-24(%sp),ht_0
+	LDD	-56(%sp),ht_1
+
+	ADD,L	m1_0,m_0,tmp_0	; tmp_0 = m + m1;
+	ADD,L	m1_1,m_1,tmp_1	; tmp_1 = m + m1;
+	LDD	-32(%sp),lt_0
+	LDD	-64(%sp),lt_1
+
+	CMPCLR,*>>=	tmp_0,m1_0, %r0	; if (m < m1)
+	ADD,L	ht_0,top_overflow,ht_0	; ht += (1<<32)
+	CMPCLR,*>>=	tmp_1,m1_1,%r0	; if (m < m1)
+	ADD,L	ht_1,top_overflow,ht_1	; ht += (1<<32)
+
+	EXTRD,U	tmp_0,31,32,m_0	; m>>32
+	DEPD,Z	tmp_0,31,32,m1_0	; m1 = m<<32
+	EXTRD,U	tmp_1,31,32,m_1	; m>>32
+	DEPD,Z	tmp_1,31,32,m1_1	; m1 = m<<32
+
+	ADD,L	ht_0,m_0,ht_0	; ht+= (m>>32)
+	ADD,L	ht_1,m_1,ht_1	; ht+= (m>>32)
+	ADD	lt_0,m1_0,lt_0	; lt = lt+m1;
+	ADD,DC	ht_0,%r0,ht_0	; ht++
+
+	ADD	lt_1,m1_1,lt_1	; lt = lt+m1;
+	ADD,DC	ht_1,%r0,ht_1	; ht++
+	ADD	%ret1,lt_0,lt_0	; lt = lt + c (ret1);
+	ADD,DC	ht_0,%r0,ht_0	; ht++
+
+	ADD	ht_0,lt_1,lt_1	; lt = lt + c (ht_0)
+	ADD,DC	ht_1,%r0,ht_1	; ht++
+	STD	lt_0,0(r_ptr)	; rp[0] = lt
+	STD	lt_1,8(r_ptr)	; rp[1] = lt
+
+	COPY	ht_1,%ret1	; carry = ht
+	LDO	-2(num),num	; num = num - 2;
+	LDO	16(a_ptr),a_ptr	; ap += 2
+	CMPIB,<=	2,num,bn_mul_words_unroll2
+	LDO	16(r_ptr),r_ptr	; rp += 2
+
+	CMPIB,=,N	0,num,bn_mul_words_exit	; are we done?
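+	;
+	; What the routine computes, as a rough C sketch (an annotation,
+	; assuming a compiler with a 128-bit type; the assembly gets the
+	; same effect from the 32x32 XMPYU pieces above):
+	;
+	;	BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap,
+	;	                      int num, BN_ULONG w)
+	;	{
+	;		BN_ULONG c = 0;
+	;		for (int i = 0; i < num; i++) {
+	;			unsigned __int128 t = (unsigned __int128)ap[i]*w + c;
+	;			rp[i] = (BN_ULONG)t;
+	;			c     = (BN_ULONG)(t >> 64);
+	;		}
+	;		return c;	/* final carry */
+	;	}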
+
+	;
+	; Top of loop aligned on 64-byte boundary
+	;
+bn_mul_words_single_top
+	FLDD	0(a_ptr),t_float_0	; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+	XMPYU	fht_0,fw_l,fm1	; m1 = ht*fw_l
+	FSTD	fm1,-16(%sp)	; -16(sp) = m1
+	XMPYU	flt_0,fw_h,fm	; m = lt*fw_h
+	FSTD	fm,-8(%sp)	; -8(sp) = m
+	XMPYU	fht_0,fw_h,ht_temp	; ht_temp = ht*fw_h
+	FSTD	ht_temp,-24(%sp)	; -24(sp) = ht
+	XMPYU	flt_0,fw_l,lt_temp	; lt_temp = lt*fw_l
+	FSTD	lt_temp,-32(%sp)	; -32(sp) = lt
+
+	LDD	-8(%sp),m_0
+	LDD	-16(%sp),m1_0
+	ADD,L	m_0,m1_0,tmp_0	; tmp_0 = m + m1;
+	LDD	-24(%sp),ht_0
+	LDD	-32(%sp),lt_0
+
+	CMPCLR,*>>=	tmp_0,m1_0,%r0	; if (m < m1)
+	ADD,L	ht_0,top_overflow,ht_0	; ht += (1<<32)
+
+	EXTRD,U	tmp_0,31,32,m_0	; m>>32
+	DEPD,Z	tmp_0,31,32,m1_0	; m1 = m<<32
+
+	ADD,L	ht_0,m_0,ht_0	; ht+= (m>>32)
+	ADD	lt_0,m1_0,lt_0	; lt = lt+m1;
+	ADD,DC	ht_0,%r0,ht_0	; ht++
+
+	ADD	%ret1,lt_0,lt_0	; lt = lt + c;
+	ADD,DC	ht_0,%r0,ht_0	; ht++
+
+	COPY	ht_0,%ret1	; copy carry
+	STD	lt_0,0(r_ptr)	; rp[0] = lt
+
+bn_mul_words_exit
+	.EXIT
+	EXTRD,U	%ret1,31,32,%ret0	; for 32-bit, return in ret0/ret1
+	LDD	-96(%sp),%r7	; restore r7
+	LDD	-104(%sp),%r6	; restore r6
+	LDD	-112(%sp),%r5	; restore r5
+	LDD	-120(%sp),%r4	; restore r4
+	BVE	(%rp)
+	LDD,MB	-128(%sp),%r3	; restore r3
+	.PROCEND
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
+
+bn_sqr_words
+	.proc
+	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+	.EXPORT	bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+	.entry
+	.align 64
+
+	STD	%r3,0(%sp)	; save r3
+	STD	%r4,8(%sp)	; save r4
+	NOP
+	STD	%r5,16(%sp)	; save r5
+
+	CMPIB,>=	0,num,bn_sqr_words_exit
+	LDO	128(%sp),%sp	; bump stack
+
+	;
+	; If there is only 1, then go straight to cleanup
+	;
+	CMPIB,=	1,num,bn_sqr_words_single_top
+	DEPDI,Z	-1,32,33,high_mask	; Create Mask 0xffffffff80000000L
+
+	;
+	; This loop is unrolled 2 times (64-byte aligned as well)
+	;
+
+bn_sqr_words_unroll2
+	FLDD	0(a_ptr),t_float_0	; a[0]
+	FLDD	8(a_ptr),t_float_1	; a[1]
+	XMPYU	fht_0,flt_0,fm	; m[0]
+	XMPYU	fht_1,flt_1,fm_1	; m[1]
+
+	FSTD	fm,-24(%sp)	; store m[0]
+	FSTD	fm_1,-56(%sp)	; store m[1]
+	XMPYU	flt_0,flt_0,lt_temp	; lt[0]
+	XMPYU	flt_1,flt_1,lt_temp_1	; lt[1]
+
+	FSTD	lt_temp,-16(%sp)	; store lt[0]
+	FSTD	lt_temp_1,-48(%sp)	; store lt[1]
+	XMPYU	fht_0,fht_0,ht_temp	; ht[0]
+	XMPYU	fht_1,fht_1,ht_temp_1	; ht[1]
+
+	FSTD	ht_temp,-8(%sp)	; store ht[0]
+	FSTD	ht_temp_1,-40(%sp)	; store ht[1]
+	LDD	-24(%sp),m_0
+	LDD	-56(%sp),m_1
+
+	AND	m_0,high_mask,tmp_0	; m[0] & Mask
+	AND	m_1,high_mask,tmp_1	; m[1] & Mask
+	DEPD,Z	m_0,30,31,m_0	; m[0] << 32+1
+	DEPD,Z	m_1,30,31,m_1	; m[1] << 32+1
+
+	LDD	-16(%sp),lt_0
+	LDD	-48(%sp),lt_1
+	EXTRD,U	tmp_0,32,33,tmp_0	; tmp_0 = m[0]&Mask >> 32-1
+	EXTRD,U	tmp_1,32,33,tmp_1	; tmp_1 = m[1]&Mask >> 32-1
+
+	LDD	-8(%sp),ht_0
+	LDD	-40(%sp),ht_1
+	ADD,L	ht_0,tmp_0,ht_0	; ht[0] += tmp_0
+	ADD,L	ht_1,tmp_1,ht_1	; ht[1] += tmp_1
+
+	ADD	lt_0,m_0,lt_0	; lt = lt+m
+	ADD,DC	ht_0,%r0,ht_0	; ht[0]++
+	STD	lt_0,0(r_ptr)	; rp[0] = lt[0]
+	STD	ht_0,8(r_ptr)	; rp[1] = ht[0]
+
+	ADD	lt_1,m_1,lt_1	; lt = lt+m
+	ADD,DC	ht_1,%r0,ht_1	; ht[1]++
+	STD	lt_1,16(r_ptr)	; rp[2] = lt[1]
+	STD	ht_1,24(r_ptr)	; rp[3] = ht[1]
+
+	LDO	-2(num),num	; num = num - 2;
+	LDO	16(a_ptr),a_ptr	; ap += 2
+	CMPIB,<=	2,num,bn_sqr_words_unroll2
+	LDO	32(r_ptr),r_ptr	; rp += 4
+
+	CMPIB,=,N	0,num,bn_sqr_words_exit	; are we done?
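+	;
+	; A rough C sketch of one word of the squaring above (an
+	; annotation, assuming 64-bit BN_ULONG): for a = 2^32*h + l,
+	; a^2 = 2^64*h^2 + 2^33*h*l + l^2.
+	;
+	;	BN_ULONG h = a >> 32, l = a & 0xffffffffUL;
+	;	BN_ULONG ht = h*h, lt = l*l, m = h*l;
+	;	ht += (m & 0xffffffff80000000UL) >> 31;	/* high part of 2*m */
+	;	lt += m << 33;				/* low part of 2*m  */
+	;	if (lt < (m << 33)) ht++;		/* carry            */
+	;	rp[0] = lt; rp[1] = ht;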
+ + ; + ; Top of loop aligned on 64-byte boundary + ; +bn_sqr_words_single_top + FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R) + + XMPYU fht_0,flt_0,fm ; m + FSTD fm,-24(%sp) ; store m + + XMPYU flt_0,flt_0,lt_temp ; lt + FSTD lt_temp,-16(%sp) ; store lt + + XMPYU fht_0,fht_0,ht_temp ; ht + FSTD ht_temp,-8(%sp) ; store ht + + LDD -24(%sp),m_0 ; load m + AND m_0,high_mask,tmp_0 ; m & Mask + DEPD,Z m_0,30,31,m_0 ; m << 32+1 + LDD -16(%sp),lt_0 ; lt + + LDD -8(%sp),ht_0 ; ht + EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1 + ADD m_0,lt_0,lt_0 ; lt = lt+m + ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0 + ADD,DC ht_0,%r0,ht_0 ; ht++ + + STD lt_0,0(r_ptr) ; rp[0] = lt + STD ht_0,8(r_ptr) ; rp[1] = ht + +bn_sqr_words_exit + .EXIT + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + .PROCEND ;in=23,24,25,26,29;out=28; + + +;---------------------------------------------------------------------------- +; +;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +; +; arg0 = rp +; arg1 = ap +; arg2 = bp +; arg3 = n + +t .reg %r22 +b .reg %r21 +l .reg %r20 + +bn_add_words + .proc + .entry + .callinfo + .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .align 64 + + CMPIB,>= 0,n,bn_add_words_exit + COPY %r0,%ret1 ; return 0 by default + + ; + ; If 2 or more numbers do the loop + ; + CMPIB,= 1,n,bn_add_words_single_top + NOP + + ; + ; This loop is unrolled 2 times (64-byte aligned as well) + ; +bn_add_words_unroll2 + LDD 0(a_ptr),t + LDD 0(b_ptr),b + ADD t,%ret1,t ; t = t+c; + ADD,DC %r0,%r0,%ret1 ; set c to carry + ADD t,b,l ; l = t + b[0] + ADD,DC %ret1,%r0,%ret1 ; c+= carry + STD l,0(r_ptr) + + LDD 8(a_ptr),t + LDD 8(b_ptr),b + ADD t,%ret1,t ; t = t+c; + ADD,DC %r0,%r0,%ret1 ; set c to carry + ADD t,b,l ; l = t + b[0] + ADD,DC %ret1,%r0,%ret1 ; c+= carry + STD l,8(r_ptr) + + LDO -2(n),n + LDO 16(a_ptr),a_ptr + LDO 16(b_ptr),b_ptr + + CMPIB,<= 2,n,bn_add_words_unroll2 + LDO 16(r_ptr),r_ptr + + CMPIB,=,N 0,n,bn_add_words_exit ; are we done? + +bn_add_words_single_top + LDD 0(a_ptr),t + LDD 0(b_ptr),b + + ADD t,%ret1,t ; t = t+c; + ADD,DC %r0,%r0,%ret1 ; set c to carry (could use CMPCLR??) 
+ ADD t,b,l ; l = t + b[0] + ADD,DC %ret1,%r0,%ret1 ; c+= carry + STD l,0(r_ptr) + +bn_add_words_exit + .EXIT + BVE (%rp) + EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1 + .PROCEND ;in=23,24,25,26,29;out=28; + +;---------------------------------------------------------------------------- +; +;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +; +; arg0 = rp +; arg1 = ap +; arg2 = bp +; arg3 = n + +t1 .reg %r22 +t2 .reg %r21 +sub_tmp1 .reg %r20 +sub_tmp2 .reg %r19 + + +bn_sub_words + .proc + .callinfo + .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .entry + .align 64 + + CMPIB,>= 0,n,bn_sub_words_exit + COPY %r0,%ret1 ; return 0 by default + + ; + ; If 2 or more numbers do the loop + ; + CMPIB,= 1,n,bn_sub_words_single_top + NOP + + ; + ; This loop is unrolled 2 times (64-byte aligned as well) + ; +bn_sub_words_unroll2 + LDD 0(a_ptr),t1 + LDD 0(b_ptr),t2 + SUB t1,t2,sub_tmp1 ; t3 = t1-t2; + SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c; + + CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2 + LDO 1(%r0),sub_tmp2 + + CMPCLR,*= t1,t2,%r0 + COPY sub_tmp2,%ret1 + STD sub_tmp1,0(r_ptr) + + LDD 8(a_ptr),t1 + LDD 8(b_ptr),t2 + SUB t1,t2,sub_tmp1 ; t3 = t1-t2; + SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c; + CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2 + LDO 1(%r0),sub_tmp2 + + CMPCLR,*= t1,t2,%r0 + COPY sub_tmp2,%ret1 + STD sub_tmp1,8(r_ptr) + + LDO -2(n),n + LDO 16(a_ptr),a_ptr + LDO 16(b_ptr),b_ptr + + CMPIB,<= 2,n,bn_sub_words_unroll2 + LDO 16(r_ptr),r_ptr + + CMPIB,=,N 0,n,bn_sub_words_exit ; are we done? + +bn_sub_words_single_top + LDD 0(a_ptr),t1 + LDD 0(b_ptr),t2 + SUB t1,t2,sub_tmp1 ; t3 = t1-t2; + SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c; + CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2 + LDO 1(%r0),sub_tmp2 + + CMPCLR,*= t1,t2,%r0 + COPY sub_tmp2,%ret1 + + STD sub_tmp1,0(r_ptr) + +bn_sub_words_exit + .EXIT + BVE (%rp) + EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1 + .PROCEND ;in=23,24,25,26,29;out=28; + +;------------------------------------------------------------------------------ +; +; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d) +; +; arg0 = h +; arg1 = l +; arg2 = d +; +; This is mainly just output from the HP C compiler. 
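+;
+; Semantically it computes the 128/64-bit quotient sketched below
+; (BN_ULONG is 64-bit here, and h < d is assumed); this is only what
+; the routine returns, not how the compiled code arrives at it:
+;
+;	BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+;	{
+;		unsigned __int128 n = ((unsigned __int128)h << 64) | l;
+;		return (BN_ULONG)(n / d);
+;	}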
+; +;------------------------------------------------------------------------------ +bn_div_words + .PROC + .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN + .IMPORT BN_num_bits_word,CODE + ;--- not PIC .IMPORT __iob,DATA + ;--- not PIC .IMPORT fprintf,CODE + .IMPORT abort,CODE + .IMPORT $$div2U,MILLICODE + .CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE + .ENTRY + STW %r2,-20(%r30) ;offset 0x8ec + STW,MA %r3,192(%r30) ;offset 0x8f0 + STW %r4,-188(%r30) ;offset 0x8f4 + DEPD %r5,31,32,%r6 ;offset 0x8f8 + STD %r6,-184(%r30) ;offset 0x8fc + DEPD %r7,31,32,%r8 ;offset 0x900 + STD %r8,-176(%r30) ;offset 0x904 + STW %r9,-168(%r30) ;offset 0x908 + LDD -248(%r30),%r3 ;offset 0x90c + COPY %r26,%r4 ;offset 0x910 + COPY %r24,%r5 ;offset 0x914 + DEPD %r25,31,32,%r4 ;offset 0x918 + CMPB,*<> %r3,%r0,$0006000C ;offset 0x91c + DEPD %r23,31,32,%r5 ;offset 0x920 + MOVIB,TR -1,%r29,$00060002 ;offset 0x924 + EXTRD,U %r29,31,32,%r28 ;offset 0x928 +$0006002A + LDO -1(%r29),%r29 ;offset 0x92c + SUB %r23,%r7,%r23 ;offset 0x930 +$00060024 + SUB %r4,%r31,%r25 ;offset 0x934 + AND %r25,%r19,%r26 ;offset 0x938 + CMPB,*<>,N %r0,%r26,$00060046 ;offset 0x93c + DEPD,Z %r25,31,32,%r20 ;offset 0x940 + OR %r20,%r24,%r21 ;offset 0x944 + CMPB,*<<,N %r21,%r23,$0006002A ;offset 0x948 + SUB %r31,%r2,%r31 ;offset 0x94c +$00060046 +$0006002E + DEPD,Z %r23,31,32,%r25 ;offset 0x950 + EXTRD,U %r23,31,32,%r26 ;offset 0x954 + AND %r25,%r19,%r24 ;offset 0x958 + ADD,L %r31,%r26,%r31 ;offset 0x95c + CMPCLR,*>>= %r5,%r24,%r0 ;offset 0x960 + LDO 1(%r31),%r31 ;offset 0x964 +$00060032 + CMPB,*<<=,N %r31,%r4,$00060036 ;offset 0x968 + LDO -1(%r29),%r29 ;offset 0x96c + ADD,L %r4,%r3,%r4 ;offset 0x970 +$00060036 + ADDIB,=,N -1,%r8,$D0 ;offset 0x974 + SUB %r5,%r24,%r28 ;offset 0x978 +$0006003A + SUB %r4,%r31,%r24 ;offset 0x97c + SHRPD %r24,%r28,32,%r4 ;offset 0x980 + DEPD,Z %r29,31,32,%r9 ;offset 0x984 + DEPD,Z %r28,31,32,%r5 ;offset 0x988 +$0006001C + EXTRD,U %r4,31,32,%r31 ;offset 0x98c + CMPB,*<>,N %r31,%r2,$00060020 ;offset 0x990 + MOVB,TR %r6,%r29,$D1 ;offset 0x994 + STD %r29,-152(%r30) ;offset 0x998 +$0006000C + EXTRD,U %r3,31,32,%r25 ;offset 0x99c + COPY %r3,%r26 ;offset 0x9a0 + EXTRD,U %r3,31,32,%r9 ;offset 0x9a4 + EXTRD,U %r4,31,32,%r8 ;offset 0x9a8 + .CALL ARGW0=GR,ARGW1=GR,RTNVAL=GR ;in=25,26;out=28; + B,L BN_num_bits_word,%r2 ;offset 0x9ac + EXTRD,U %r5,31,32,%r7 ;offset 0x9b0 + LDI 64,%r20 ;offset 0x9b4 + DEPD %r7,31,32,%r5 ;offset 0x9b8 + DEPD %r8,31,32,%r4 ;offset 0x9bc + DEPD %r9,31,32,%r3 ;offset 0x9c0 + CMPB,= %r28,%r20,$00060012 ;offset 0x9c4 + COPY %r28,%r24 ;offset 0x9c8 + MTSARCM %r24 ;offset 0x9cc + DEPDI,Z -1,%sar,1,%r19 ;offset 0x9d0 + CMPB,*>>,N %r4,%r19,$D2 ;offset 0x9d4 +$00060012 + SUBI 64,%r24,%r31 ;offset 0x9d8 + CMPCLR,*<< %r4,%r3,%r0 ;offset 0x9dc + SUB %r4,%r3,%r4 ;offset 0x9e0 +$00060016 + CMPB,= %r31,%r0,$0006001A ;offset 0x9e4 + COPY %r0,%r9 ;offset 0x9e8 + MTSARCM %r31 ;offset 0x9ec + DEPD,Z %r3,%sar,64,%r3 ;offset 0x9f0 + SUBI 64,%r31,%r26 ;offset 0x9f4 + MTSAR %r26 ;offset 0x9f8 + SHRPD %r4,%r5,%sar,%r4 ;offset 0x9fc + MTSARCM %r31 ;offset 0xa00 + DEPD,Z %r5,%sar,64,%r5 ;offset 0xa04 +$0006001A + DEPDI,Z -1,31,32,%r19 ;offset 0xa08 + AND %r3,%r19,%r29 ;offset 0xa0c + EXTRD,U %r29,31,32,%r2 ;offset 0xa10 + DEPDI,Z -1,63,32,%r6 ;offset 0xa14 + MOVIB,TR 2,%r8,$0006001C ;offset 0xa18 + EXTRD,U %r3,63,32,%r7 ;offset 0xa1c +$D2 + ;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20 + ;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24 + ;--- 
not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28 + ;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28; + ;--- not PIC B,L fprintf,%r2 ;offset 0xa2c + ;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30 + .CALL ; + B,L abort,%r2 ;offset 0xa34 + NOP ;offset 0xa38 + B $D3 ;offset 0xa3c + LDW -212(%r30),%r2 ;offset 0xa40 +$00060020 + COPY %r4,%r26 ;offset 0xa44 + EXTRD,U %r4,31,32,%r25 ;offset 0xa48 + COPY %r2,%r24 ;offset 0xa4c + .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL) + B,L $$div2U,%r31 ;offset 0xa50 + EXTRD,U %r2,31,32,%r23 ;offset 0xa54 + DEPD %r28,31,32,%r29 ;offset 0xa58 +$00060022 + STD %r29,-152(%r30) ;offset 0xa5c +$D1 + AND %r5,%r19,%r24 ;offset 0xa60 + EXTRD,U %r24,31,32,%r24 ;offset 0xa64 + STW %r2,-160(%r30) ;offset 0xa68 + STW %r7,-128(%r30) ;offset 0xa6c + FLDD -152(%r30),%fr4 ;offset 0xa70 + FLDD -152(%r30),%fr7 ;offset 0xa74 + FLDW -160(%r30),%fr8L ;offset 0xa78 + FLDW -128(%r30),%fr5L ;offset 0xa7c + XMPYU %fr8L,%fr7L,%fr10 ;offset 0xa80 + FSTD %fr10,-136(%r30) ;offset 0xa84 + XMPYU %fr8L,%fr7R,%fr22 ;offset 0xa88 + FSTD %fr22,-144(%r30) ;offset 0xa8c + XMPYU %fr5L,%fr4L,%fr11 ;offset 0xa90 + XMPYU %fr5L,%fr4R,%fr23 ;offset 0xa94 + FSTD %fr11,-112(%r30) ;offset 0xa98 + FSTD %fr23,-120(%r30) ;offset 0xa9c + LDD -136(%r30),%r28 ;offset 0xaa0 + DEPD,Z %r28,31,32,%r31 ;offset 0xaa4 + LDD -144(%r30),%r20 ;offset 0xaa8 + ADD,L %r20,%r31,%r31 ;offset 0xaac + LDD -112(%r30),%r22 ;offset 0xab0 + DEPD,Z %r22,31,32,%r22 ;offset 0xab4 + LDD -120(%r30),%r21 ;offset 0xab8 + B $00060024 ;offset 0xabc + ADD,L %r21,%r22,%r23 ;offset 0xac0 +$D0 + OR %r9,%r29,%r29 ;offset 0xac4 +$00060040 + EXTRD,U %r29,31,32,%r28 ;offset 0xac8 +$00060002 +$L2 + LDW -212(%r30),%r2 ;offset 0xacc +$D3 + LDW -168(%r30),%r9 ;offset 0xad0 + LDD -176(%r30),%r8 ;offset 0xad4 + EXTRD,U %r8,31,32,%r7 ;offset 0xad8 + LDD -184(%r30),%r6 ;offset 0xadc + EXTRD,U %r6,31,32,%r5 ;offset 0xae0 + LDW -188(%r30),%r4 ;offset 0xae4 + BVE (%r2) ;offset 0xae8 + .EXIT + LDW,MB -192(%r30),%r3 ;offset 0xaec + .PROCEND ;in=23,25;out=28,29;fpin=105,107; + + + + +;---------------------------------------------------------------------------- +; +; Registers to hold 64-bit values to manipulate. The "L" part +; of the register corresponds to the upper 32-bits, while the "R" +; part corresponds to the lower 32-bits +; +; Note, that when using b6 and b7, the code must save these before +; using them because they are callee save registers +; +; +; Floating point registers to use to save values that +; are manipulated. These don't collide with ftemp1-6 and +; are all caller save registers +; +a0 .reg %fr22 +a0L .reg %fr22L +a0R .reg %fr22R + +a1 .reg %fr23 +a1L .reg %fr23L +a1R .reg %fr23R + +a2 .reg %fr24 +a2L .reg %fr24L +a2R .reg %fr24R + +a3 .reg %fr25 +a3L .reg %fr25L +a3R .reg %fr25R + +a4 .reg %fr26 +a4L .reg %fr26L +a4R .reg %fr26R + +a5 .reg %fr27 +a5L .reg %fr27L +a5R .reg %fr27R + +a6 .reg %fr28 +a6L .reg %fr28L +a6R .reg %fr28R + +a7 .reg %fr29 +a7L .reg %fr29L +a7R .reg %fr29R + +b0 .reg %fr30 +b0L .reg %fr30L +b0R .reg %fr30R + +b1 .reg %fr31 +b1L .reg %fr31L +b1R .reg %fr31R + +; +; Temporary floating point variables, these are all caller save +; registers +; +ftemp1 .reg %fr4 +ftemp2 .reg %fr5 +ftemp3 .reg %fr6 +ftemp4 .reg %fr7 + +; +; The B set of registers when used. 
+; + +b2 .reg %fr8 +b2L .reg %fr8L +b2R .reg %fr8R + +b3 .reg %fr9 +b3L .reg %fr9L +b3R .reg %fr9R + +b4 .reg %fr10 +b4L .reg %fr10L +b4R .reg %fr10R + +b5 .reg %fr11 +b5L .reg %fr11L +b5R .reg %fr11R + +b6 .reg %fr12 +b6L .reg %fr12L +b6R .reg %fr12R + +b7 .reg %fr13 +b7L .reg %fr13L +b7R .reg %fr13R + +c1 .reg %r21 ; only reg +temp1 .reg %r20 ; only reg +temp2 .reg %r19 ; only reg +temp3 .reg %r31 ; only reg + +m1 .reg %r28 +c2 .reg %r23 +high_one .reg %r1 +ht .reg %r6 +lt .reg %r5 +m .reg %r4 +c3 .reg %r3 + +SQR_ADD_C .macro A0L,A0R,C1,C2,C3 + XMPYU A0L,A0R,ftemp1 ; m + FSTD ftemp1,-24(%sp) ; store m + + XMPYU A0R,A0R,ftemp2 ; lt + FSTD ftemp2,-16(%sp) ; store lt + + XMPYU A0L,A0L,ftemp3 ; ht + FSTD ftemp3,-8(%sp) ; store ht + + LDD -24(%sp),m ; load m + AND m,high_mask,temp2 ; m & Mask + DEPD,Z m,30,31,temp3 ; m << 32+1 + LDD -16(%sp),lt ; lt + + LDD -8(%sp),ht ; ht + EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1 + ADD temp3,lt,lt ; lt = lt+m + ADD,L ht,temp1,ht ; ht += temp1 + ADD,DC ht,%r0,ht ; ht++ + + ADD C1,lt,C1 ; c1=c1+lt + ADD,DC ht,%r0,ht ; ht++ + + ADD C2,ht,C2 ; c2=c2+ht + ADD,DC C3,%r0,C3 ; c3++ +.endm + +SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3 + XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht + FSTD ftemp1,-16(%sp) ; + XMPYU A0R,A1L,ftemp2 ; m = bh*lt + FSTD ftemp2,-8(%sp) ; + XMPYU A0R,A1R,ftemp3 ; lt = bl*lt + FSTD ftemp3,-32(%sp) + XMPYU A0L,A1L,ftemp4 ; ht = bh*ht + FSTD ftemp4,-24(%sp) ; + + LDD -8(%sp),m ; r21 = m + LDD -16(%sp),m1 ; r19 = m1 + ADD,L m,m1,m ; m+m1 + + DEPD,Z m,31,32,temp3 ; (m+m1<<32) + LDD -24(%sp),ht ; r24 = ht + + CMPCLR,*>>= m,m1,%r0 ; if (m < m1) + ADD,L ht,high_one,ht ; ht+=high_one + + EXTRD,U m,31,32,temp1 ; m >> 32 + LDD -32(%sp),lt ; lt + ADD,L ht,temp1,ht ; ht+= m>>32 + ADD lt,temp3,lt ; lt = lt+m1 + ADD,DC ht,%r0,ht ; ht++ + + ADD ht,ht,ht ; ht=ht+ht; + ADD,DC C3,%r0,C3 ; add in carry (c3++) + + ADD lt,lt,lt ; lt=lt+lt; + ADD,DC ht,%r0,ht ; add in carry (ht++) + + ADD C1,lt,C1 ; c1=c1+lt + ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++) + LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise + + ADD C2,ht,C2 ; c2 = c2 + ht + ADD,DC C3,%r0,C3 ; add in carry (c3++) +.endm + +; +;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a) +; arg0 = r_ptr +; arg1 = a_ptr +; + +bn_sqr_comba8 + .PROC + .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE + .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .ENTRY + .align 64 + + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + + ; + ; Zero out carries + ; + COPY %r0,c1 + COPY %r0,c2 + COPY %r0,c3 + + LDO 128(%sp),%sp ; bump stack + DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L + DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32 + + ; + ; Load up all of the values we are going to use + ; + FLDD 0(a_ptr),a0 + FLDD 8(a_ptr),a1 + FLDD 16(a_ptr),a2 + FLDD 24(a_ptr),a3 + FLDD 32(a_ptr),a4 + FLDD 40(a_ptr),a5 + FLDD 48(a_ptr),a6 + FLDD 56(a_ptr),a7 + + SQR_ADD_C a0L,a0R,c1,c2,c3 + STD c1,0(r_ptr) ; r[0] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1 + STD c2,8(r_ptr) ; r[1] = c2; + COPY %r0,c2 + + SQR_ADD_C a1L,a1R,c3,c1,c2 + SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2 + STD c3,16(r_ptr) ; r[2] = c3; + COPY %r0,c3 + + SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3 + SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3 + STD c1,24(r_ptr) ; r[3] = c1; + COPY %r0,c1 + + SQR_ADD_C a2L,a2R,c2,c3,c1 + SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1 + SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1 + STD c2,32(r_ptr) ; r[4] = c2; + COPY %r0,c2 + + SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2 + 
SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2 + SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2 + STD c3,40(r_ptr) ; r[5] = c3; + COPY %r0,c3 + + SQR_ADD_C a3L,a3R,c1,c2,c3 + SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3 + SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3 + SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3 + STD c1,48(r_ptr) ; r[6] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1 + SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1 + SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1 + SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1 + STD c2,56(r_ptr) ; r[7] = c2; + COPY %r0,c2 + + SQR_ADD_C a4L,a4R,c3,c1,c2 + SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2 + SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2 + SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2 + STD c3,64(r_ptr) ; r[8] = c3; + COPY %r0,c3 + + SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3 + SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3 + SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3 + STD c1,72(r_ptr) ; r[9] = c1; + COPY %r0,c1 + + SQR_ADD_C a5L,a5R,c2,c3,c1 + SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1 + SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1 + STD c2,80(r_ptr) ; r[10] = c2; + COPY %r0,c2 + + SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2 + SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2 + STD c3,88(r_ptr) ; r[11] = c3; + COPY %r0,c3 + + SQR_ADD_C a6L,a6R,c1,c2,c3 + SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3 + STD c1,96(r_ptr) ; r[12] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1 + STD c2,104(r_ptr) ; r[13] = c2; + COPY %r0,c2 + + SQR_ADD_C a7L,a7R,c3,c1,c2 + STD c3, 112(r_ptr) ; r[14] = c3 + STD c1, 120(r_ptr) ; r[15] = c1 + + .EXIT + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + +;----------------------------------------------------------------------------- +; +;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a) +; arg0 = r_ptr +; arg1 = a_ptr +; + +bn_sqr_comba4 + .proc + .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE + .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .entry + .align 64 + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + + ; + ; Zero out carries + ; + COPY %r0,c1 + COPY %r0,c2 + COPY %r0,c3 + + LDO 128(%sp),%sp ; bump stack + DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L + DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32 + + ; + ; Load up all of the values we are going to use + ; + FLDD 0(a_ptr),a0 + FLDD 8(a_ptr),a1 + FLDD 16(a_ptr),a2 + FLDD 24(a_ptr),a3 + FLDD 32(a_ptr),a4 + FLDD 40(a_ptr),a5 + FLDD 48(a_ptr),a6 + FLDD 56(a_ptr),a7 + + SQR_ADD_C a0L,a0R,c1,c2,c3 + + STD c1,0(r_ptr) ; r[0] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1 + + STD c2,8(r_ptr) ; r[1] = c2; + COPY %r0,c2 + + SQR_ADD_C a1L,a1R,c3,c1,c2 + SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2 + + STD c3,16(r_ptr) ; r[2] = c3; + COPY %r0,c3 + + SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3 + SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3 + + STD c1,24(r_ptr) ; r[3] = c1; + COPY %r0,c1 + + SQR_ADD_C a2L,a2R,c2,c3,c1 + SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1 + + STD c2,32(r_ptr) ; r[4] = c2; + COPY %r0,c2 + + SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2 + STD c3,40(r_ptr) ; r[5] = c3; + COPY %r0,c3 + + SQR_ADD_C a3L,a3R,c1,c2,c3 + STD c1,48(r_ptr) ; r[6] = c1; + STD c2,56(r_ptr) ; r[7] = c2; + + .EXIT + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + + +;--------------------------------------------------------------------------- + +MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3 + XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht + FSTD ftemp1,-16(%sp) ; + XMPYU 
A0R,B0L,ftemp2	; m = bh*lt
+	FSTD	ftemp2,-8(%sp)	;
+	XMPYU	A0R,B0R,ftemp3	; lt = bl*lt
+	FSTD	ftemp3,-32(%sp)
+	XMPYU	A0L,B0L,ftemp4	; ht = bh*ht
+	FSTD	ftemp4,-24(%sp)	;
+
+	LDD	-8(%sp),m	; r21 = m
+	LDD	-16(%sp),m1	; r19 = m1
+	ADD,L	m,m1,m	; m+m1
+
+	DEPD,Z	m,31,32,temp3	; (m+m1<<32)
+	LDD	-24(%sp),ht	; r24 = ht
+
+	CMPCLR,*>>=	m,m1,%r0	; if (m < m1)
+	ADD,L	ht,high_one,ht	; ht+=high_one
+
+	EXTRD,U	m,31,32,temp1	; m >> 32
+	LDD	-32(%sp),lt	; lt
+	ADD,L	ht,temp1,ht	; ht+= m>>32
+	ADD	lt,temp3,lt	; lt = lt+m1
+	ADD,DC	ht,%r0,ht	; ht++
+
+	ADD	C1,lt,C1	; c1=c1+lt
+	ADD,DC	ht,%r0,ht	; add in carry (ht++)
+
+	ADD	C2,ht,C2	; c2 = c2 + ht
+	ADD,DC	C3,%r0,C3	; add in carry (c3++)
+.endm
+
+
+;
+;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba8
+	.proc
+	.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+	.EXPORT	bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+	.entry
+	.align 64
+
+	STD	%r3,0(%sp)	; save r3
+	STD	%r4,8(%sp)	; save r4
+	STD	%r5,16(%sp)	; save r5
+	STD	%r6,24(%sp)	; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+	;
+	; Zero out carries
+	;
+	COPY	%r0,c1
+	COPY	%r0,c2
+	COPY	%r0,c3
+
+	LDO	128(%sp),%sp	; bump stack
+	DEPDI,Z	1,31,1,high_one	; Create Value 1 << 32
+
+	;
+	; Load up all of the values we are going to use
+	;
+	FLDD	0(a_ptr),a0
+	FLDD	8(a_ptr),a1
+	FLDD	16(a_ptr),a2
+	FLDD	24(a_ptr),a3
+	FLDD	32(a_ptr),a4
+	FLDD	40(a_ptr),a5
+	FLDD	48(a_ptr),a6
+	FLDD	56(a_ptr),a7
+
+	FLDD	0(b_ptr),b0
+	FLDD	8(b_ptr),b1
+	FLDD	16(b_ptr),b2
+	FLDD	24(b_ptr),b3
+	FLDD	32(b_ptr),b4
+	FLDD	40(b_ptr),b5
+	FLDD	48(b_ptr),b6
+	FLDD	56(b_ptr),b7
+
+	MUL_ADD_C	a0L,a0R,b0L,b0R,c1,c2,c3
+	STD	c1,0(r_ptr)
+	COPY	%r0,c1
+
+	MUL_ADD_C	a0L,a0R,b1L,b1R,c2,c3,c1
+	MUL_ADD_C	a1L,a1R,b0L,b0R,c2,c3,c1
+	STD	c2,8(r_ptr)
+	COPY	%r0,c2
+
+	MUL_ADD_C	a2L,a2R,b0L,b0R,c3,c1,c2
+	MUL_ADD_C	a1L,a1R,b1L,b1R,c3,c1,c2
+	MUL_ADD_C	a0L,a0R,b2L,b2R,c3,c1,c2
+	STD	c3,16(r_ptr)
+	COPY	%r0,c3
+
+	MUL_ADD_C	a0L,a0R,b3L,b3R,c1,c2,c3
+	MUL_ADD_C	a1L,a1R,b2L,b2R,c1,c2,c3
+	MUL_ADD_C	a2L,a2R,b1L,b1R,c1,c2,c3
+	MUL_ADD_C	a3L,a3R,b0L,b0R,c1,c2,c3
+	STD	c1,24(r_ptr)
+	COPY	%r0,c1
+
+	MUL_ADD_C	a4L,a4R,b0L,b0R,c2,c3,c1
+	MUL_ADD_C	a3L,a3R,b1L,b1R,c2,c3,c1
+	MUL_ADD_C	a2L,a2R,b2L,b2R,c2,c3,c1
+	MUL_ADD_C	a1L,a1R,b3L,b3R,c2,c3,c1
+	MUL_ADD_C	a0L,a0R,b4L,b4R,c2,c3,c1
+	STD	c2,32(r_ptr)
+	COPY	%r0,c2
+
+	MUL_ADD_C	a0L,a0R,b5L,b5R,c3,c1,c2
+	MUL_ADD_C	a1L,a1R,b4L,b4R,c3,c1,c2
+	MUL_ADD_C	a2L,a2R,b3L,b3R,c3,c1,c2
+	MUL_ADD_C	a3L,a3R,b2L,b2R,c3,c1,c2
+	MUL_ADD_C	a4L,a4R,b1L,b1R,c3,c1,c2
+	MUL_ADD_C	a5L,a5R,b0L,b0R,c3,c1,c2
+	STD	c3,40(r_ptr)
+	COPY	%r0,c3
+
+	MUL_ADD_C	a6L,a6R,b0L,b0R,c1,c2,c3
+	MUL_ADD_C	a5L,a5R,b1L,b1R,c1,c2,c3
+	MUL_ADD_C	a4L,a4R,b2L,b2R,c1,c2,c3
+	MUL_ADD_C	a3L,a3R,b3L,b3R,c1,c2,c3
+	MUL_ADD_C	a2L,a2R,b4L,b4R,c1,c2,c3
+	MUL_ADD_C	a1L,a1R,b5L,b5R,c1,c2,c3
+	MUL_ADD_C	a0L,a0R,b6L,b6R,c1,c2,c3
+	STD	c1,48(r_ptr)
+	COPY	%r0,c1
+
+	MUL_ADD_C	a0L,a0R,b7L,b7R,c2,c3,c1
+	MUL_ADD_C	a1L,a1R,b6L,b6R,c2,c3,c1
+	MUL_ADD_C	a2L,a2R,b5L,b5R,c2,c3,c1
+	MUL_ADD_C	a3L,a3R,b4L,b4R,c2,c3,c1
+	MUL_ADD_C	a4L,a4R,b3L,b3R,c2,c3,c1
+	MUL_ADD_C	a5L,a5R,b2L,b2R,c2,c3,c1
+	MUL_ADD_C	a6L,a6R,b1L,b1R,c2,c3,c1
+	MUL_ADD_C	a7L,a7R,b0L,b0R,c2,c3,c1
+	STD	c2,56(r_ptr)
+	COPY	%r0,c2
+
+	MUL_ADD_C	a7L,a7R,b1L,b1R,c3,c1,c2
+	MUL_ADD_C	a6L,a6R,b2L,b2R,c3,c1,c2
+	MUL_ADD_C	a5L,a5R,b3L,b3R,c3,c1,c2
+	MUL_ADD_C	a4L,a4R,b4L,b4R,c3,c1,c2
+	MUL_ADD_C	a3L,a3R,b5L,b5R,c3,c1,c2
+	MUL_ADD_C	a2L,a2R,b6L,b6R,c3,c1,c2
+
MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2 + STD c3,64(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3 + MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3 + MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3 + MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3 + MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3 + MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3 + STD c1,72(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1 + MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1 + MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1 + MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1 + MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1 + STD c2,80(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2 + MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2 + MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2 + MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2 + STD c3,88(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3 + MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3 + MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3 + STD c1,96(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1 + MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1 + STD c2,104(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2 + STD c3,112(r_ptr) + STD c1,120(r_ptr) + + .EXIT + FLDD -88(%sp),%fr13 + FLDD -96(%sp),%fr12 + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + +;----------------------------------------------------------------------------- +; +;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +; arg0 = r_ptr +; arg1 = a_ptr +; arg2 = b_ptr +; + +bn_mul_comba4 + .proc + .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE + .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .entry + .align 64 + + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + FSTD %fr12,32(%sp) ; save r6 + FSTD %fr13,40(%sp) ; save r7 + + ; + ; Zero out carries + ; + COPY %r0,c1 + COPY %r0,c2 + COPY %r0,c3 + + LDO 128(%sp),%sp ; bump stack + DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32 + + ; + ; Load up all of the values we are going to use + ; + FLDD 0(a_ptr),a0 + FLDD 8(a_ptr),a1 + FLDD 16(a_ptr),a2 + FLDD 24(a_ptr),a3 + + FLDD 0(b_ptr),b0 + FLDD 8(b_ptr),b1 + FLDD 16(b_ptr),b2 + FLDD 24(b_ptr),b3 + + MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3 + STD c1,0(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1 + MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1 + STD c2,8(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2 + MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2 + MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2 + STD c3,16(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3 + MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3 + MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3 + MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3 + STD c1,24(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1 + MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1 + MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1 + STD c2,32(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2 + MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2 + STD c3,40(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3 + STD c1,48(r_ptr) + STD c2,56(r_ptr) + + .EXIT + FLDD -88(%sp),%fr13 + FLDD -96(%sp),%fr12 + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + + +;--- not PIC .SPACE $TEXT$ +;--- not PIC .SUBSPA $CODE$ +;--- not PIC .SPACE $PRIVATE$,SORT=16 +;--- not PIC .IMPORT $global$,DATA +;--- not PIC .SPACE $TEXT$ +;--- not PIC .SUBSPA $CODE$ +;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c +;--- not PIC C$7 +;--- not PIC .ALIGN 8 +;--- not PIC .STRINGZ "Division would overflow (%d)\n" + 
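+;
+; For reference, each MUL_ADD_C/SQR_ADD_C invocation above adds one
+; 64x64 partial product into a rotating three-word column sum; as a
+; rough C sketch (an annotation, assuming a 128-bit type):
+;
+;	unsigned __int128 t = (unsigned __int128)a * b;
+;	BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);
+;	c1 += lo; if (c1 < lo) hi++;	/* carry into the high word  */
+;	c2 += hi; if (c2 < hi) c3++;	/* carry into the third word */
+;
+; SQR_ADD_C2 (and MUL_ADD_C used twice per pair) doubles the product
+; first, since the off-diagonal terms a[i]*a[j] (i != j) occur twice
+; in a square.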
.END diff --git a/openssl/crypto/bn/asm/pa-risc2W.s b/openssl/crypto/bn/asm/pa-risc2W.s new file mode 100644 index 00000000..a9954575 --- /dev/null +++ b/openssl/crypto/bn/asm/pa-risc2W.s @@ -0,0 +1,1605 @@ +; +; PA-RISC 64-bit implementation of bn_asm code +; +; This code is approximately 2x faster than the C version +; for RSA/DSA. +; +; See http://devresource.hp.com/ for more details on the PA-RISC +; architecture. Also see the book "PA-RISC 2.0 Architecture" +; by Gerry Kane for information on the instruction set architecture. +; +; Code written by Chris Ruemmler (with some help from the HP C +; compiler). +; +; The code compiles with HP's assembler +; + + .level 2.0W + .space $TEXT$ + .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY + +; +; Global Register definitions used for the routines. +; +; Some information about HP's runtime architecture for 64-bits. +; +; "Caller save" means the calling function must save the register +; if it wants the register to be preserved. +; "Callee save" means if a function uses the register, it must save +; the value before using it. +; +; For the floating point registers +; +; "caller save" registers: fr4-fr11, fr22-fr31 +; "callee save" registers: fr12-fr21 +; "special" registers: fr0-fr3 (status and exception registers) +; +; For the integer registers +; value zero : r0 +; "caller save" registers: r1,r19-r26 +; "callee save" registers: r3-r18 +; return register : r2 (rp) +; return values ; r28 (ret0,ret1) +; Stack pointer ; r30 (sp) +; global data pointer ; r27 (dp) +; argument pointer ; r29 (ap) +; millicode return ptr ; r31 (also a caller save register) + + +; +; Arguments to the routines +; +r_ptr .reg %r26 +a_ptr .reg %r25 +b_ptr .reg %r24 +num .reg %r24 +w .reg %r23 +n .reg %r23 + + +; +; Globals used in some routines +; + +top_overflow .reg %r29 +high_mask .reg %r22 ; value 0xffffffff80000000L + + +;------------------------------------------------------------------------------ +; +; bn_mul_add_words +; +;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr, +; int num, BN_ULONG w) +; +; arg0 = r_ptr +; arg1 = a_ptr +; arg2 = num +; arg3 = w +; +; Local register definitions +; + +fm1 .reg %fr22 +fm .reg %fr23 +ht_temp .reg %fr24 +ht_temp_1 .reg %fr25 +lt_temp .reg %fr26 +lt_temp_1 .reg %fr27 +fm1_1 .reg %fr28 +fm_1 .reg %fr29 + +fw_h .reg %fr7L +fw_l .reg %fr7R +fw .reg %fr7 + +fht_0 .reg %fr8L +flt_0 .reg %fr8R +t_float_0 .reg %fr8 + +fht_1 .reg %fr9L +flt_1 .reg %fr9R +t_float_1 .reg %fr9 + +tmp_0 .reg %r31 +tmp_1 .reg %r21 +m_0 .reg %r20 +m_1 .reg %r19 +ht_0 .reg %r1 +ht_1 .reg %r3 +lt_0 .reg %r4 +lt_1 .reg %r5 +m1_0 .reg %r6 +m1_1 .reg %r7 +rp_val .reg %r8 +rp_val_1 .reg %r9 + +bn_mul_add_words + .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN + .proc + .callinfo frame=128 + .entry + .align 64 + + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + NOP ; Needed to make the loop 16-byte aligned + NOP ; Needed to make the loop 16-byte aligned + + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + STD %r7,32(%sp) ; save r7 + STD %r8,40(%sp) ; save r8 + + STD %r9,48(%sp) ; save r9 + COPY %r0,%ret0 ; return 0 by default + DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32 + STD w,56(%sp) ; store w on stack + + CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit + LDO 128(%sp),%sp ; bump stack + + ; + ; The loop is unrolled twice, so if there is only 1 number + ; then go straight to the cleanup code. 
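+	;
+	; As a rough C sketch of the whole routine (an annotation,
+	; assuming a compiler with a 128-bit type; the assembly builds
+	; each product from 32x32 XMPYU pieces):
+	;
+	;	BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap,
+	;	                          int num, BN_ULONG w)
+	;	{
+	;		BN_ULONG c = 0;
+	;		while (num-- > 0) {
+	;			unsigned __int128 t =
+	;			    (unsigned __int128)*ap++ * w + *rp + c;
+	;			*rp++ = (BN_ULONG)t;
+	;			c = (BN_ULONG)(t >> 64);
+	;		}
+	;		return c;
+	;	}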
+ ; + CMPIB,= 1,num,bn_mul_add_words_single_top + FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l) + + ; + ; This loop is unrolled 2 times (64-byte aligned as well) + ; + ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus + ; two 32-bit mutiplies can be issued per cycle. + ; +bn_mul_add_words_unroll2 + + FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R) + FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R) + LDD 0(r_ptr),rp_val ; rp[0] + LDD 8(r_ptr),rp_val_1 ; rp[1] + + XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l + XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l + FSTD fm1,-16(%sp) ; -16(sp) = m1[0] + FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1] + + XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h + XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h + FSTD fm,-8(%sp) ; -8(sp) = m[0] + FSTD fm_1,-40(%sp) ; -40(sp) = m[1] + + XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h + XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h + FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp + FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1 + + XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l + XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l + FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp + FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1 + + LDD -8(%sp),m_0 ; m[0] + LDD -40(%sp),m_1 ; m[1] + LDD -16(%sp),m1_0 ; m1[0] + LDD -48(%sp),m1_1 ; m1[1] + + LDD -24(%sp),ht_0 ; ht[0] + LDD -56(%sp),ht_1 ; ht[1] + ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0]; + ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1]; + + LDD -32(%sp),lt_0 + LDD -64(%sp),lt_1 + CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0]) + ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32) + + CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1]) + ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32) + EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32 + DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32 + + EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32 + DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32 + ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32) + ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32) + + ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0]; + ADD,DC ht_0,%r0,ht_0 ; ht[0]++ + ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1]; + ADD,DC ht_1,%r0,ht_1 ; ht[1]++ + + ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c; + ADD,DC ht_0,%r0,ht_0 ; ht[0]++ + ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0] + ADD,DC ht_0,%r0,ht_0 ; ht[0]++ + + LDO -2(num),num ; num = num - 2; + ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c); + ADD,DC ht_1,%r0,ht_1 ; ht[1]++ + STD lt_0,0(r_ptr) ; rp[0] = lt[0] + + ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1] + ADD,DC ht_1,%r0,%ret0 ; ht[1]++ + LDO 16(a_ptr),a_ptr ; a_ptr += 2 + + STD lt_1,8(r_ptr) ; rp[1] = lt[1] + CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do + LDO 16(r_ptr),r_ptr ; r_ptr += 2 + + CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one + + ; + ; Top of loop aligned on 64-byte boundary + ; +bn_mul_add_words_single_top + FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R) + LDD 0(r_ptr),rp_val ; rp[0] + LDO 8(a_ptr),a_ptr ; a_ptr++ + XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l + FSTD fm1,-16(%sp) ; -16(sp) = m1 + XMPYU flt_0,fw_h,fm ; m = lt*fw_h + FSTD fm,-8(%sp) ; -8(sp) = m + XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h + FSTD ht_temp,-24(%sp) ; -24(sp) = ht + XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l + FSTD lt_temp,-32(%sp) ; -32(sp) = lt + + LDD -8(%sp),m_0 + LDD -16(%sp),m1_0 ; m1 = temp1 + ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1; + LDD -24(%sp),ht_0 + LDD -32(%sp),lt_0 + + CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1) + ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32) + 
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht += (m>>32)
+ ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD %ret0,tmp_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
+ ADD,DC ht_0,%r0,%ret0 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_add_words_exit
+ .EXIT
+ LDD -80(%sp),%r9 ; restore r9
+ LDD -88(%sp),%r8 ; restore r8
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+; arg3 = w
+
+bn_mul_words
+ .proc
+ .callinfo frame=128
+ .entry
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ STD %r7,32(%sp) ; save r7
+ COPY %r0,%ret0 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+ STD w,56(%sp) ; w on stack
+
+ CMPIB,>= 0,num,bn_mul_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; See if there is only 1 word to do; if so, just do the cleanup
+ ;
+ CMPIB,= 1,num,bn_mul_words_single_top
+ FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+ ; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
+
+ FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
+ XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
+
+ FSTD fm,-8(%sp) ; -8(sp) = m[0]
+ FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
+
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = flt_0*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp_1 = flt_1*fw_l
+
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
+ LDD -8(%sp),m_0
+ LDD -40(%sp),m_1
+
+ LDD -16(%sp),m1_0
+ LDD -48(%sp),m1_1
+ LDD -24(%sp),ht_0
+ LDD -56(%sp),ht_1
+
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m[0] < m1[0])
+ ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
+ ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
+ EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht[0] += (m[0]>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht[1] += (m[1]>>32)
+ ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c (ret0);
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + c (ht_0)
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+ STD lt_1,8(r_ptr) ; rp[1] = lt[1]
+
+ COPY ht_1,%ret0 ; carry = ht[1]
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_mul_words_unroll2
+ LDO 16(r_ptr),r_ptr ; rp += 2
+
+ CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht += (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD %ret0,lt_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ COPY ht_0,%ret0 ; copy carry
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_words_exit
+ .EXIT
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
+
+bn_sqr_words
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ CMPIB,>= 0,num,bn_sqr_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; If only 1, then go straight to cleanup
+ ;
+ CMPIB,= 1,num,bn_sqr_words_single_top
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+
+bn_sqr_words_unroll2
+ FLDD 0(a_ptr),t_float_0 ; a[0]
+ FLDD 8(a_ptr),t_float_1 ; a[1]
+ XMPYU fht_0,flt_0,fm ; m[0]
+ XMPYU fht_1,flt_1,fm_1 ; m[1]
+
+ FSTD fm,-24(%sp) ; store m[0]
+ FSTD fm_1,-56(%sp) ; store m[1]
+ XMPYU flt_0,flt_0,lt_temp ; lt[0]
+ XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
+
+ FSTD lt_temp,-16(%sp) ; store lt[0]
+ FSTD lt_temp_1,-48(%sp) ; store lt[1]
+ XMPYU fht_0,fht_0,ht_temp ; ht[0]
+ XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
+
+ FSTD ht_temp,-8(%sp) ; store ht[0]
+ FSTD ht_temp_1,-40(%sp) ; store ht[1]
+ LDD -24(%sp),m_0
+ LDD -56(%sp),m_1
+
+ AND m_0,high_mask,tmp_0 ; m[0] & Mask
+ AND m_1,high_mask,tmp_1 ; m[1] & Mask
+ DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
+ DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
+
+ LDD -16(%sp),lt_0
+ LDD -48(%sp),lt_1
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
+ EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
+
+ LDD -8(%sp),ht_0
+ LDD -40(%sp),ht_1
+ ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
+ ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
+
+ ADD lt_0,m_0,lt_0 ; lt[0] = lt[0]+m[0]
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+ STD ht_0,8(r_ptr) ; rp[1] = ht[0]
+
+ ADD lt_1,m_1,lt_1 ; lt[1] = lt[1]+m[1]
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_1,16(r_ptr) ; rp[2] = lt[1]
+ STD ht_1,24(r_ptr) ; rp[3] = ht[1]
+
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_sqr_words_unroll2
+ LDO
32(r_ptr),r_ptr ; rp += 4 + + CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done? + + ; + ; Top of loop aligned on 64-byte boundary + ; +bn_sqr_words_single_top + FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R) + + XMPYU fht_0,flt_0,fm ; m + FSTD fm,-24(%sp) ; store m + + XMPYU flt_0,flt_0,lt_temp ; lt + FSTD lt_temp,-16(%sp) ; store lt + + XMPYU fht_0,fht_0,ht_temp ; ht + FSTD ht_temp,-8(%sp) ; store ht + + LDD -24(%sp),m_0 ; load m + AND m_0,high_mask,tmp_0 ; m & Mask + DEPD,Z m_0,30,31,m_0 ; m << 32+1 + LDD -16(%sp),lt_0 ; lt + + LDD -8(%sp),ht_0 ; ht + EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1 + ADD m_0,lt_0,lt_0 ; lt = lt+m + ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0 + ADD,DC ht_0,%r0,ht_0 ; ht++ + + STD lt_0,0(r_ptr) ; rp[0] = lt + STD ht_0,8(r_ptr) ; rp[1] = ht + +bn_sqr_words_exit + .EXIT + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + .PROCEND ;in=23,24,25,26,29;out=28; + + +;---------------------------------------------------------------------------- +; +;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +; +; arg0 = rp +; arg1 = ap +; arg2 = bp +; arg3 = n + +t .reg %r22 +b .reg %r21 +l .reg %r20 + +bn_add_words + .proc + .entry + .callinfo + .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .align 64 + + CMPIB,>= 0,n,bn_add_words_exit + COPY %r0,%ret0 ; return 0 by default + + ; + ; If 2 or more numbers do the loop + ; + CMPIB,= 1,n,bn_add_words_single_top + NOP + + ; + ; This loop is unrolled 2 times (64-byte aligned as well) + ; +bn_add_words_unroll2 + LDD 0(a_ptr),t + LDD 0(b_ptr),b + ADD t,%ret0,t ; t = t+c; + ADD,DC %r0,%r0,%ret0 ; set c to carry + ADD t,b,l ; l = t + b[0] + ADD,DC %ret0,%r0,%ret0 ; c+= carry + STD l,0(r_ptr) + + LDD 8(a_ptr),t + LDD 8(b_ptr),b + ADD t,%ret0,t ; t = t+c; + ADD,DC %r0,%r0,%ret0 ; set c to carry + ADD t,b,l ; l = t + b[0] + ADD,DC %ret0,%r0,%ret0 ; c+= carry + STD l,8(r_ptr) + + LDO -2(n),n + LDO 16(a_ptr),a_ptr + LDO 16(b_ptr),b_ptr + + CMPIB,<= 2,n,bn_add_words_unroll2 + LDO 16(r_ptr),r_ptr + + CMPIB,=,N 0,n,bn_add_words_exit ; are we done? + +bn_add_words_single_top + LDD 0(a_ptr),t + LDD 0(b_ptr),b + + ADD t,%ret0,t ; t = t+c; + ADD,DC %r0,%r0,%ret0 ; set c to carry (could use CMPCLR??) 
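+ ; Note that each word addition can carry twice: once when the incoming
+ ; carry is folded into a[i], and once when b[i] is added. The two
+ ; ADD,DC instructions around this point collect both carry bits into %ret0.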
+ ADD t,b,l ; l = t + b[0] + ADD,DC %ret0,%r0,%ret0 ; c+= carry + STD l,0(r_ptr) + +bn_add_words_exit + .EXIT + BVE (%rp) + NOP + .PROCEND ;in=23,24,25,26,29;out=28; + +;---------------------------------------------------------------------------- +; +;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +; +; arg0 = rp +; arg1 = ap +; arg2 = bp +; arg3 = n + +t1 .reg %r22 +t2 .reg %r21 +sub_tmp1 .reg %r20 +sub_tmp2 .reg %r19 + + +bn_sub_words + .proc + .callinfo + .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .entry + .align 64 + + CMPIB,>= 0,n,bn_sub_words_exit + COPY %r0,%ret0 ; return 0 by default + + ; + ; If 2 or more numbers do the loop + ; + CMPIB,= 1,n,bn_sub_words_single_top + NOP + + ; + ; This loop is unrolled 2 times (64-byte aligned as well) + ; +bn_sub_words_unroll2 + LDD 0(a_ptr),t1 + LDD 0(b_ptr),t2 + SUB t1,t2,sub_tmp1 ; t3 = t1-t2; + SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c; + + CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2 + LDO 1(%r0),sub_tmp2 + + CMPCLR,*= t1,t2,%r0 + COPY sub_tmp2,%ret0 + STD sub_tmp1,0(r_ptr) + + LDD 8(a_ptr),t1 + LDD 8(b_ptr),t2 + SUB t1,t2,sub_tmp1 ; t3 = t1-t2; + SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c; + CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2 + LDO 1(%r0),sub_tmp2 + + CMPCLR,*= t1,t2,%r0 + COPY sub_tmp2,%ret0 + STD sub_tmp1,8(r_ptr) + + LDO -2(n),n + LDO 16(a_ptr),a_ptr + LDO 16(b_ptr),b_ptr + + CMPIB,<= 2,n,bn_sub_words_unroll2 + LDO 16(r_ptr),r_ptr + + CMPIB,=,N 0,n,bn_sub_words_exit ; are we done? + +bn_sub_words_single_top + LDD 0(a_ptr),t1 + LDD 0(b_ptr),t2 + SUB t1,t2,sub_tmp1 ; t3 = t1-t2; + SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c; + CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2 + LDO 1(%r0),sub_tmp2 + + CMPCLR,*= t1,t2,%r0 + COPY sub_tmp2,%ret0 + + STD sub_tmp1,0(r_ptr) + +bn_sub_words_exit + .EXIT + BVE (%rp) + NOP + .PROCEND ;in=23,24,25,26,29;out=28; + +;------------------------------------------------------------------------------ +; +; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d) +; +; arg0 = h +; arg1 = l +; arg2 = d +; +; This is mainly just modified assembly from the compiler, thus the +; lack of variable names. 
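+;
+; As a rough C sketch of the contract (an illustration only, assuming a
+; compiler with an unsigned __int128 type; this is not the code the
+; compiler was actually fed):
+;
+;	unsigned long bn_div_words(unsigned long h, unsigned long l,
+;	                           unsigned long d)
+;	{
+;		/* caller ensures h < d, so the quotient fits in 64 bits */
+;		unsigned __int128 n = ((unsigned __int128)h << 64) | l;
+;		return (unsigned long)(n / d);
+;	}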
+;
+;------------------------------------------------------------------------------
+bn_div_words
+ .proc
+ .callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .IMPORT BN_num_bits_word,CODE,NO_RELOCATION
+ .IMPORT __iob,DATA
+ .IMPORT fprintf,CODE,NO_RELOCATION
+ .IMPORT abort,CODE,NO_RELOCATION
+ .IMPORT $$div2U,MILLICODE
+ .entry
+ STD %r2,-16(%r30)
+ STD,MA %r3,352(%r30)
+ STD %r4,-344(%r30)
+ STD %r5,-336(%r30)
+ STD %r6,-328(%r30)
+ STD %r7,-320(%r30)
+ STD %r8,-312(%r30)
+ STD %r9,-304(%r30)
+ STD %r10,-296(%r30)
+
+ STD %r27,-288(%r30) ; save gp
+
+ COPY %r24,%r3 ; save d
+ COPY %r26,%r4 ; save h (high 64-bits)
+ LDO -1(%r0),%ret0 ; return -1 by default
+
+ CMPB,*= %r0,%arg2,$D3 ; if (d == 0)
+ COPY %r25,%r5 ; save l (low 64-bits)
+
+ LDO -48(%r30),%r29 ; create ap
+ .CALL ;in=26,29;out=28;
+ B,L BN_num_bits_word,%r2
+ COPY %r3,%r26
+ LDD -288(%r30),%r27 ; restore gp
+ LDI 64,%r21
+
+ CMPB,= %r21,%ret0,$00000012 ;if (i == 64) (forward)
+ COPY %ret0,%r24 ; i
+ MTSARCM %r24
+ DEPDI,Z -1,%sar,1,%r29
+ CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
+
+$00000012
+ SUBI 64,%r24,%r31 ; i = 64 - i;
+ CMPCLR,*<< %r4,%r3,%r0 ; if (h >= d)
+ SUB %r4,%r3,%r4 ; h -= d
+ CMPB,= %r31,%r0,$0000001A ; if (i)
+ COPY %r0,%r10 ; ret = 0
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r3,%sar,64,%r3 ; d <<= i;
+ SUBI 64,%r31,%r19 ; 64 - i; redundant
+ MTSAR %r19 ; (64 - i) to shift
+ SHRPD %r4,%r5,%sar,%r4 ; l >> (64-i)
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r5,%sar,64,%r5 ; l <<= i;
+
+$0000001A
+ DEPDI,Z -1,31,32,%r19
+ EXTRD,U %r3,31,32,%r6 ; dh = d >> 32
+ EXTRD,U %r3,63,32,%r8 ; dl = d & 0xffffffff
+ LDO 2(%r0),%r9
+ STD %r3,-280(%r30) ; "d" to stack
+
+$0000001C
+ DEPDI,Z -1,63,32,%r29 ;
+ EXTRD,U %r4,31,32,%r31 ; h >> 32
+ CMPB,*=,N %r31,%r6,$D2 ; if ((h>>32) != dh)(forward) div
+ COPY %r4,%r26
+ EXTRD,U %r4,31,32,%r25
+ COPY %r6,%r24
+ .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
+ B,L $$div2U,%r2
+ EXTRD,U %r6,31,32,%r23
+ DEPD %r28,31,32,%r29
+$D2
+ STD %r29,-272(%r30) ; q
+ AND %r5,%r19,%r24 ; t = l & 0xffffffff00000000;
+ EXTRD,U %r24,31,32,%r24 ; %r24 = l >> 32
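+
+; What follows is the classic quotient-digit estimate/correct step, done
+; twice (%r9 counts down from 2) to produce the high and low 32-bit halves
+; of the result. A loose C-style sketch of one round (an illustration of
+; the idea, not a line-for-line rendering of the compiler output):
+;
+;	q = ((h >> 32) == dh) ? 0xffffffff : h / dh;	/* via $$div2U */
+;	while (q*d overshoots the (h,l) remainder)
+;		q--;		/* the correction loop runs at most a few times */
+;	ret = (ret << 32) | q;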
+ FLDD -272(%r30),%fr7 ; q + FLDD -280(%r30),%fr8 ; d + XMPYU %fr8L,%fr7L,%fr10 + FSTD %fr10,-256(%r30) + XMPYU %fr8L,%fr7R,%fr22 + FSTD %fr22,-264(%r30) + XMPYU %fr8R,%fr7L,%fr11 + XMPYU %fr8R,%fr7R,%fr23 + FSTD %fr11,-232(%r30) + FSTD %fr23,-240(%r30) + LDD -256(%r30),%r28 + DEPD,Z %r28,31,32,%r2 + LDD -264(%r30),%r20 + ADD,L %r20,%r2,%r31 + LDD -232(%r30),%r22 + DEPD,Z %r22,31,32,%r22 + LDD -240(%r30),%r21 + B $00000024 ; enter loop + ADD,L %r21,%r22,%r23 + +$0000002A + LDO -1(%r29),%r29 + SUB %r23,%r8,%r23 +$00000024 + SUB %r4,%r31,%r25 + AND %r25,%r19,%r26 + CMPB,*<>,N %r0,%r26,$00000046 ; (forward) + DEPD,Z %r25,31,32,%r20 + OR %r20,%r24,%r21 + CMPB,*<<,N %r21,%r23,$0000002A ;(backward) + SUB %r31,%r6,%r31 +;-------------Break path--------------------- + +$00000046 + DEPD,Z %r23,31,32,%r25 ;tl + EXTRD,U %r23,31,32,%r26 ;t + AND %r25,%r19,%r24 ;tl = (tl<<32)&0xfffffff0000000L + ADD,L %r31,%r26,%r31 ;th += t; + CMPCLR,*>>= %r5,%r24,%r0 ;if (l<tl) + LDO 1(%r31),%r31 ; th++; + CMPB,*<<=,N %r31,%r4,$00000036 ;if (n < th) (forward) + LDO -1(%r29),%r29 ;q--; + ADD,L %r4,%r3,%r4 ;h += d; +$00000036 + ADDIB,=,N -1,%r9,$D1 ;if (--count == 0) break (forward) + SUB %r5,%r24,%r28 ; l -= tl; + SUB %r4,%r31,%r24 ; h -= th; + SHRPD %r24,%r28,32,%r4 ; h = ((h<<32)|(l>>32)); + DEPD,Z %r29,31,32,%r10 ; ret = q<<32 + b $0000001C + DEPD,Z %r28,31,32,%r5 ; l = l << 32 + +$D1 + OR %r10,%r29,%r28 ; ret |= q +$D3 + LDD -368(%r30),%r2 +$D0 + LDD -296(%r30),%r10 + LDD -304(%r30),%r9 + LDD -312(%r30),%r8 + LDD -320(%r30),%r7 + LDD -328(%r30),%r6 + LDD -336(%r30),%r5 + LDD -344(%r30),%r4 + BVE (%r2) + .EXIT + LDD,MB -352(%r30),%r3 + +bn_div_err_case + MFIA %r6 + ADDIL L'bn_div_words-bn_div_err_case,%r6,%r1 + LDO R'bn_div_words-bn_div_err_case(%r1),%r6 + ADDIL LT'__iob,%r27,%r1 + LDD RT'__iob(%r1),%r26 + ADDIL L'C$4-bn_div_words,%r6,%r1 + LDO R'C$4-bn_div_words(%r1),%r25 + LDO 64(%r26),%r26 + .CALL ;in=24,25,26,29;out=28; + B,L fprintf,%r2 + LDO -48(%r30),%r29 + LDD -288(%r30),%r27 + .CALL ;in=29; + B,L abort,%r2 + LDO -48(%r30),%r29 + LDD -288(%r30),%r27 + B $D0 + LDD -368(%r30),%r2 + .PROCEND ;in=24,25,26,29;out=28; + +;---------------------------------------------------------------------------- +; +; Registers to hold 64-bit values to manipulate. The "L" part +; of the register corresponds to the upper 32-bits, while the "R" +; part corresponds to the lower 32-bits +; +; Note, that when using b6 and b7, the code must save these before +; using them because they are callee save registers +; +; +; Floating point registers to use to save values that +; are manipulated. These don't collide with ftemp1-6 and +; are all caller save registers +; +a0 .reg %fr22 +a0L .reg %fr22L +a0R .reg %fr22R + +a1 .reg %fr23 +a1L .reg %fr23L +a1R .reg %fr23R + +a2 .reg %fr24 +a2L .reg %fr24L +a2R .reg %fr24R + +a3 .reg %fr25 +a3L .reg %fr25L +a3R .reg %fr25R + +a4 .reg %fr26 +a4L .reg %fr26L +a4R .reg %fr26R + +a5 .reg %fr27 +a5L .reg %fr27L +a5R .reg %fr27R + +a6 .reg %fr28 +a6L .reg %fr28L +a6R .reg %fr28R + +a7 .reg %fr29 +a7L .reg %fr29L +a7R .reg %fr29R + +b0 .reg %fr30 +b0L .reg %fr30L +b0R .reg %fr30R + +b1 .reg %fr31 +b1L .reg %fr31L +b1R .reg %fr31R + +; +; Temporary floating point variables, these are all caller save +; registers +; +ftemp1 .reg %fr4 +ftemp2 .reg %fr5 +ftemp3 .reg %fr6 +ftemp4 .reg %fr7 + +; +; The B set of registers when used. 
+; + +b2 .reg %fr8 +b2L .reg %fr8L +b2R .reg %fr8R + +b3 .reg %fr9 +b3L .reg %fr9L +b3R .reg %fr9R + +b4 .reg %fr10 +b4L .reg %fr10L +b4R .reg %fr10R + +b5 .reg %fr11 +b5L .reg %fr11L +b5R .reg %fr11R + +b6 .reg %fr12 +b6L .reg %fr12L +b6R .reg %fr12R + +b7 .reg %fr13 +b7L .reg %fr13L +b7R .reg %fr13R + +c1 .reg %r21 ; only reg +temp1 .reg %r20 ; only reg +temp2 .reg %r19 ; only reg +temp3 .reg %r31 ; only reg + +m1 .reg %r28 +c2 .reg %r23 +high_one .reg %r1 +ht .reg %r6 +lt .reg %r5 +m .reg %r4 +c3 .reg %r3 + +SQR_ADD_C .macro A0L,A0R,C1,C2,C3 + XMPYU A0L,A0R,ftemp1 ; m + FSTD ftemp1,-24(%sp) ; store m + + XMPYU A0R,A0R,ftemp2 ; lt + FSTD ftemp2,-16(%sp) ; store lt + + XMPYU A0L,A0L,ftemp3 ; ht + FSTD ftemp3,-8(%sp) ; store ht + + LDD -24(%sp),m ; load m + AND m,high_mask,temp2 ; m & Mask + DEPD,Z m,30,31,temp3 ; m << 32+1 + LDD -16(%sp),lt ; lt + + LDD -8(%sp),ht ; ht + EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1 + ADD temp3,lt,lt ; lt = lt+m + ADD,L ht,temp1,ht ; ht += temp1 + ADD,DC ht,%r0,ht ; ht++ + + ADD C1,lt,C1 ; c1=c1+lt + ADD,DC ht,%r0,ht ; ht++ + + ADD C2,ht,C2 ; c2=c2+ht + ADD,DC C3,%r0,C3 ; c3++ +.endm + +SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3 + XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht + FSTD ftemp1,-16(%sp) ; + XMPYU A0R,A1L,ftemp2 ; m = bh*lt + FSTD ftemp2,-8(%sp) ; + XMPYU A0R,A1R,ftemp3 ; lt = bl*lt + FSTD ftemp3,-32(%sp) + XMPYU A0L,A1L,ftemp4 ; ht = bh*ht + FSTD ftemp4,-24(%sp) ; + + LDD -8(%sp),m ; r21 = m + LDD -16(%sp),m1 ; r19 = m1 + ADD,L m,m1,m ; m+m1 + + DEPD,Z m,31,32,temp3 ; (m+m1<<32) + LDD -24(%sp),ht ; r24 = ht + + CMPCLR,*>>= m,m1,%r0 ; if (m < m1) + ADD,L ht,high_one,ht ; ht+=high_one + + EXTRD,U m,31,32,temp1 ; m >> 32 + LDD -32(%sp),lt ; lt + ADD,L ht,temp1,ht ; ht+= m>>32 + ADD lt,temp3,lt ; lt = lt+m1 + ADD,DC ht,%r0,ht ; ht++ + + ADD ht,ht,ht ; ht=ht+ht; + ADD,DC C3,%r0,C3 ; add in carry (c3++) + + ADD lt,lt,lt ; lt=lt+lt; + ADD,DC ht,%r0,ht ; add in carry (ht++) + + ADD C1,lt,C1 ; c1=c1+lt + ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++) + LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise + + ADD C2,ht,C2 ; c2 = c2 + ht + ADD,DC C3,%r0,C3 ; add in carry (c3++) +.endm + +; +;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a) +; arg0 = r_ptr +; arg1 = a_ptr +; + +bn_sqr_comba8 + .PROC + .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE + .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .ENTRY + .align 64 + + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + + ; + ; Zero out carries + ; + COPY %r0,c1 + COPY %r0,c2 + COPY %r0,c3 + + LDO 128(%sp),%sp ; bump stack + DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L + DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32 + + ; + ; Load up all of the values we are going to use + ; + FLDD 0(a_ptr),a0 + FLDD 8(a_ptr),a1 + FLDD 16(a_ptr),a2 + FLDD 24(a_ptr),a3 + FLDD 32(a_ptr),a4 + FLDD 40(a_ptr),a5 + FLDD 48(a_ptr),a6 + FLDD 56(a_ptr),a7 + + SQR_ADD_C a0L,a0R,c1,c2,c3 + STD c1,0(r_ptr) ; r[0] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1 + STD c2,8(r_ptr) ; r[1] = c2; + COPY %r0,c2 + + SQR_ADD_C a1L,a1R,c3,c1,c2 + SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2 + STD c3,16(r_ptr) ; r[2] = c3; + COPY %r0,c3 + + SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3 + SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3 + STD c1,24(r_ptr) ; r[3] = c1; + COPY %r0,c1 + + SQR_ADD_C a2L,a2R,c2,c3,c1 + SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1 + SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1 + STD c2,32(r_ptr) ; r[4] = c2; + COPY %r0,c2 + + SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2 + 
SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2 + SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2 + STD c3,40(r_ptr) ; r[5] = c3; + COPY %r0,c3 + + SQR_ADD_C a3L,a3R,c1,c2,c3 + SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3 + SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3 + SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3 + STD c1,48(r_ptr) ; r[6] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1 + SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1 + SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1 + SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1 + STD c2,56(r_ptr) ; r[7] = c2; + COPY %r0,c2 + + SQR_ADD_C a4L,a4R,c3,c1,c2 + SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2 + SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2 + SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2 + STD c3,64(r_ptr) ; r[8] = c3; + COPY %r0,c3 + + SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3 + SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3 + SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3 + STD c1,72(r_ptr) ; r[9] = c1; + COPY %r0,c1 + + SQR_ADD_C a5L,a5R,c2,c3,c1 + SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1 + SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1 + STD c2,80(r_ptr) ; r[10] = c2; + COPY %r0,c2 + + SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2 + SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2 + STD c3,88(r_ptr) ; r[11] = c3; + COPY %r0,c3 + + SQR_ADD_C a6L,a6R,c1,c2,c3 + SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3 + STD c1,96(r_ptr) ; r[12] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1 + STD c2,104(r_ptr) ; r[13] = c2; + COPY %r0,c2 + + SQR_ADD_C a7L,a7R,c3,c1,c2 + STD c3, 112(r_ptr) ; r[14] = c3 + STD c1, 120(r_ptr) ; r[15] = c1 + + .EXIT + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + +;----------------------------------------------------------------------------- +; +;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a) +; arg0 = r_ptr +; arg1 = a_ptr +; + +bn_sqr_comba4 + .proc + .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE + .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .entry + .align 64 + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + + ; + ; Zero out carries + ; + COPY %r0,c1 + COPY %r0,c2 + COPY %r0,c3 + + LDO 128(%sp),%sp ; bump stack + DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L + DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32 + + ; + ; Load up all of the values we are going to use + ; + FLDD 0(a_ptr),a0 + FLDD 8(a_ptr),a1 + FLDD 16(a_ptr),a2 + FLDD 24(a_ptr),a3 + FLDD 32(a_ptr),a4 + FLDD 40(a_ptr),a5 + FLDD 48(a_ptr),a6 + FLDD 56(a_ptr),a7 + + SQR_ADD_C a0L,a0R,c1,c2,c3 + + STD c1,0(r_ptr) ; r[0] = c1; + COPY %r0,c1 + + SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1 + + STD c2,8(r_ptr) ; r[1] = c2; + COPY %r0,c2 + + SQR_ADD_C a1L,a1R,c3,c1,c2 + SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2 + + STD c3,16(r_ptr) ; r[2] = c3; + COPY %r0,c3 + + SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3 + SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3 + + STD c1,24(r_ptr) ; r[3] = c1; + COPY %r0,c1 + + SQR_ADD_C a2L,a2R,c2,c3,c1 + SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1 + + STD c2,32(r_ptr) ; r[4] = c2; + COPY %r0,c2 + + SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2 + STD c3,40(r_ptr) ; r[5] = c3; + COPY %r0,c3 + + SQR_ADD_C a3L,a3R,c1,c2,c3 + STD c1,48(r_ptr) ; r[6] = c1; + STD c2,56(r_ptr) ; r[7] = c2; + + .EXIT + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + + +;--------------------------------------------------------------------------- + +MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3 + XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht + FSTD ftemp1,-16(%sp) ; + XMPYU 
A0R,B0L,ftemp2 ; m = bh*lt + FSTD ftemp2,-8(%sp) ; + XMPYU A0R,B0R,ftemp3 ; lt = bl*lt + FSTD ftemp3,-32(%sp) + XMPYU A0L,B0L,ftemp4 ; ht = bh*ht + FSTD ftemp4,-24(%sp) ; + + LDD -8(%sp),m ; r21 = m + LDD -16(%sp),m1 ; r19 = m1 + ADD,L m,m1,m ; m+m1 + + DEPD,Z m,31,32,temp3 ; (m+m1<<32) + LDD -24(%sp),ht ; r24 = ht + + CMPCLR,*>>= m,m1,%r0 ; if (m < m1) + ADD,L ht,high_one,ht ; ht+=high_one + + EXTRD,U m,31,32,temp1 ; m >> 32 + LDD -32(%sp),lt ; lt + ADD,L ht,temp1,ht ; ht+= m>>32 + ADD lt,temp3,lt ; lt = lt+m1 + ADD,DC ht,%r0,ht ; ht++ + + ADD C1,lt,C1 ; c1=c1+lt + ADD,DC ht,%r0,ht ; bump c3 if overflow,nullify otherwise + + ADD C2,ht,C2 ; c2 = c2 + ht + ADD,DC C3,%r0,C3 ; add in carry (c3++) +.endm + + +; +;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +; arg0 = r_ptr +; arg1 = a_ptr +; arg2 = b_ptr +; + +bn_mul_comba8 + .proc + .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE + .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .entry + .align 64 + + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + FSTD %fr12,32(%sp) ; save r6 + FSTD %fr13,40(%sp) ; save r7 + + ; + ; Zero out carries + ; + COPY %r0,c1 + COPY %r0,c2 + COPY %r0,c3 + + LDO 128(%sp),%sp ; bump stack + DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32 + + ; + ; Load up all of the values we are going to use + ; + FLDD 0(a_ptr),a0 + FLDD 8(a_ptr),a1 + FLDD 16(a_ptr),a2 + FLDD 24(a_ptr),a3 + FLDD 32(a_ptr),a4 + FLDD 40(a_ptr),a5 + FLDD 48(a_ptr),a6 + FLDD 56(a_ptr),a7 + + FLDD 0(b_ptr),b0 + FLDD 8(b_ptr),b1 + FLDD 16(b_ptr),b2 + FLDD 24(b_ptr),b3 + FLDD 32(b_ptr),b4 + FLDD 40(b_ptr),b5 + FLDD 48(b_ptr),b6 + FLDD 56(b_ptr),b7 + + MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3 + STD c1,0(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1 + MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1 + STD c2,8(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2 + MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2 + MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2 + STD c3,16(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3 + MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3 + MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3 + MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3 + STD c1,24(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1 + MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1 + MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1 + MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1 + MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1 + STD c2,32(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2 + MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2 + MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2 + MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2 + MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2 + MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2 + STD c3,40(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3 + MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3 + MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3 + MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3 + MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3 + MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3 + MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3 + STD c1,48(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1 + MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1 + MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1 + MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1 + MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1 + MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1 + MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1 + MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1 + STD c2,56(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2 + MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2 + MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2 + MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2 + MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2 + MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2 + 
MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2 + STD c3,64(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3 + MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3 + MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3 + MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3 + MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3 + MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3 + STD c1,72(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1 + MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1 + MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1 + MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1 + MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1 + STD c2,80(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2 + MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2 + MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2 + MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2 + STD c3,88(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3 + MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3 + MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3 + STD c1,96(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1 + MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1 + STD c2,104(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2 + STD c3,112(r_ptr) + STD c1,120(r_ptr) + + .EXIT + FLDD -88(%sp),%fr13 + FLDD -96(%sp),%fr12 + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + +;----------------------------------------------------------------------------- +; +;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +; arg0 = r_ptr +; arg1 = a_ptr +; arg2 = b_ptr +; + +bn_mul_comba4 + .proc + .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE + .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN + .entry + .align 64 + + STD %r3,0(%sp) ; save r3 + STD %r4,8(%sp) ; save r4 + STD %r5,16(%sp) ; save r5 + STD %r6,24(%sp) ; save r6 + FSTD %fr12,32(%sp) ; save r6 + FSTD %fr13,40(%sp) ; save r7 + + ; + ; Zero out carries + ; + COPY %r0,c1 + COPY %r0,c2 + COPY %r0,c3 + + LDO 128(%sp),%sp ; bump stack + DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32 + + ; + ; Load up all of the values we are going to use + ; + FLDD 0(a_ptr),a0 + FLDD 8(a_ptr),a1 + FLDD 16(a_ptr),a2 + FLDD 24(a_ptr),a3 + + FLDD 0(b_ptr),b0 + FLDD 8(b_ptr),b1 + FLDD 16(b_ptr),b2 + FLDD 24(b_ptr),b3 + + MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3 + STD c1,0(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1 + MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1 + STD c2,8(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2 + MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2 + MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2 + STD c3,16(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3 + MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3 + MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3 + MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3 + STD c1,24(r_ptr) + COPY %r0,c1 + + MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1 + MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1 + MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1 + STD c2,32(r_ptr) + COPY %r0,c2 + + MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2 + MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2 + STD c3,40(r_ptr) + COPY %r0,c3 + + MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3 + STD c1,48(r_ptr) + STD c2,56(r_ptr) + + .EXIT + FLDD -88(%sp),%fr13 + FLDD -96(%sp),%fr12 + LDD -104(%sp),%r6 ; restore r6 + LDD -112(%sp),%r5 ; restore r5 + LDD -120(%sp),%r4 ; restore r4 + BVE (%rp) + LDD,MB -128(%sp),%r3 + + .PROCEND + + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .SPACE $PRIVATE$,SORT=16 + .IMPORT $global$,DATA + .SPACE $TEXT$ + .SUBSPA $CODE$ + .SUBSPA $LIT$,ACCESS=0x2c +C$4 + .ALIGN 8 + .STRINGZ "Division would overflow (%d)\n" + .END diff --git a/openssl/crypto/bn/asm/ppc-mont.pl b/openssl/crypto/bn/asm/ppc-mont.pl new file mode 100644 index 
00000000..7849eae9 --- /dev/null +++ b/openssl/crypto/bn/asm/ppc-mont.pl @@ -0,0 +1,323 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# April 2006
+
+# "Teaser" Montgomery multiplication module for PowerPC. It's possible
+# to gain a bit more by modulo-scheduling the outer loop; a dedicated
+# squaring procedure should then give a further 20%, and the code can
+# be adapted for a 32-bit application running on a 64-bit CPU. As for
+# the latter: it won't be able to achieve "native" 64-bit performance,
+# because in a 32-bit application context every addc instruction would
+# have to be expanded into addc, two right shifts by 32 and finally
+# adde, etc. So far the RSA *sign* performance improvement over
+# pre-bn_mul_mont asm for a 64-bit application running on PPC970/G5 is:
+#
+# 512-bit +65%
+# 1024-bit +35%
+# 2048-bit +18%
+# 4096-bit +4%
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $SIZE_T=4;
+ $RZONE= 224;
+ $FRAME= $SIZE_T*16;
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $LDX= "lwzx"; # load indexed
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $STX= "stwx"; # store indexed
+ $STUX= "stwux"; # store indexed and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UCMP= "cmplw"; # unsigned compare
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $PUSH= $ST;
+ $POP= $LD;
+} elsif ($flavour =~ /64/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $SIZE_T=8;
+ $RZONE= 288;
+ $FRAME= $SIZE_T*16;
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load + $LDU= "ldu"; # load and update + $LDX= "ldx"; # load indexed + $ST= "std"; # store + $STU= "stdu"; # store and update + $STX= "stdx"; # store indexed + $STUX= "stdux"; # store indexed and update + $UMULL= "mulld"; # unsigned multiply low + $UMULH= "mulhdu"; # unsigned multiply high + $UCMP= "cmpld"; # unsigned compare + $SHRI= "srdi"; # unsigned shift right by immediate + $PUSH= $ST; + $POP= $LD; +} else { die "nonsense $flavour"; } + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; + +$sp="r1"; +$toc="r2"; +$rp="r3"; $ovf="r3"; +$ap="r4"; +$bp="r5"; +$np="r6"; +$n0="r7"; +$num="r8"; +$rp="r9"; # $rp is reassigned +$aj="r10"; +$nj="r11"; +$tj="r12"; +# non-volatile registers +$i="r14"; +$j="r15"; +$tp="r16"; +$m0="r17"; +$m1="r18"; +$lo0="r19"; +$hi0="r20"; +$lo1="r21"; +$hi1="r22"; +$alo="r23"; +$ahi="r24"; +$nlo="r25"; +# +$nhi="r0"; + +$code=<<___; +.machine "any" +.text + +.globl .bn_mul_mont +.align 4 +.bn_mul_mont: + cmpwi $num,4 + mr $rp,r3 ; $rp is reassigned + li r3,0 + bltlr + + slwi $num,$num,`log($BNSZ)/log(2)` + li $tj,-4096 + addi $ovf,$num,`$FRAME+$RZONE` + subf $ovf,$ovf,$sp ; $sp-$ovf + and $ovf,$ovf,$tj ; minimize TLB usage + subf $ovf,$sp,$ovf ; $ovf-$sp + srwi $num,$num,`log($BNSZ)/log(2)` + $STUX $sp,$sp,$ovf + + $PUSH r14,`4*$SIZE_T`($sp) + $PUSH r15,`5*$SIZE_T`($sp) + $PUSH r16,`6*$SIZE_T`($sp) + $PUSH r17,`7*$SIZE_T`($sp) + $PUSH r18,`8*$SIZE_T`($sp) + $PUSH r19,`9*$SIZE_T`($sp) + $PUSH r20,`10*$SIZE_T`($sp) + $PUSH r21,`11*$SIZE_T`($sp) + $PUSH r22,`12*$SIZE_T`($sp) + $PUSH r23,`13*$SIZE_T`($sp) + $PUSH r24,`14*$SIZE_T`($sp) + $PUSH r25,`15*$SIZE_T`($sp) + + $LD $n0,0($n0) ; pull n0[0] value + addi $num,$num,-2 ; adjust $num for counter register + + $LD $m0,0($bp) ; m0=bp[0] + $LD $aj,0($ap) ; ap[0] + addi $tp,$sp,$FRAME + $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0] + $UMULH $hi0,$aj,$m0 + + $LD $aj,$BNSZ($ap) ; ap[1] + $LD $nj,0($np) ; np[0] + + $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0 + + $UMULL $alo,$aj,$m0 ; ap[1]*bp[0] + $UMULH $ahi,$aj,$m0 + + $UMULL $lo1,$nj,$m1 ; np[0]*m1 + $UMULH $hi1,$nj,$m1 + $LD $nj,$BNSZ($np) ; np[1] + addc $lo1,$lo1,$lo0 + addze $hi1,$hi1 + + $UMULL $nlo,$nj,$m1 ; np[1]*m1 + $UMULH $nhi,$nj,$m1 + + mtctr $num + li $j,`2*$BNSZ` +.align 4 +L1st: + $LDX $aj,$ap,$j ; ap[j] + addc $lo0,$alo,$hi0 + $LDX $nj,$np,$j ; np[j] + addze $hi0,$ahi + $UMULL $alo,$aj,$m0 ; ap[j]*bp[0] + addc $lo1,$nlo,$hi1 + $UMULH $ahi,$aj,$m0 + addze $hi1,$nhi + $UMULL $nlo,$nj,$m1 ; np[j]*m1 + addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0] + $UMULH $nhi,$nj,$m1 + addze $hi1,$hi1 + $ST $lo1,0($tp) ; tp[j-1] + + addi $j,$j,$BNSZ ; j++ + addi $tp,$tp,$BNSZ ; tp++ + bdnz- L1st +;L1st + addc $lo0,$alo,$hi0 + addze $hi0,$ahi + + addc $lo1,$nlo,$hi1 + addze $hi1,$nhi + addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0] + addze $hi1,$hi1 + $ST $lo1,0($tp) ; tp[j-1] + + li $ovf,0 + addc $hi1,$hi1,$hi0 + addze $ovf,$ovf ; upmost overflow bit + $ST $hi1,$BNSZ($tp) + + li $i,$BNSZ +.align 4 +Louter: + $LDX $m0,$bp,$i ; m0=bp[i] + $LD $aj,0($ap) ; ap[0] + addi $tp,$sp,$FRAME + $LD $tj,$FRAME($sp) ; tp[0] + $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i] + $UMULH $hi0,$aj,$m0 + $LD $aj,$BNSZ($ap) ; ap[1] + $LD $nj,0($np) ; np[0] + addc $lo0,$lo0,$tj ; ap[0]*bp[i]+tp[0] + $UMULL $alo,$aj,$m0 ; ap[j]*bp[i] + addze $hi0,$hi0 + $UMULL $m1,$lo0,$n0 ; tp[0]*n0 + $UMULH $ahi,$aj,$m0 + $UMULL 
$lo1,$nj,$m1 ; np[0]*m1
+ $UMULH $hi1,$nj,$m1
+ $LD $nj,$BNSZ($np) ; np[1]
+ addc $lo1,$lo1,$lo0
+ $UMULL $nlo,$nj,$m1 ; np[1]*m1
+ addze $hi1,$hi1
+ $UMULH $nhi,$nj,$m1
+
+ mtctr $num
+ li $j,`2*$BNSZ`
+.align 4
+Linner:
+ $LDX $aj,$ap,$j ; ap[j]
+ addc $lo0,$alo,$hi0
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addze $hi0,$ahi
+ $LDX $nj,$np,$j ; np[j]
+ addc $lo1,$nlo,$hi1
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
+ addze $hi1,$nhi
+ $UMULH $ahi,$aj,$m0
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ $UMULL $nlo,$nj,$m1 ; np[j]*m1
+ addze $hi0,$hi0
+ $UMULH $nhi,$nj,$m1
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addi $j,$j,$BNSZ ; j++
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+ addi $tp,$tp,$BNSZ ; tp++
+ bdnz- Linner
+;Linner
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addc $lo0,$alo,$hi0
+ addze $hi0,$ahi
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ addze $hi0,$hi0
+
+ addc $lo1,$nlo,$hi1
+ addze $hi1,$nhi
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ addic $ovf,$ovf,-1 ; move upmost overflow to XER[CA]
+ li $ovf,0
+ adde $hi1,$hi1,$hi0
+ addze $ovf,$ovf
+ $ST $hi1,$BNSZ($tp)
+;
+ slwi $tj,$num,`log($BNSZ)/log(2)`
+ $UCMP $i,$tj
+ addi $i,$i,$BNSZ
+ ble- Louter
+
+ addi $num,$num,2 ; restore $num
+ subfc $j,$j,$j ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,$FRAME
+ mtctr $num
+
+.align 4
+Lsub: $LDX $tj,$tp,$j
+ $LDX $nj,$np,$j
+ subfe $aj,$nj,$tj ; tp[j]-np[j]
+ $STX $aj,$rp,$j
+ addi $j,$j,$BNSZ
+ bdnz- Lsub
+
+ li $j,0
+ mtctr $num
+ subfe $ovf,$j,$ovf ; handle upmost overflow bit
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ $LDX $tj,$ap,$j
+ $STX $tj,$rp,$j
+ $STX $j,$tp,$j ; zap at once
+ addi $j,$j,$BNSZ
+ bdnz- Lcopy
+
+ $POP r14,`4*$SIZE_T`($sp)
+ $POP r15,`5*$SIZE_T`($sp)
+ $POP r16,`6*$SIZE_T`($sp)
+ $POP r17,`7*$SIZE_T`($sp)
+ $POP r18,`8*$SIZE_T`($sp)
+ $POP r19,`9*$SIZE_T`($sp)
+ $POP r20,`10*$SIZE_T`($sp)
+ $POP r21,`11*$SIZE_T`($sp)
+ $POP r22,`12*$SIZE_T`($sp)
+ $POP r23,`13*$SIZE_T`($sp)
+ $POP r24,`14*$SIZE_T`($sp)
+ $POP r25,`15*$SIZE_T`($sp)
+ $POP $sp,0($sp)
+ li r3,1
+ blr
+ .long 0
+.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@fy.chalmers.se>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/openssl/crypto/bn/asm/ppc.pl b/openssl/crypto/bn/asm/ppc.pl new file mode 100644 index 00000000..37c65d35 --- /dev/null +++ b/openssl/crypto/bn/asm/ppc.pl @@ -0,0 +1,1981 @@
+#!/usr/bin/env perl
+#
+# Implemented as a Perl wrapper as we want to support several different
+# architectures with a single file. We pick up the target based on the
+# file name we are asked to generate.
+#
+# It should be noted though that this perl code is nothing like
+# <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
+# as a pre-processor to cover for platform differences in name decoration,
+# linker tables, 32-/64-bit instruction sets...
+#
+# As you might know there are several PowerPC ABIs in use. Most notably
+# Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
+# are similar enough to implement leaf(!) functions, which would be ABI
+# neutral. And that's what you find here: ABI neutral leaf functions.
+# In case you wonder what that is...
+#
+# AIX performance
+#
+# MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
+#
+# The following is the performance of 32-bit compiler
+# generated code:
+#
+# OpenSSL 0.9.6c 21 dec 2001
+# built on: Tue Jun 11 11:06:51 EDT 2002
+# options:bn(64,32) ...
+#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
+#rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
+#rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
+#rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
+#dsa 512 bits 0.0087s 0.0106s 114.3 94.5
+#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
+#rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
+#rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
+#rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
+#dsa 512 bits 0.0052s 0.0062s 191.6 162.0
+#dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
+#
+# Number of operations increases by almost 75%
+#
+# Here are performance numbers for 64-bit compiler
+# generated code:
+#
+# OpenSSL 0.9.6g [engine] 9 Aug 2002
+# built on: Fri Apr 18 16:59:20 EDT 2003
+# options:bn(64,64) ...
+# compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
+#rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
+#rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
+#rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
+#dsa 512 bits 0.0026s 0.0032s 382.5 313.7
+#dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
+#rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
+#rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
+#rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
+#dsa 512 bits 0.0016s 0.0020s 610.7 507.1
+#dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
+#
+# Again, performance increases by about 75%
+#
+# Mac OS X, Apple G5 1.8GHz (Note: this is 32-bit code)
+# OpenSSL 0.9.7c 30 Sep 2003
+#
+# Original code.
+#
+#rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
+#rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
+#rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
+#rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
+#dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
+#dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
+#dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
+#rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
+#rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
+#rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
+#dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
+#dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
+#dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
+#
+# Performance increase of ~60%
+#
+# If you have comments or suggestions to improve the code, send
+# me a note at schari@us.ibm.com
+#
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc\"";
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UDIV= "divwu"; # unsigned divide
+ $UCMPI= "cmplwi"; # unsigned compare with immediate
+ $UCMP= "cmplw"; # unsigned compare
+ $CNTLZ= "cntlzw"; # count leading zeros
+ $SHL= "slw"; # shift left
+ $SHR= "srw"; # unsigned shift right
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $SHLI= "slwi"; # shift left by immediate
+ $CLRU= "clrlwi"; # clear upper bits
+ $INSR= "insrwi"; # insert right
+ $ROTL= "rotlwi"; # rotate left by immediate
+ $TR= "tw"; # conditional trap
+} elsif ($flavour =~ /64/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc64\"";
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load + $LDU= "ldu"; # load and update + $ST= "std"; # store + $STU= "stdu"; # store and update + $UMULL= "mulld"; # unsigned multiply low + $UMULH= "mulhdu"; # unsigned multiply high + $UDIV= "divdu"; # unsigned divide + $UCMPI= "cmpldi"; # unsigned compare with immediate + $UCMP= "cmpld"; # unsigned compare + $CNTLZ= "cntlzd"; # count leading zeros + $SHL= "sld"; # shift left + $SHR= "srd"; # unsigned shift right + $SHRI= "srdi"; # unsigned shift right by immediate + $SHLI= "sldi"; # shift left by immediate + $CLRU= "clrldi"; # clear upper bits + $INSR= "insrdi"; # insert right + $ROTL= "rotldi"; # rotate left by immediate + $TR= "td"; # conditional trap +} else { die "nonsense $flavour"; } + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; + +$data=<<EOF; +#-------------------------------------------------------------------- +# +# +# +# +# File: ppc32.s +# +# Created by: Suresh Chari +# IBM Thomas J. Watson Research Library +# Hawthorne, NY +# +# +# Description: Optimized assembly routines for OpenSSL crypto +# on the 32 bitPowerPC platform. +# +# +# Version History +# +# 2. Fixed bn_add,bn_sub and bn_div_words, added comments, +# cleaned up code. Also made a single version which can +# be used for both the AIX and Linux compilers. See NOTE +# below. +# 12/05/03 Suresh Chari +# (with lots of help from) Andy Polyakov +## +# 1. Initial version 10/20/02 Suresh Chari +# +# +# The following file works for the xlc,cc +# and gcc compilers. +# +# NOTE: To get the file to link correctly with the gcc compiler +# you have to change the names of the routines and remove +# the first .(dot) character. This should automatically +# be done in the build process. +# +# Hand optimized assembly code for the following routines +# +# bn_sqr_comba4 +# bn_sqr_comba8 +# bn_mul_comba4 +# bn_mul_comba8 +# bn_sub_words +# bn_add_words +# bn_div_words +# bn_sqr_words +# bn_mul_words +# bn_mul_add_words +# +# NOTE: It is possible to optimize this code more for +# specific PowerPC or Power architectures. On the Northstar +# architecture the optimizations in this file do +# NOT provide much improvement. +# +# If you have comments or suggestions to improve code send +# me a note at schari\@us.ibm.com +# +#-------------------------------------------------------------------------- +# +# Defines to be used in the assembly code. +# +#.set r0,0 # we use it as storage for value of 0 +#.set SP,1 # preserved +#.set RTOC,2 # preserved +#.set r3,3 # 1st argument/return value +#.set r4,4 # 2nd argument/volatile register +#.set r5,5 # 3rd argument/volatile register +#.set r6,6 # ... +#.set r7,7 +#.set r8,8 +#.set r9,9 +#.set r10,10 +#.set r11,11 +#.set r12,12 +#.set r13,13 # not used, nor any other "below" it... + +# Declare function names to be global +# NOTE: For gcc these names MUST be changed to remove +# the first . i.e. for example change ".bn_sqr_comba4" +# to "bn_sqr_comba4". This should be automatically done +# in the build. + + .globl .bn_sqr_comba4 + .globl .bn_sqr_comba8 + .globl .bn_mul_comba4 + .globl .bn_mul_comba8 + .globl .bn_sub_words + .globl .bn_add_words + .globl .bn_div_words + .globl .bn_sqr_words + .globl .bn_mul_words + .globl .bn_mul_add_words + +# .text section + + .machine "any" + +# +# NOTE: The following label name should be changed to +# "bn_sqr_comba4" i.e. 
remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_sqr_comba4: +# +# Optimized version of bn_sqr_comba4. +# +# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a) +# r3 contains r +# r4 contains a +# +# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows: +# +# r5,r6 are the two BN_ULONGs being multiplied. +# r7,r8 are the results of the 32x32 giving 64 bit multiply. +# r9,r10, r11 are the equivalents of c1,c2, c3. +# Here's the assembly +# +# + xor r0,r0,r0 # set r0 = 0. Used in the addze + # instructions below + + #sqr_add_c(a,0,c1,c2,c3) + $LD r5,`0*$BNSZ`(r4) + $UMULL r9,r5,r5 + $UMULH r10,r5,r5 #in first iteration. No need + #to add since c1=c2=c3=0. + # Note c3(r11) is NOT set to 0 + # but will be. + + $ST r9,`0*$BNSZ`(r3) # r[0]=c1; + # sqr_add_c2(a,1,0,c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8) + adde r8,r8,r8 + addze r9,r0 # catch carry if any. + # r9= r0(=0) and carry + + addc r10,r7,r10 # now add to temp result. + addze r11,r8 # r8 added to r11 which is 0 + addze r9,r9 + + $ST r10,`1*$BNSZ`(r3) #r[1]=c2; + #sqr_add_c(a,1,c3,c1,c2) + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + #sqr_add_c2(a,2,0,c3,c1,c2) + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r7,r7,r7 + adde r8,r8,r8 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`2*$BNSZ`(r3) #r[2]=c3 + #sqr_add_c2(a,3,0,c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r7,r7,r7 + adde r8,r8,r8 + addze r11,r0 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,2,1,c1,c2,c3); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r7,r7,r7 + adde r8,r8,r8 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`3*$BNSZ`(r3) #r[3]=c1 + #sqr_add_c(a,2,c2,c3,c1); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + #sqr_add_c2(a,3,1,c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r7,r7,r7 + adde r8,r8,r8 + addze r9,r9 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`4*$BNSZ`(r3) #r[4]=c2 + #sqr_add_c2(a,3,2,c3,c1,c2); + $LD r5,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r7,r7,r7 + adde r8,r8,r8 + addze r10,r0 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`5*$BNSZ`(r3) #r[5] = c3 + #sqr_add_c(a,3,c1,c2,c3); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r9,r7,r9 + adde r10,r8,r10 + + $ST r9,`6*$BNSZ`(r3) #r[6]=c1 + $ST r10,`7*$BNSZ`(r3) #r[7]=c2 + blr + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_sqr_comba8" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_sqr_comba8: +# +# This is an optimized version of the bn_sqr_comba8 routine. +# Tightly uses the adde instruction +# +# +# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a) +# r3 contains r +# r4 contains a +# +# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows: +# +# r5,r6 are the two BN_ULONGs being multiplied. +# r7,r8 are the results of the 32x32 giving 64 bit multiply. +# r9,r10, r11 are the equivalents of c1,c2, c3. +# +# Possible optimization of loading all 8 longs of a into registers +# doesnt provide any speedup +# + + xor r0,r0,r0 #set r0 = 0.Used in addze + #instructions below. 
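	#
	# Each #sqr_add_c/#sqr_add_c2 annotation below mirrors the comba
	# macros of the portable C code; roughly, as a sketch (hi:lo being
	# the double-width $UMULH/$UMULL product):
	#
	#	sqr_add_c(a,i,c1,c2,c3):
	#		hi:lo = a[i]*a[i];
	#		c1 += lo; c2 += hi + carry; c3 += carry;
	#	sqr_add_c2(a,i,j,c1,c2,c3):
	#		hi:lo = a[i]*a[j];	# accumulated twice, i.e. 2*a[i]*a[j]
	#		c1 += lo; c2 += hi + carry; c3 += carry;	# (done twice)
	#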
+ + #sqr_add_c(a,0,c1,c2,c3); + $LD r5,`0*$BNSZ`(r4) + $UMULL r9,r5,r5 #1st iteration: no carries. + $UMULH r10,r5,r5 + $ST r9,`0*$BNSZ`(r3) # r[0]=c1; + #sqr_add_c2(a,1,0,c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 #add the two register number + adde r11,r8,r0 # (r8,r7) to the three register + addze r9,r0 # number (r9,r11,r10).NOTE:r0=0 + + addc r10,r7,r10 #add the two register number + adde r11,r8,r11 # (r8,r7) to the three register + addze r9,r9 # number (r9,r11,r10). + + $ST r10,`1*$BNSZ`(r3) # r[1]=c2 + + #sqr_add_c(a,1,c3,c1,c2); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + #sqr_add_c2(a,2,0,c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + $ST r11,`2*$BNSZ`(r3) #r[2]=c3 + #sqr_add_c2(a,3,0,c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0]. + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,2,1,c1,c2,c3); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + $ST r9,`3*$BNSZ`(r3) #r[3]=c1; + #sqr_add_c(a,2,c2,c3,c1); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + #sqr_add_c2(a,3,1,c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,4,0,c2,c3,c1); + $LD r5,`0*$BNSZ`(r4) + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`4*$BNSZ`(r3) #r[4]=c2; + #sqr_add_c2(a,5,0,c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,4,1,c3,c1,c2); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,3,2,c3,c1,c2); + $LD r5,`2*$BNSZ`(r4) + $LD r6,`3*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`5*$BNSZ`(r3) #r[5]=c3; + #sqr_add_c(a,3,c1,c2,c3); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + #sqr_add_c2(a,4,2,c1,c2,c3); + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,5,1,c1,c2,c3); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,6,0,c1,c2,c3); + $LD r5,`0*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`6*$BNSZ`(r3) #r[6]=c1; + #sqr_add_c2(a,7,0,c2,c3,c1); + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + addc r10,r7,r10 + adde r11,r8,r11 + 
addze r9,r9 + #sqr_add_c2(a,6,1,c2,c3,c1); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,5,2,c2,c3,c1); + $LD r5,`2*$BNSZ`(r4) + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,4,3,c2,c3,c1); + $LD r5,`3*$BNSZ`(r4) + $LD r6,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`7*$BNSZ`(r3) #r[7]=c2; + #sqr_add_c(a,4,c3,c1,c2); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + #sqr_add_c2(a,5,3,c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,6,2,c3,c1,c2); + $LD r5,`2*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,7,1,c3,c1,c2); + $LD r5,`1*$BNSZ`(r4) + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`8*$BNSZ`(r3) #r[8]=c3; + #sqr_add_c2(a,7,2,c1,c2,c3); + $LD r5,`2*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,6,3,c1,c2,c3); + $LD r5,`3*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + #sqr_add_c2(a,5,4,c1,c2,c3); + $LD r5,`4*$BNSZ`(r4) + $LD r6,`5*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`9*$BNSZ`(r3) #r[9]=c1; + #sqr_add_c(a,5,c2,c3,c1); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r0 + #sqr_add_c2(a,6,4,c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + #sqr_add_c2(a,7,3,c2,c3,c1); + $LD r5,`3*$BNSZ`(r4) + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`10*$BNSZ`(r3) #r[10]=c2; + #sqr_add_c2(a,7,4,c3,c1,c2); + $LD r5,`4*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r0 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + #sqr_add_c2(a,6,5,c3,c1,c2); + $LD r5,`5*$BNSZ`(r4) + $LD r6,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + addc r11,r7,r11 + adde r9,r8,r9 + addze r10,r10 + $ST r11,`11*$BNSZ`(r3) #r[11]=c3; + #sqr_add_c(a,6,c1,c2,c3); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r0 + #sqr_add_c2(a,7,5,c1,c2,c3) + $LD r6,`7*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + addc r9,r7,r9 + adde r10,r8,r10 + addze r11,r11 + $ST r9,`12*$BNSZ`(r3) #r[12]=c1; + + #sqr_add_c2(a,7,6,c2,c3,c1) + $LD r5,`6*$BNSZ`(r4) + $UMULL r7,r5,r6 + $UMULH r8,r5,r6 + addc r10,r7,r10 + adde 
r11,r8,r11 + addze r9,r0 + addc r10,r7,r10 + adde r11,r8,r11 + addze r9,r9 + $ST r10,`13*$BNSZ`(r3) #r[13]=c2; + #sqr_add_c(a,7,c3,c1,c2); + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + addc r11,r7,r11 + adde r9,r8,r9 + $ST r11,`14*$BNSZ`(r3) #r[14]=c3; + $ST r9, `15*$BNSZ`(r3) #r[15]=c1; + + + blr + + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_mul_comba4" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_mul_comba4: +# +# This is an optimized version of the bn_mul_comba4 routine. +# +# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +# r3 contains r +# r4 contains a +# r5 contains b +# r6, r7 are the 2 BN_ULONGs being multiplied. +# r8, r9 are the results of the 32x32 giving 64 multiply. +# r10, r11, r12 are the equivalents of c1, c2, and c3. +# + xor r0,r0,r0 #r0=0. Used in addze below. + #mul_add_c(a[0],b[0],c1,c2,c3); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r10,r6,r7 + $UMULH r11,r6,r7 + $ST r10,`0*$BNSZ`(r3) #r[0]=c1 + #mul_add_c(a[0],b[1],c2,c3,c1); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r0 + addze r10,r0 + #mul_add_c(a[1],b[0],c2,c3,c1); + $LD r6, `1*$BNSZ`(r4) + $LD r7, `0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r10 + $ST r11,`1*$BNSZ`(r3) #r[1]=c2 + #mul_add_c(a[2],b[0],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r0 + #mul_add_c(a[1],b[1],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r11 + #mul_add_c(a[0],b[2],c3,c1,c2); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r11 + $ST r12,`2*$BNSZ`(r3) #r[2]=c3 + #mul_add_c(a[0],b[3],c1,c2,c3); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r0 + #mul_add_c(a[1],b[2],c1,c2,c3); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r12 + #mul_add_c(a[2],b[1],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r12 + #mul_add_c(a[3],b[0],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r8,r10 + adde r11,r9,r11 + addze r12,r12 + $ST r10,`3*$BNSZ`(r3) #r[3]=c1 + #mul_add_c(a[3],b[1],c2,c3,c1); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r0 + #mul_add_c(a[2],b[2],c2,c3,c1); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r10 + #mul_add_c(a[1],b[3],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r8,r11 + adde r12,r9,r12 + addze r10,r10 + $ST r11,`4*$BNSZ`(r3) #r[4]=c2 + #mul_add_c(a[2],b[3],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r0 + #mul_add_c(a[3],b[2],c3,c1,c2); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r8,r12 + adde r10,r9,r10 + addze r11,r11 + $ST r12,`5*$BNSZ`(r3) #r[5]=c3 + #mul_add_c(a[3],b[3],c1,c2,c3); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7
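Each #mul_add_c block is one term of a comba column: column k of the product collects every a[i]*b[j] with i+j == k, and the column's carries ripple into the next two limbs through the rotating (c1,c2,c3) triple (r10,r11,r12). A loop-based C reference for the fully unrolled bn_mul_comba4 above — a sketch under the same hypothetical BN_ULONG/__int128 conventions as earlier:

typedef unsigned long long BN_ULONG;

static void bn_mul_comba4_ref(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b)
{
    BN_ULONG c1 = 0, c2 = 0, c3 = 0;
    int k, i;

    for (k = 0; k <= 6; k++) {
        for (i = 0; i <= k; i++) {
            if (i > 3 || k - i > 3)
                continue;                      /* stay inside a[0..3], b[0..3] */
            unsigned __int128 t = (unsigned __int128)a[i] * b[k - i];
            BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);
            c1 += lo; if (c1 < lo) hi++;       /* addc */
            c2 += hi; if (c2 < hi) c3++;       /* adde / addze */
        }
        r[k] = c1; c1 = c2; c2 = c3; c3 = 0;   /* $ST, then rotate the triple */
    }
    r[7] = c1;                                 /* top limb; no carry out exists */
}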
+ addc r10,r8,r10 + adde r11,r9,r11 + + $ST r10,`6*$BNSZ`(r3) #r[6]=c1 + $ST r11,`7*$BNSZ`(r3) #r[7]=c2 + blr + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_mul_comba8" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_mul_comba8: +# +# Optimized version of the bn_mul_comba8 routine. +# +# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) +# r3 contains r +# r4 contains a +# r5 contains b +# r6, r7 are the 2 BN_ULONGs being multiplied. +# r8, r9 are the results of the 32x32 giving 64 multiply. +# r10, r11, r12 are the equivalents of c1, c2, and c3. +# + xor r0,r0,r0 #r0=0. Used in addze below. + + #mul_add_c(a[0],b[0],c1,c2,c3); + $LD r6,`0*$BNSZ`(r4) #a[0] + $LD r7,`0*$BNSZ`(r5) #b[0] + $UMULL r10,r6,r7 + $UMULH r11,r6,r7 + $ST r10,`0*$BNSZ`(r3) #r[0]=c1; + #mul_add_c(a[0],b[1],c2,c3,c1); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + addze r12,r9 # since we didn't set r12 to zero before. + addze r10,r0 + #mul_add_c(a[1],b[0],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`1*$BNSZ`(r3) #r[1]=c2; + #mul_add_c(a[2],b[0],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[1],b[1],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[0],b[2],c3,c1,c2); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + $ST r12,`2*$BNSZ`(r3) #r[2]=c3; + #mul_add_c(a[0],b[3],c1,c2,c3); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[1],b[2],c1,c2,c3); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + + #mul_add_c(a[2],b[1],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[3],b[0],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`3*$BNSZ`(r3) #r[3]=c1; + #mul_add_c(a[4],b[0],c2,c3,c1); + $LD r6,`4*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[3],b[1],c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[2],b[2],c2,c3,c1); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[1],b[3],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[0],b[4],c2,c3,c1); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`4*$BNSZ`(r3) #r[4]=c2; + #mul_add_c(a[0],b[5],c3,c1,c2); + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[1],b[4],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL
r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[2],b[3],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[3],b[2],c3,c1,c2); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[4],b[1],c3,c1,c2); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[5],b[0],c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + $ST r12,`5*$BNSZ`(r3) #r[5]=c3; + #mul_add_c(a[6],b[0],c1,c2,c3); + $LD r6,`6*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[5],b[1],c1,c2,c3); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[4],b[2],c1,c2,c3); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[3],b[3],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[2],b[4],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[1],b[5],c1,c2,c3); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[0],b[6],c1,c2,c3); + $LD r6,`0*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`6*$BNSZ`(r3) #r[6]=c1; + #mul_add_c(a[0],b[7],c2,c3,c1); + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[1],b[6],c2,c3,c1); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[2],b[5],c2,c3,c1); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[3],b[4],c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[4],b[3],c2,c3,c1); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[5],b[2],c2,c3,c1); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[6],b[1],c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[7],b[0],c2,c3,c1); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`0*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`7*$BNSZ`(r3) #r[7]=c2; + #mul_add_c(a[7],b[1],c3,c1,c2); + $LD r7,`1*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[6],b[2],c3,c1,c2); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + 
$UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[5],b[3],c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[4],b[4],c3,c1,c2); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[3],b[5],c3,c1,c2); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[2],b[6],c3,c1,c2); + $LD r6,`2*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[1],b[7],c3,c1,c2); + $LD r6,`1*$BNSZ`(r4) + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + $ST r12,`8*$BNSZ`(r3) #r[8]=c3; + #mul_add_c(a[2],b[7],c1,c2,c3); + $LD r6,`2*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[3],b[6],c1,c2,c3); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[4],b[5],c1,c2,c3); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[5],b[4],c1,c2,c3); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[6],b[3],c1,c2,c3); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[7],b[2],c1,c2,c3); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`2*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`9*$BNSZ`(r3) #r[9]=c1; + #mul_add_c(a[7],b[3],c2,c3,c1); + $LD r7,`3*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[6],b[4],c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[5],b[5],c2,c3,c1); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[4],b[6],c2,c3,c1); + $LD r6,`4*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + #mul_add_c(a[3],b[7],c2,c3,c1); + $LD r6,`3*$BNSZ`(r4) + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`10*$BNSZ`(r3) #r[10]=c2; + #mul_add_c(a[4],b[7],c3,c1,c2); + $LD r6,`4*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r0 + #mul_add_c(a[5],b[6],c3,c1,c2); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[6],b[5],c3,c1,c2); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + #mul_add_c(a[7],b[4],c3,c1,c2); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`4*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + addze r11,r11 + $ST r12,`11*$BNSZ`(r3) #r[11]=c3; + #mul_add_c(a[7],b[5],c1,c2,c3); + $LD 
r7,`5*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r0 + #mul_add_c(a[6],b[6],c1,c2,c3); + $LD r6,`6*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + #mul_add_c(a[5],b[7],c1,c2,c3); + $LD r6,`5*$BNSZ`(r4) + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r10,r10,r8 + adde r11,r11,r9 + addze r12,r12 + $ST r10,`12*$BNSZ`(r3) #r[12]=c1; + #mul_add_c(a[6],b[7],c2,c3,c1); + $LD r6,`6*$BNSZ`(r4) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r0 + #mul_add_c(a[7],b[6],c2,c3,c1); + $LD r6,`7*$BNSZ`(r4) + $LD r7,`6*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r11,r11,r8 + adde r12,r12,r9 + addze r10,r10 + $ST r11,`13*$BNSZ`(r3) #r[13]=c2; + #mul_add_c(a[7],b[7],c3,c1,c2); + $LD r7,`7*$BNSZ`(r5) + $UMULL r8,r6,r7 + $UMULH r9,r6,r7 + addc r12,r12,r8 + adde r10,r10,r9 + $ST r12,`14*$BNSZ`(r3) #r[14]=c3; + $ST r10,`15*$BNSZ`(r3) #r[15]=c1; + blr + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_sub_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# +# +.align 4 +.bn_sub_words: +# +# Handcoded version of bn_sub_words +# +#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +# +# r3 = r +# r4 = a +# r5 = b +# r6 = n +# +# Note: No loop unrolling done since this is not a performance +# critical loop. + + xor r0,r0,r0 #set r0 = 0 +# +# check for r6 = 0 AND set carry bit. +# + subfc. r7,r0,r6 # If r6 is 0 then result is 0. + # if r6 > 0 then result !=0 + # In either case carry bit is set. + beq Lppcasm_sub_adios + addi r4,r4,-$BNSZ + addi r3,r3,-$BNSZ + addi r5,r5,-$BNSZ + mtctr r6 +Lppcasm_sub_mainloop: + $LDU r7,$BNSZ(r4) + $LDU r8,$BNSZ(r5) + subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8) + # if carry = 1 this is r7-r8. Else it + # is r7-r8 -1 as we need. + $STU r6,$BNSZ(r3) + bdnz- Lppcasm_sub_mainloop +Lppcasm_sub_adios: + subfze r3,r0 # if carry bit is set then r3 = 0 else -1 + andi. r3,r3,1 # keep only last bit. + blr + .long 0x00000000 + + +# +# NOTE: The following label name should be changed to +# "bn_add_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_add_words: +# +# Handcoded version of bn_add_words +# +#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) +# +# r3 = r +# r4 = a +# r5 = b +# r6 = n +# +# Note: No loop unrolling done since this is not a performance +# critical loop. + + xor r0,r0,r0 +# +# check for r6 = 0. Is this needed? +# + addic. r6,r6,0 #test r6 and clear carry bit. + beq Lppcasm_add_adios + addi r4,r4,-$BNSZ + addi r3,r3,-$BNSZ + addi r5,r5,-$BNSZ + mtctr r6 +Lppcasm_add_mainloop: + $LDU r7,$BNSZ(r4) + $LDU r8,$BNSZ(r5) + adde r8,r7,r8 + $STU r8,$BNSZ(r3) + bdnz- Lppcasm_add_mainloop +Lppcasm_add_adios: + addze r3,r0 #return carry bit. + blr + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_div_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_div_words: +# +# This is a cleaned up version of code generated by +# the AIX compiler. The only optimization is to use +# the PPC instruction to count leading zeros instead +# of call to num_bits_word. Since this was compiled +# only at level -O2 we can possibly squeeze it more? 
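bn_sub_words and bn_add_words above are plain borrow/carry-propagating loops; the only PPC subtlety is that subfe computes r7 + onescomplement(r8) + CA, so pre-setting the carry bit (the subfc. against zero) makes the first iteration a true subtract, and the final subfze/andi. converts the carry state back into a 0/1 borrow return. A C model of both — a sketch, assuming 64-bit BN_ULONG:

typedef unsigned long long BN_ULONG;

static BN_ULONG bn_sub_words_ref(BN_ULONG *r, const BN_ULONG *a,
                                 const BN_ULONG *b, int n)
{
    BN_ULONG borrow = 0;
    int i;

    for (i = 0; i < n; i++) {
        BN_ULONG t = a[i] - b[i] - borrow;
        /* borrow out if a[i] < b[i], or a[i] == b[i] with a borrow in */
        borrow = (a[i] < b[i]) || (a[i] == b[i] && borrow);
        r[i] = t;
    }
    return borrow;                    /* the subfze/andi. result */
}

static BN_ULONG bn_add_words_ref(BN_ULONG *r, const BN_ULONG *a,
                                 const BN_ULONG *b, int n)
{
    BN_ULONG carry = 0;
    int i;

    for (i = 0; i < n; i++) {
        BN_ULONG t = a[i] + b[i] + carry;
        carry = (t < a[i]) || (t == a[i] && carry);
        r[i] = t;
    }
    return carry;                     /* the addze result */
}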
+# +# r3 = h +# r4 = l +# r5 = d + + $UCMPI 0,r5,0 # compare r5 and 0 + bne Lppcasm_div1 # proceed if d!=0 + li r3,-1 # d=0 return -1 + blr +Lppcasm_div1: + xor r0,r0,r0 #r0=0 + li r8,$BITS + $CNTLZ. r7,r5 #r7 = num leading 0s in d. + beq Lppcasm_div2 #proceed if no leading zeros + subf r8,r7,r8 #r8 = BN_num_bits_word(d) + $SHR. r9,r3,r8 #are there any bits above r8'th? + $TR 16,r9,r0 #if there're, signal to dump core... +Lppcasm_div2: + $UCMP 0,r3,r5 #h>=d? + blt Lppcasm_div3 #goto Lppcasm_div3 if not + subf r3,r5,r3 #h-=d ; +Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i + cmpi 0,0,r7,0 # is (i == 0)? + beq Lppcasm_div4 + $SHL r3,r3,r7 # h = (h<< i) + $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i) + $SHL r5,r5,r7 # d<<=i + or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i)) + $SHL r4,r4,r7 # l <<=i +Lppcasm_div4: + $SHRI r9,r5,`$BITS/2` # r9 = dh + # dl will be computed when needed + # as it saves registers. + li r6,2 #r6=2 + mtctr r6 #counter will be in count. +Lppcasm_divouterloop: + $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4) + $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4 + # compute here for innerloop. + $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh + bne Lppcasm_div5 # goto Lppcasm_div5 if not + + li r8,-1 + $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l + b Lppcasm_div6 +Lppcasm_div5: + $UDIV r8,r3,r9 #q = h/dh +Lppcasm_div6: + $UMULL r12,r9,r8 #th = q*dh + $CLRU r10,r5,`$BITS/2` #r10=dl + $UMULL r6,r8,r10 #tl = q*dl + +Lppcasm_divinnerloop: + subf r10,r12,r3 #t = h -th + $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of... + addic. r7,r7,0 #test if r7 == 0. used below. + # now want to compute + # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4) + # the following 2 instructions do that + $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4) + or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4) + $UCMP cr1,r6,r7 # compare (tl <= r7) + bne Lppcasm_divinnerexit + ble cr1,Lppcasm_divinnerexit + addi r8,r8,-1 #q-- + subf r12,r9,r12 #th -=dh + $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop. + subf r6,r10,r6 #tl -=dl + b Lppcasm_divinnerloop +Lppcasm_divinnerexit: + $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4) + $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h; + $UCMP cr1,r4,r11 # compare l and tl + add r12,r12,r10 # th+=t + bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7 + addi r12,r12,1 # th++ +Lppcasm_div7: + subf r11,r11,r4 #r11=l-tl + $UCMP cr1,r3,r12 #compare h and th + bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8 + addi r8,r8,-1 # q-- + add r3,r5,r3 # h+=d +Lppcasm_div8: + subf r12,r12,r3 #r12 = h-th + $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4 + # want to compute + # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2 + # the following 2 instructions will do this. + $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2. + $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3 + bdz Lppcasm_div9 #if (count==0) break ; + $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4 + b Lppcasm_divouterloop +Lppcasm_div9: + or r3,r8,r0 + blr + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_sqr_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# +.align 4 +.bn_sqr_words: +# +# Optimized version of bn_sqr_words +# +# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n) +# +# r3 = r +# r4 = a +# r5 = n +# +# r6 = a[i]. +# r7,r8 = product. +# +# No unrolling done here. Not performance critical. + + addic. r5,r5,0 #test r5. 
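For reference, what this routine computes is the 128-bit quantity h:l divided by d. PPC has a 64x64->128 multiply ($UMULH) but no 128/64 divide, so the code above normalizes d via $CNTLZ, then produces the quotient as two $BITS/2-bit digits, each estimated from the top halves (q = h/dh) and corrected downward in Lppcasm_divinnerloop. In C, assuming a compiler with unsigned __int128 — a sketch of the semantics, not of the digit-by-digit mechanics:

typedef unsigned long long BN_ULONG;

static BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
    if (d == 0)                    /* matches the early "li r3,-1" return */
        return (BN_ULONG)-1;
    /* callers keep h < d, so the quotient fits in one limb */
    return (BN_ULONG)((((unsigned __int128)h << 64) | l) / d);
}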
+ beq Lppcasm_sqr_adios + addi r4,r4,-$BNSZ + addi r3,r3,-$BNSZ + mtctr r5 +Lppcasm_sqr_mainloop: + #sqr(r[0],r[1],a[0]); + $LDU r6,$BNSZ(r4) + $UMULL r7,r6,r6 + $UMULH r8,r6,r6 + $STU r7,$BNSZ(r3) + $STU r8,$BNSZ(r3) + bdnz- Lppcasm_sqr_mainloop +Lppcasm_sqr_adios: + blr + .long 0x00000000 + + +# +# NOTE: The following label name should be changed to +# "bn_mul_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_mul_words: +# +# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +# +# r3 = rp +# r4 = ap +# r5 = num +# r6 = w + xor r0,r0,r0 + xor r12,r12,r12 # used for carry + rlwinm. r7,r5,30,2,31 # num >> 2 + beq Lppcasm_mw_REM + mtctr r7 +Lppcasm_mw_LOOP: + #mul(rp[0],ap[0],w,c1); + $LD r8,`0*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + #addze r10,r10 #carry is NOT ignored. + #will be taken care of + #in second spin below + #using adde. + $ST r9,`0*$BNSZ`(r3) + #mul(rp[1],ap[1],w,c1); + $LD r8,`1*$BNSZ`(r4) + $UMULL r11,r6,r8 + $UMULH r12,r6,r8 + adde r11,r11,r10 + #addze r12,r12 + $ST r11,`1*$BNSZ`(r3) + #mul(rp[2],ap[2],w,c1); + $LD r8,`2*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + adde r9,r9,r12 + #addze r10,r10 + $ST r9,`2*$BNSZ`(r3) + #mul_add(rp[3],ap[3],w,c1); + $LD r8,`3*$BNSZ`(r4) + $UMULL r11,r6,r8 + $UMULH r12,r6,r8 + adde r11,r11,r10 + addze r12,r12 #this spin we collect carry into + #r12 + $ST r11,`3*$BNSZ`(r3) + + addi r3,r3,`4*$BNSZ` + addi r4,r4,`4*$BNSZ` + bdnz- Lppcasm_mw_LOOP + +Lppcasm_mw_REM: + andi. r5,r5,0x3 + beq Lppcasm_mw_OVER + #mul(rp[0],ap[0],w,c1); + $LD r8,`0*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + addze r10,r10 + $ST r9,`0*$BNSZ`(r3) + addi r12,r10,0 + + addi r5,r5,-1 + cmpli 0,0,r5,0 + beq Lppcasm_mw_OVER + + + #mul(rp[1],ap[1],w,c1); + $LD r8,`1*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + addze r10,r10 + $ST r9,`1*$BNSZ`(r3) + addi r12,r10,0 + + addi r5,r5,-1 + cmpli 0,0,r5,0 + beq Lppcasm_mw_OVER + + #mul_add(rp[2],ap[2],w,c1); + $LD r8,`2*$BNSZ`(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 + addze r10,r10 + $ST r9,`2*$BNSZ`(r3) + addi r12,r10,0 + +Lppcasm_mw_OVER: + addi r3,r12,0 + blr + .long 0x00000000 + +# +# NOTE: The following label name should be changed to +# "bn_mul_add_words" i.e. remove the first dot +# for the gcc compiler. This should be automatically +# done in the build +# + +.align 4 +.bn_mul_add_words: +# +# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) +# +# r3 = rp +# r4 = ap +# r5 = num +# r6 = w +# +# empirical evidence suggests that unrolled version performs best!! +# + xor r0,r0,r0 #r0 = 0 + xor r12,r12,r12 #r12 = 0 . used for carry + rlwinm. r7,r5,30,2,31 # num >> 2 + beq Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover + mtctr r7 +Lppcasm_maw_mainloop: + #mul_add(rp[0],ap[0],w,c1); + $LD r8,`0*$BNSZ`(r4) + $LD r11,`0*$BNSZ`(r3) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + addc r9,r9,r12 #r12 is carry. + addze r10,r10 + addc r9,r9,r11 + #addze r10,r10 + #the above instruction addze + #is NOT needed. Carry will NOT + #be ignored. It's not affected + #by multiply and will be collected + #in the next spin + $ST r9,`0*$BNSZ`(r3) + + #mul_add(rp[1],ap[1],w,c1); + $LD r8,`1*$BNSZ`(r4) + $LD r9,`1*$BNSZ`(r3) + $UMULL r11,r6,r8 + $UMULH r12,r6,r8 + adde r11,r11,r10 #r10 is carry. 
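bn_mul_words above keeps its running carry in the carry flag across the 4x-unrolled spin: each adde folds the previous $UMULH high word into the next low word, so the commented-out addze lines are genuinely unnecessary and one addze per spin suffices (bn_mul_add_words, which this code continues into, adds rp[i] into the same chain). The reference semantics in C — a sketch under the earlier BN_ULONG/__int128 assumptions:

typedef unsigned long long BN_ULONG;

static BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                                 int num, BN_ULONG w)
{
    BN_ULONG c = 0;
    int i;

    for (i = 0; i < num; i++) {
        /* $UMULL/$UMULH pair plus the carry fold-in */
        unsigned __int128 t = (unsigned __int128)ap[i] * w + c;
        rp[i] = (BN_ULONG)t;
        c = (BN_ULONG)(t >> 64);
    }
    return c;          /* bn_mul_add_words is the same with "+ rp[i]" added */
}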
+ addze r12,r12 + addc r11,r11,r9 + #addze r12,r12 + $ST r11,`1*$BNSZ`(r3) + + #mul_add(rp[2],ap[2],w,c1); + $LD r8,`2*$BNSZ`(r4) + $UMULL r9,r6,r8 + $LD r11,`2*$BNSZ`(r3) + $UMULH r10,r6,r8 + adde r9,r9,r12 + addze r10,r10 + addc r9,r9,r11 + #addze r10,r10 + $ST r9,`2*$BNSZ`(r3) + + #mul_add(rp[3],ap[3],w,c1); + $LD r8,`3*$BNSZ`(r4) + $UMULL r11,r6,r8 + $LD r9,`3*$BNSZ`(r3) + $UMULH r12,r6,r8 + adde r11,r11,r10 + addze r12,r12 + addc r11,r11,r9 + addze r12,r12 + $ST r11,`3*$BNSZ`(r3) + addi r3,r3,`4*$BNSZ` + addi r4,r4,`4*$BNSZ` + bdnz- Lppcasm_maw_mainloop + +Lppcasm_maw_leftover: + andi. r5,r5,0x3 + beq Lppcasm_maw_adios + addi r3,r3,-$BNSZ + addi r4,r4,-$BNSZ + #mul_add(rp[0],ap[0],w,c1); + mtctr r5 + $LDU r8,$BNSZ(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + $LDU r11,$BNSZ(r3) + addc r9,r9,r11 + addze r10,r10 + addc r9,r9,r12 + addze r12,r10 + $ST r9,0(r3) + + bdz Lppcasm_maw_adios + #mul_add(rp[1],ap[1],w,c1); + $LDU r8,$BNSZ(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + $LDU r11,$BNSZ(r3) + addc r9,r9,r11 + addze r10,r10 + addc r9,r9,r12 + addze r12,r10 + $ST r9,0(r3) + + bdz Lppcasm_maw_adios + #mul_add(rp[2],ap[2],w,c1); + $LDU r8,$BNSZ(r4) + $UMULL r9,r6,r8 + $UMULH r10,r6,r8 + $LDU r11,$BNSZ(r3) + addc r9,r9,r11 + addze r10,r10 + addc r9,r9,r12 + addze r12,r10 + $ST r9,0(r3) + +Lppcasm_maw_adios: + addi r3,r12,0 + blr + .long 0x00000000 + .align 4 +EOF +$data =~ s/\`([^\`]*)\`/eval $1/gem; +print $data; +close STDOUT; diff --git a/openssl/crypto/bn/asm/ppc64-mont.pl b/openssl/crypto/bn/asm/ppc64-mont.pl new file mode 100644 index 00000000..3449b358 --- /dev/null +++ b/openssl/crypto/bn/asm/ppc64-mont.pl @@ -0,0 +1,918 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# December 2007 + +# The reason for the undertaken effort is basically the following. Even +# though the Power 6 CPU operates at an incredible 4.7GHz clock +# frequency, its PKI performance was observed to be less than +# impressive, essentially as fast as a 1.8GHz PPC970, or 2.6 times(!) +# slower than one would hope. Well, it's not surprising that IBM had to +# make some sacrifices to boost the clock frequency that much, but no +# overall improvement? Having observed how much difference switching to +# the FPU made on UltraSPARC, playing the same stunt on Power 6 appeared +# appropriate... Unfortunately the resulting performance improvement is +# not as impressive, ~30%, and in absolute terms is still very far from +# what one would expect from a 4.7GHz CPU. There is a chance that I'm +# doing something wrong, but in the absence of assembler-level +# micro-profiling data or at least a decent platform guide I can't +# tell... Or better results might be achieved with VMX... Anyway, this +# module provides *worse* performance on other PowerPC implementations, +# ~40-15% slower on PPC970 depending on key length and ~40% slower on +# Power 5 for all key lengths. As it's obviously inappropriate as a +# "best all-round" alternative, it has to be complemented with run-time +# CPU family detection. Oh! It should also be noted that unlike on other +# PowerPC implementations, the IALU ppc-mont.pl module performs +# *suboptimally* on >=1024-bit key lengths on Power 6.
It should also be noted that +# *everything* said so far applies to 64-bit builds! As far as 32-bit +# application executed on 64-bit CPU goes, this module is likely to +# become preferred choice, because it's easy to adapt it for such +# case and *is* faster than 32-bit ppc-mont.pl on *all* processors. + +# February 2008 + +# Micro-profiling assisted optimization results in ~15% improvement +# over original ppc64-mont.pl version, or overall ~50% improvement +# over ppc.pl module on Power 6. If compared to ppc-mont.pl on same +# Power 6 CPU, this module is 5-150% faster depending on key length, +# [hereafter] more for longer keys. But if compared to ppc-mont.pl +# on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive +# in absolute terms, but it's apparently the way Power 6 is... + +$flavour = shift; + +if ($flavour =~ /32/) { + $SIZE_T=4; + $RZONE= 224; + $FRAME= $SIZE_T*12+8*12; + $fname= "bn_mul_mont_ppc64"; + + $STUX= "stwux"; # store indexed and update + $PUSH= "stw"; + $POP= "lwz"; + die "not implemented yet"; +} elsif ($flavour =~ /64/) { + $SIZE_T=8; + $RZONE= 288; + $FRAME= $SIZE_T*12+8*12; + $fname= "bn_mul_mont"; + + # same as above, but 64-bit mnemonics... + $STUX= "stdux"; # store indexed and update + $PUSH= "std"; + $POP= "ld"; +} else { die "nonsense $flavour"; } + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; + +$FRAME=($FRAME+63)&~63; +$TRANSFER=16*8; + +$carry="r0"; +$sp="r1"; +$toc="r2"; +$rp="r3"; $ovf="r3"; +$ap="r4"; +$bp="r5"; +$np="r6"; +$n0="r7"; +$num="r8"; +$rp="r9"; # $rp is reassigned +$tp="r10"; +$j="r11"; +$i="r12"; +# non-volatile registers +$nap_d="r14"; # interleaved ap and np in double format +$a0="r15"; # ap[0] +$t0="r16"; # temporary registers +$t1="r17"; +$t2="r18"; +$t3="r19"; +$t4="r20"; +$t5="r21"; +$t6="r22"; +$t7="r23"; + +# PPC offers enough register bank capacity to unroll inner loops twice +# +# ..A3A2A1A0 +# dcba +# ----------- +# A0a +# A0b +# A0c +# A0d +# A1a +# A1b +# A1c +# A1d +# A2a +# A2b +# A2c +# A2d +# A3a +# A3b +# A3c +# A3d +# ..a +# ..b +# +$ba="f0"; $bb="f1"; $bc="f2"; $bd="f3"; +$na="f4"; $nb="f5"; $nc="f6"; $nd="f7"; +$dota="f8"; $dotb="f9"; +$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13"; +$N0="f14"; $N1="f15"; $N2="f16"; $N3="f17"; +$T0a="f18"; $T0b="f19"; +$T1a="f20"; $T1b="f21"; +$T2a="f22"; $T2b="f23"; +$T3a="f24"; $T3b="f25"; + +# sp----------->+-------------------------------+ +# | saved sp | +# +-------------------------------+ +# | | +# +-------------------------------+ +# | 10 saved gpr, r14-r23 | +# . . +# . . +# +12*size_t +-------------------------------+ +# | 12 saved fpr, f14-f25 | +# . . +# . . +# +12*8 +-------------------------------+ +# | padding to 64 byte boundary | +# . . +# +X +-------------------------------+ +# | 16 gpr<->fpr transfer zone | +# . . +# . . +# +16*8 +-------------------------------+ +# | __int64 tmp[-1] | +# +-------------------------------+ +# | __int64 tmp[num] | +# . . +# . . +# . . +# +(num+1)*8 +-------------------------------+ +# | padding to 64 byte boundary | +# . . +# +X +-------------------------------+ +# | double nap_d[4*num] | +# . . +# . . +# . . 
+# +-------------------------------+ + +$code=<<___; +.machine "any" +.text + +.globl .$fname +.align 5 +.$fname: + cmpwi $num,4 + mr $rp,r3 ; $rp is reassigned + li r3,0 ; possible "not handled" return code + bltlr- + andi. r0,$num,1 ; $num has to be even + bnelr- + + slwi $num,$num,3 ; num*=8 + li $i,-4096 + slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num + add $tp,$tp,$num ; place for tp[num+1] + addi $tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE` + subf $tp,$tp,$sp ; $sp-$tp + and $tp,$tp,$i ; minimize TLB usage + subf $tp,$sp,$tp ; $tp-$sp + $STUX $sp,$sp,$tp ; alloca + + $PUSH r14,`2*$SIZE_T`($sp) + $PUSH r15,`3*$SIZE_T`($sp) + $PUSH r16,`4*$SIZE_T`($sp) + $PUSH r17,`5*$SIZE_T`($sp) + $PUSH r18,`6*$SIZE_T`($sp) + $PUSH r19,`7*$SIZE_T`($sp) + $PUSH r20,`8*$SIZE_T`($sp) + $PUSH r21,`9*$SIZE_T`($sp) + $PUSH r22,`10*$SIZE_T`($sp) + $PUSH r23,`11*$SIZE_T`($sp) + stfd f14,`12*$SIZE_T+0`($sp) + stfd f15,`12*$SIZE_T+8`($sp) + stfd f16,`12*$SIZE_T+16`($sp) + stfd f17,`12*$SIZE_T+24`($sp) + stfd f18,`12*$SIZE_T+32`($sp) + stfd f19,`12*$SIZE_T+40`($sp) + stfd f20,`12*$SIZE_T+48`($sp) + stfd f21,`12*$SIZE_T+56`($sp) + stfd f22,`12*$SIZE_T+64`($sp) + stfd f23,`12*$SIZE_T+72`($sp) + stfd f24,`12*$SIZE_T+80`($sp) + stfd f25,`12*$SIZE_T+88`($sp) + + ld $a0,0($ap) ; pull ap[0] value + ld $n0,0($n0) ; pull n0[0] value + ld $t3,0($bp) ; bp[0] + + addi $tp,$sp,`$FRAME+$TRANSFER+8+64` + li $i,-64 + add $nap_d,$tp,$num + and $nap_d,$nap_d,$i ; align to 64 bytes + + mulld $t7,$a0,$t3 ; ap[0]*bp[0] + ; nap_d is off by 1, because it's used with stfdu/lfdu + addi $nap_d,$nap_d,-8 + srwi $j,$num,`3+1` ; counter register, num/2 + mulld $t7,$t7,$n0 ; tp[0]*n0 + addi $j,$j,-1 + addi $tp,$sp,`$FRAME+$TRANSFER-8` + li $carry,0 + mtctr $j + + ; transfer bp[0] to FPU as 4x16-bit values + extrdi $t0,$t3,16,48 + extrdi $t1,$t3,16,32 + extrdi $t2,$t3,16,16 + extrdi $t3,$t3,16,0 + std $t0,`$FRAME+0`($sp) + std $t1,`$FRAME+8`($sp) + std $t2,`$FRAME+16`($sp) + std $t3,`$FRAME+24`($sp) + ; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values + extrdi $t4,$t7,16,48 + extrdi $t5,$t7,16,32 + extrdi $t6,$t7,16,16 + extrdi $t7,$t7,16,0 + std $t4,`$FRAME+32`($sp) + std $t5,`$FRAME+40`($sp) + std $t6,`$FRAME+48`($sp) + std $t7,`$FRAME+56`($sp) + lwz $t0,4($ap) ; load a[j] as 32-bit word pair + lwz $t1,0($ap) + lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair + lwz $t3,8($ap) + lwz $t4,4($np) ; load n[j] as 32-bit word pair + lwz $t5,0($np) + lwz $t6,12($np) ; load n[j+1] as 32-bit word pair + lwz $t7,8($np) + lfd $ba,`$FRAME+0`($sp) + lfd $bb,`$FRAME+8`($sp) + lfd $bc,`$FRAME+16`($sp) + lfd $bd,`$FRAME+24`($sp) + lfd $na,`$FRAME+32`($sp) + lfd $nb,`$FRAME+40`($sp) + lfd $nc,`$FRAME+48`($sp) + lfd $nd,`$FRAME+56`($sp) + std $t0,`$FRAME+64`($sp) + std $t1,`$FRAME+72`($sp) + std $t2,`$FRAME+80`($sp) + std $t3,`$FRAME+88`($sp) + std $t4,`$FRAME+96`($sp) + std $t5,`$FRAME+104`($sp) + std $t6,`$FRAME+112`($sp) + std $t7,`$FRAME+120`($sp) + fcfid $ba,$ba + fcfid $bb,$bb + fcfid $bc,$bc + fcfid $bd,$bd + fcfid $na,$na + fcfid $nb,$nb + fcfid $nc,$nc + fcfid $nd,$nd + + lfd $A0,`$FRAME+64`($sp) + lfd $A1,`$FRAME+72`($sp) + lfd $A2,`$FRAME+80`($sp) + lfd $A3,`$FRAME+88`($sp) + lfd $N0,`$FRAME+96`($sp) + lfd $N1,`$FRAME+104`($sp) + lfd $N2,`$FRAME+112`($sp) + lfd $N3,`$FRAME+120`($sp) + fcfid $A0,$A0 + fcfid $A1,$A1 + fcfid $A2,$A2 + fcfid $A3,$A3 + fcfid $N0,$N0 + fcfid $N1,$N1 + fcfid $N2,$N2 + fcfid $N3,$N3 + addi $ap,$ap,16 + addi $np,$np,16 + + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + stfd $A0,8($nap_d) ; save a[j] in double format + 
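The block above is the heart of the FPU trick: bp[0] and the (ap[0]*bp[0])*n0 value are split (extrdi) into four 16-bit digits and converted to double (fcfid), while a[j]/n[j] enter as 32-bit halves, so every floating-point product is below 2^48 and the handful of fmadd terms accumulated per column stays below the 2^53 exact-integer range of a double; fctid converts each column back, and the integer unit reassembles limbs with the srdi/insrdi carry chain. A toy C model of one such column pass — a sketch of the principle only, not the module's exact data flow:

#include <stdint.h>

static uint64_t fp_mul_columns(uint32_t a_half, uint64_t b)
{
    double col[4];
    uint64_t t = 0, carry = 0;
    int k;

    for (k = 0; k < 4; k++) {                 /* extrdi: four 16-bit digits */
        double d = (double)((b >> (16 * k)) & 0xffff);
        col[k] = (double)a_half * d;          /* one fmul term, < 2^48: exact */
    }
    for (k = 0; k < 4; k++) {                 /* fctid + srdi/insrdi reassembly */
        carry += (uint64_t)col[k];
        t |= (carry & 0xffff) << (16 * k);
        carry >>= 16;
    }
    return t;   /* low 64 bits; the assembly threads `carry` on into tp[] */
}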
stfd $A1,16($nap_d) + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + stfd $A2,24($nap_d) ; save a[j+1] in double format + stfd $A3,32($nap_d) + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + stfd $N0,40($nap_d) ; save n[j] in double format + stfd $N1,48($nap_d) + fmul $T0a,$A0,$ba + fmul $T0b,$A0,$bb + stfd $N2,56($nap_d) ; save n[j+1] in double format + stfdu $N3,64($nap_d) + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + + fctid $T0a,$T0a + fctid $T0b,$T0b + fctid $T1a,$T1a + fctid $T1b,$T1b + fctid $T2a,$T2a + fctid $T2b,$T2b + fctid $T3a,$T3a + fctid $T3b,$T3b + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + stfd $T2a,`$FRAME+32`($sp) + stfd $T2b,`$FRAME+40`($sp) + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + +.align 5 +L1st: + lwz $t0,4($ap) ; load a[j] as 32-bit word pair + lwz $t1,0($ap) + lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair + lwz $t3,8($ap) + lwz $t4,4($np) ; load n[j] as 32-bit word pair + lwz $t5,0($np) + lwz $t6,12($np) ; load n[j+1] as 32-bit word pair + lwz $t7,8($np) + std $t0,`$FRAME+64`($sp) + std $t1,`$FRAME+72`($sp) + std $t2,`$FRAME+80`($sp) + std $t3,`$FRAME+88`($sp) + std $t4,`$FRAME+96`($sp) + std $t5,`$FRAME+104`($sp) + std $t6,`$FRAME+112`($sp) + std $t7,`$FRAME+120`($sp) + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + lfd $A0,`$FRAME+64`($sp) + lfd $A1,`$FRAME+72`($sp) + lfd $A2,`$FRAME+80`($sp) + lfd $A3,`$FRAME+88`($sp) + lfd $N0,`$FRAME+96`($sp) + lfd $N1,`$FRAME+104`($sp) + lfd $N2,`$FRAME+112`($sp) + lfd $N3,`$FRAME+120`($sp) + fcfid $A0,$A0 + fcfid $A1,$A1 + fcfid $A2,$A2 + fcfid $A3,$A3 + fcfid $N0,$N0 + fcfid $N1,$N1 + fcfid $N2,$N2 + fcfid $N3,$N3 + addi $ap,$ap,16 + addi $np,$np,16 + + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + stfd $A0,8($nap_d) ; save a[j] in double format + stfd $A1,16($nap_d) + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + fmadd $T0a,$A0,$ba,$dota + fmadd $T0b,$A0,$bb,$dotb + stfd $A2,24($nap_d) ; save a[j+1] in double format + stfd $A3,32($nap_d) + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + stfd $N0,40($nap_d) ; save n[j] in double format + stfd $N1,48($nap_d) + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + add $t0,$t0,$carry ; can not overflow + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + stfd $N2,56($nap_d) ; save n[j+1] in double format + stfdu $N3,64($nap_d) + srdi $carry,$t0,16 + add $t1,$t1,$carry + srdi $carry,$t1,16 + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + insrdi $t0,$t1,16,32 + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + add $t2,$t2,$carry + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + srdi $carry,$t2,16 + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + insrdi $t0,$t2,16,16 + add 
$t3,$t3,$carry + srdi $carry,$t3,16 + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + insrdi $t0,$t3,16,0 ; 0..63 bits + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + add $t4,$t4,$carry + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + srdi $carry,$t4,16 + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + add $t5,$t5,$carry + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + + fctid $T0a,$T0a + fctid $T0b,$T0b + add $t6,$t6,$carry + fctid $T1a,$T1a + fctid $T1b,$T1b + srdi $carry,$t6,16 + fctid $T2a,$T2a + fctid $T2b,$T2b + insrdi $t4,$t6,16,16 + fctid $T3a,$T3a + fctid $T3b,$T3b + add $t7,$t7,$carry + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + stfd $T2a,`$FRAME+32`($sp) + stfd $T2b,`$FRAME+40`($sp) + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + std $t0,8($tp) ; tp[j-1] + stdu $t4,16($tp) ; tp[j] + bdnz- L1st + + fctid $dota,$dota + fctid $dotb,$dotb + + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + stfd $dota,`$FRAME+64`($sp) + stfd $dotb,`$FRAME+72`($sp) + + add $t0,$t0,$carry ; can not overflow + srdi $carry,$t0,16 + add $t1,$t1,$carry + srdi $carry,$t1,16 + insrdi $t0,$t1,16,32 + add $t2,$t2,$carry + srdi $carry,$t2,16 + insrdi $t0,$t2,16,16 + add $t3,$t3,$carry + srdi $carry,$t3,16 + insrdi $t0,$t3,16,0 ; 0..63 bits + add $t4,$t4,$carry + srdi $carry,$t4,16 + add $t5,$t5,$carry + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + add $t6,$t6,$carry + srdi $carry,$t6,16 + insrdi $t4,$t6,16,16 + add $t7,$t7,$carry + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + ld $t6,`$FRAME+64`($sp) + ld $t7,`$FRAME+72`($sp) + + std $t0,8($tp) ; tp[j-1] + stdu $t4,16($tp) ; tp[j] + + add $t6,$t6,$carry ; can not overflow + srdi $carry,$t6,16 + add $t7,$t7,$carry + insrdi $t6,$t7,48,0 + srdi $ovf,$t7,48 + std $t6,8($tp) ; tp[num-1] + + slwi $t7,$num,2 + subf $nap_d,$t7,$nap_d ; rewind pointer + + li $i,8 ; i=1 +.align 5 +Louter: + ldx $t3,$bp,$i ; bp[i] + ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0] + mulld $t7,$a0,$t3 ; ap[0]*bp[i] + + addi $tp,$sp,`$FRAME+$TRANSFER` + add $t7,$t7,$t6 ; ap[0]*bp[i]+tp[0] + li $carry,0 + mulld $t7,$t7,$n0 ; tp[0]*n0 + mtctr $j + + ; transfer bp[i] to FPU as 4x16-bit values + extrdi $t0,$t3,16,48 + extrdi $t1,$t3,16,32 + extrdi $t2,$t3,16,16 + extrdi $t3,$t3,16,0 + std $t0,`$FRAME+0`($sp) + std $t1,`$FRAME+8`($sp) + std $t2,`$FRAME+16`($sp) + std $t3,`$FRAME+24`($sp) + ; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values + extrdi $t4,$t7,16,48 + extrdi $t5,$t7,16,32 + extrdi $t6,$t7,16,16 + extrdi $t7,$t7,16,0 + std $t4,`$FRAME+32`($sp) + std $t5,`$FRAME+40`($sp) + std $t6,`$FRAME+48`($sp) + std $t7,`$FRAME+56`($sp) + + lfd $A0,8($nap_d) ; load a[j] in double format + lfd $A1,16($nap_d) + lfd $A2,24($nap_d) ; load a[j+1] in double format + lfd $A3,32($nap_d) + lfd $N0,40($nap_d) ; load n[j] in double format + lfd $N1,48($nap_d) + lfd $N2,56($nap_d) ; load n[j+1] in double format + lfdu $N3,64($nap_d) + + lfd $ba,`$FRAME+0`($sp) + lfd $bb,`$FRAME+8`($sp) + lfd $bc,`$FRAME+16`($sp) + lfd $bd,`$FRAME+24`($sp) + lfd $na,`$FRAME+32`($sp) + lfd $nb,`$FRAME+40`($sp) + lfd $nc,`$FRAME+48`($sp) + lfd $nd,`$FRAME+56`($sp) + + fcfid $ba,$ba + fcfid $bb,$bb + fcfid $bc,$bc + fcfid $bd,$bd + fcfid $na,$na + fcfid $nb,$nb + fcfid 
$nc,$nc + fcfid $nd,$nd + + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + fmul $T0a,$A0,$ba + fmul $T0b,$A0,$bb + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + lfd $A0,8($nap_d) ; load a[j] in double format + lfd $A1,16($nap_d) + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + lfd $A2,24($nap_d) ; load a[j+1] in double format + lfd $A3,32($nap_d) + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + + fctid $T0a,$T0a + fctid $T0b,$T0b + fctid $T1a,$T1a + fctid $T1b,$T1b + fctid $T2a,$T2a + fctid $T2b,$T2b + fctid $T3a,$T3a + fctid $T3b,$T3b + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + stfd $T2a,`$FRAME+32`($sp) + stfd $T2b,`$FRAME+40`($sp) + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + +.align 5 +Linner: + fmul $T1a,$A1,$ba + fmul $T1b,$A1,$bb + fmul $T2a,$A2,$ba + fmul $T2b,$A2,$bb + lfd $N0,40($nap_d) ; load n[j] in double format + lfd $N1,48($nap_d) + fmul $T3a,$A3,$ba + fmul $T3b,$A3,$bb + fmadd $T0a,$A0,$ba,$dota + fmadd $T0b,$A0,$bb,$dotb + lfd $N2,56($nap_d) ; load n[j+1] in double format + lfdu $N3,64($nap_d) + + fmadd $T1a,$A0,$bc,$T1a + fmadd $T1b,$A0,$bd,$T1b + fmadd $T2a,$A1,$bc,$T2a + fmadd $T2b,$A1,$bd,$T2b + lfd $A0,8($nap_d) ; load a[j] in double format + lfd $A1,16($nap_d) + fmadd $T3a,$A2,$bc,$T3a + fmadd $T3b,$A2,$bd,$T3b + fmul $dota,$A3,$bc + fmul $dotb,$A3,$bd + lfd $A2,24($nap_d) ; load a[j+1] in double format + lfd $A3,32($nap_d) + + fmadd $T1a,$N1,$na,$T1a + fmadd $T1b,$N1,$nb,$T1b + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + fmadd $T2a,$N2,$na,$T2a + fmadd $T2b,$N2,$nb,$T2b + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + fmadd $T3a,$N3,$na,$T3a + fmadd $T3b,$N3,$nb,$T3b + add $t0,$t0,$carry ; can not overflow + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + fmadd $T0a,$N0,$na,$T0a + fmadd $T0b,$N0,$nb,$T0b + srdi $carry,$t0,16 + add $t1,$t1,$carry + srdi $carry,$t1,16 + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + + fmadd $T1a,$N0,$nc,$T1a + fmadd $T1b,$N0,$nd,$T1b + insrdi $t0,$t1,16,32 + ld $t1,8($tp) ; tp[j] + fmadd $T2a,$N1,$nc,$T2a + fmadd $T2b,$N1,$nd,$T2b + add $t2,$t2,$carry + fmadd $T3a,$N2,$nc,$T3a + fmadd $T3b,$N2,$nd,$T3b + srdi $carry,$t2,16 + insrdi $t0,$t2,16,16 + fmadd $dota,$N3,$nc,$dota + fmadd $dotb,$N3,$nd,$dotb + add $t3,$t3,$carry + ldu $t2,16($tp) ; tp[j+1] + srdi $carry,$t3,16 + insrdi $t0,$t3,16,0 ; 0..63 bits + add $t4,$t4,$carry + + fctid $T0a,$T0a + fctid $T0b,$T0b + srdi $carry,$t4,16 + fctid $T1a,$T1a + fctid $T1b,$T1b + add $t5,$t5,$carry + fctid $T2a,$T2a + fctid $T2b,$T2b + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + fctid $T3a,$T3a + fctid $T3b,$T3b + add $t6,$t6,$carry + srdi $carry,$t6,16 + insrdi $t4,$t6,16,16 + + stfd $T0a,`$FRAME+0`($sp) + stfd $T0b,`$FRAME+8`($sp) + add $t7,$t7,$carry + addc $t3,$t0,$t1 + stfd $T1a,`$FRAME+16`($sp) + stfd $T1b,`$FRAME+24`($sp) + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + stfd $T2a,`$FRAME+32`($sp) + stfd 
$T2b,`$FRAME+40`($sp) + adde $t5,$t4,$t2 + stfd $T3a,`$FRAME+48`($sp) + stfd $T3b,`$FRAME+56`($sp) + addze $carry,$carry + std $t3,-16($tp) ; tp[j-1] + std $t5,-8($tp) ; tp[j] + bdnz- Linner + + fctid $dota,$dota + fctid $dotb,$dotb + ld $t0,`$FRAME+0`($sp) + ld $t1,`$FRAME+8`($sp) + ld $t2,`$FRAME+16`($sp) + ld $t3,`$FRAME+24`($sp) + ld $t4,`$FRAME+32`($sp) + ld $t5,`$FRAME+40`($sp) + ld $t6,`$FRAME+48`($sp) + ld $t7,`$FRAME+56`($sp) + stfd $dota,`$FRAME+64`($sp) + stfd $dotb,`$FRAME+72`($sp) + + add $t0,$t0,$carry ; can not overflow + srdi $carry,$t0,16 + add $t1,$t1,$carry + srdi $carry,$t1,16 + insrdi $t0,$t1,16,32 + add $t2,$t2,$carry + ld $t1,8($tp) ; tp[j] + srdi $carry,$t2,16 + insrdi $t0,$t2,16,16 + add $t3,$t3,$carry + ldu $t2,16($tp) ; tp[j+1] + srdi $carry,$t3,16 + insrdi $t0,$t3,16,0 ; 0..63 bits + add $t4,$t4,$carry + srdi $carry,$t4,16 + add $t5,$t5,$carry + srdi $carry,$t5,16 + insrdi $t4,$t5,16,32 + add $t6,$t6,$carry + srdi $carry,$t6,16 + insrdi $t4,$t6,16,16 + add $t7,$t7,$carry + insrdi $t4,$t7,16,0 ; 64..127 bits + srdi $carry,$t7,16 ; upper 33 bits + ld $t6,`$FRAME+64`($sp) + ld $t7,`$FRAME+72`($sp) + + addc $t3,$t0,$t1 + adde $t5,$t4,$t2 + addze $carry,$carry + + std $t3,-16($tp) ; tp[j-1] + std $t5,-8($tp) ; tp[j] + + add $carry,$carry,$ovf ; consume upmost overflow + add $t6,$t6,$carry ; can not overflow + srdi $carry,$t6,16 + add $t7,$t7,$carry + insrdi $t6,$t7,48,0 + srdi $ovf,$t7,48 + std $t6,0($tp) ; tp[num-1] + + slwi $t7,$num,2 + addi $i,$i,8 + subf $nap_d,$t7,$nap_d ; rewind pointer + cmpw $i,$num + blt- Louter + + subf $np,$num,$np ; rewind np + addi $j,$j,1 ; restore counter + subfc $i,$i,$i ; j=0 and "clear" XER[CA] + addi $tp,$sp,`$FRAME+$TRANSFER+8` + addi $t4,$sp,`$FRAME+$TRANSFER+16` + addi $t5,$np,8 + addi $t6,$rp,8 + mtctr $j + +.align 4 +Lsub: ldx $t0,$tp,$i + ldx $t1,$np,$i + ldx $t2,$t4,$i + ldx $t3,$t5,$i + subfe $t0,$t1,$t0 ; tp[j]-np[j] + subfe $t2,$t3,$t2 ; tp[j+1]-np[j+1] + stdx $t0,$rp,$i + stdx $t2,$t6,$i + addi $i,$i,16 + bdnz- Lsub + + li $i,0 + subfe $ovf,$i,$ovf ; handle upmost overflow bit + and $ap,$tp,$ovf + andc $np,$rp,$ovf + or $ap,$ap,$np ; ap=borrow?tp:rp + addi $t7,$ap,8 + mtctr $j + +.align 4 +Lcopy: ; copy or in-place refresh + ldx $t0,$ap,$i + ldx $t1,$t7,$i + std $i,8($nap_d) ; zap nap_d + std $i,16($nap_d) + std $i,24($nap_d) + std $i,32($nap_d) + std $i,40($nap_d) + std $i,48($nap_d) + std $i,56($nap_d) + stdu $i,64($nap_d) + stdx $t0,$rp,$i + stdx $t1,$t6,$i + stdx $i,$tp,$i ; zap tp at once + stdx $i,$t4,$i + addi $i,$i,16 + bdnz- Lcopy + + $POP r14,`2*$SIZE_T`($sp) + $POP r15,`3*$SIZE_T`($sp) + $POP r16,`4*$SIZE_T`($sp) + $POP r17,`5*$SIZE_T`($sp) + $POP r18,`6*$SIZE_T`($sp) + $POP r19,`7*$SIZE_T`($sp) + $POP r20,`8*$SIZE_T`($sp) + $POP r21,`9*$SIZE_T`($sp) + $POP r22,`10*$SIZE_T`($sp) + $POP r23,`11*$SIZE_T`($sp) + lfd f14,`12*$SIZE_T+0`($sp) + lfd f15,`12*$SIZE_T+8`($sp) + lfd f16,`12*$SIZE_T+16`($sp) + lfd f17,`12*$SIZE_T+24`($sp) + lfd f18,`12*$SIZE_T+32`($sp) + lfd f19,`12*$SIZE_T+40`($sp) + lfd f20,`12*$SIZE_T+48`($sp) + lfd f21,`12*$SIZE_T+56`($sp) + lfd f22,`12*$SIZE_T+64`($sp) + lfd f23,`12*$SIZE_T+72`($sp) + lfd f24,`12*$SIZE_T+80`($sp) + lfd f25,`12*$SIZE_T+88`($sp) + $POP $sp,0($sp) + li r3,1 ; signal "handled" + blr + .long 0 +.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@fy.chalmers.se>" +___ + +$code =~ s/\`([^\`]*)\`/eval $1/gem; +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/s390x-mont.pl b/openssl/crypto/bn/asm/s390x-mont.pl new file mode 100644 index
00000000..f61246f5 --- /dev/null +++ b/openssl/crypto/bn/asm/s390x-mont.pl @@ -0,0 +1,225 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# April 2007. +# +# Performance improvement over vanilla C code varies from 85% to 45% +# depending on key length and benchmark. Unfortunately in this context +# these are not very impressive results [for code that utilizes "wide" +# 64x64=128-bit multiplication, which is not commonly available to C +# programmers]; at least a hand-coded bn_asm.c replacement is known to +# provide 30-40% better results for the longest keys. Well, on second +# thought it's not very surprising, because z-CPUs are single-issue and +# _strictly_ in-order, while bn_mul_mont is more or less dependent on +# the CPU's ability to pipe-line instructions and have several of them +# "in-flight" at the same time. I mean, while other methods, for +# example Karatsuba, aim to minimize the amount of multiplications at +# the cost of an increase in other operations, bn_mul_mont aims to +# neatly "overlap" multiplications and the other operations [and on +# most platforms even minimize the amount of the other operations, in +# particular references to memory]. But it's possible to improve this +# module's performance by implementing a dedicated squaring code-path +# and possibly by unrolling loops... + +# January 2009. +# +# Reschedule to minimize/avoid Address Generation Interlock hazard, +# make inner loops counter-based.
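For orientation, here is the word-serial Montgomery multiplication that this module (like the alpha and ppc64 ones) implements, written out in C. A sketch only, with hypothetical helper naming: BN_ULONG assumed 64-bit with unsigned __int128 available, n0 passed by value, and tp[] standing in for the num+2-limb stack scratch the assembly allocates (the real entry points take const BN_ULONG *n0 and carve tp off the stack themselves):

typedef unsigned long long BN_ULONG;

static void bn_mul_mont_ref(BN_ULONG *rp, const BN_ULONG *ap,
                            const BN_ULONG *bp, const BN_ULONG *np,
                            BN_ULONG n0, int num, BN_ULONG *tp /* num+2 */)
{
    int i, j;

    for (j = 0; j < num + 2; j++)
        tp[j] = 0;

    for (i = 0; i < num; i++) {
        unsigned __int128 t;
        BN_ULONG c, m;

        c = 0;                                   /* tp += bp[i] * ap[] */
        for (j = 0; j < num; j++) {
            t = (unsigned __int128)ap[j] * bp[i] + tp[j] + c;
            tp[j] = (BN_ULONG)t; c = (BN_ULONG)(t >> 64);
        }
        tp[num] += c;
        tp[num + 1] += (tp[num] < c);            /* the upmost overflow bit */

        m = tp[0] * n0;                          /* Montgomery multiplier */
        t = (unsigned __int128)np[0] * m + tp[0];
        c = (BN_ULONG)(t >> 64);                 /* low limb becomes 0 */
        for (j = 1; j < num; j++) {              /* tp = (tp + m*np[]) >> 64 */
            t = (unsigned __int128)np[j] * m + tp[j] + c;
            tp[j - 1] = (BN_ULONG)t; c = (BN_ULONG)(t >> 64);
        }
        tp[num - 1] = tp[num] + c;
        tp[num] = tp[num + 1] + (tp[num - 1] < c);
        tp[num + 1] = 0;
    }

    /* conditional subtraction + masked select, as in the .Lsub/.Lcopy
     * tails below */
    {
        BN_ULONG borrow = 0, keep;

        for (j = 0; j < num; j++) {
            BN_ULONG s = tp[j] - np[j] - borrow;
            borrow = (tp[j] < np[j]) || (tp[j] == np[j] && borrow);
            rp[j] = s;
        }
        keep = (BN_ULONG)0 - (borrow & (tp[num] ^ 1)); /* all-ones: keep tp */
        for (j = 0; j < num; j++)
            rp[j] = (tp[j] & keep) | (rp[j] & ~keep);
    }
}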
+ +$mn0="%r0"; +$num="%r1"; + +# int bn_mul_mont( +$rp="%r2"; # BN_ULONG *rp, +$ap="%r3"; # const BN_ULONG *ap, +$bp="%r4"; # const BN_ULONG *bp, +$np="%r5"; # const BN_ULONG *np, +$n0="%r6"; # const BN_ULONG *n0, +#$num="160(%r15)" # int num); + +$bi="%r2"; # zaps rp +$j="%r7"; + +$ahi="%r8"; +$alo="%r9"; +$nhi="%r10"; +$nlo="%r11"; +$AHI="%r12"; +$NHI="%r13"; +$count="%r14"; +$sp="%r15"; + +$code.=<<___; +.text +.globl bn_mul_mont +.type bn_mul_mont,\@function +bn_mul_mont: + lgf $num,164($sp) # pull $num + sla $num,3 # $num to enumerate bytes + la $bp,0($num,$bp) + + stg %r2,16($sp) + + cghi $num,16 # + lghi %r2,0 # + blr %r14 # if($num<16) return 0; + cghi $num,96 # + bhr %r14 # if($num>96) return 0; + + stmg %r3,%r15,24($sp) + + lghi $rp,-160-8 # leave room for carry bit + lcgr $j,$num # -$num + lgr %r0,$sp + la $rp,0($rp,$sp) + la $sp,0($j,$rp) # alloca + stg %r0,0($sp) # back chain + + sra $num,3 # restore $num + la $bp,0($j,$bp) # restore $bp + ahi $num,-1 # adjust $num for inner loop + lg $n0,0($n0) # pull n0 + + lg $bi,0($bp) + lg $alo,0($ap) + mlgr $ahi,$bi # ap[0]*bp[0] + lgr $AHI,$ahi + + lgr $mn0,$alo # "tp[0]"*n0 + msgr $mn0,$n0 + + lg $nlo,0($np) # + mlgr $nhi,$mn0 # np[0]*m1 + algr $nlo,$alo # +="tp[0]" + lghi $NHI,0 + alcgr $NHI,$nhi + + la $j,8(%r0) # j=1 + lr $count,$num + +.align 16 +.L1st: + lg $alo,0($j,$ap) + mlgr $ahi,$bi # ap[j]*bp[0] + algr $alo,$AHI + lghi $AHI,0 + alcgr $AHI,$ahi + + lg $nlo,0($j,$np) + mlgr $nhi,$mn0 # np[j]*m1 + algr $nlo,$NHI + lghi $NHI,0 + alcgr $nhi,$NHI # +="tp[j]" + algr $nlo,$alo + alcgr $NHI,$nhi + + stg $nlo,160-8($j,$sp) # tp[j-1]= + la $j,8($j) # j++ + brct $count,.L1st + + algr $NHI,$AHI + lghi $AHI,0 + alcgr $AHI,$AHI # upmost overflow bit + stg $NHI,160-8($j,$sp) + stg $AHI,160($j,$sp) + la $bp,8($bp) # bp++ + +.Louter: + lg $bi,0($bp) # bp[i] + lg $alo,0($ap) + mlgr $ahi,$bi # ap[0]*bp[i] + alg $alo,160($sp) # +=tp[0] + lghi $AHI,0 + alcgr $AHI,$ahi + + lgr $mn0,$alo + msgr $mn0,$n0 # tp[0]*n0 + + lg $nlo,0($np) # np[0] + mlgr $nhi,$mn0 # np[0]*m1 + algr $nlo,$alo # +="tp[0]" + lghi $NHI,0 + alcgr $NHI,$nhi + + la $j,8(%r0) # j=1 + lr $count,$num + +.align 16 +.Linner: + lg $alo,0($j,$ap) + mlgr $ahi,$bi # ap[j]*bp[i] + algr $alo,$AHI + lghi $AHI,0 + alcgr $ahi,$AHI + alg $alo,160($j,$sp)# +=tp[j] + alcgr $AHI,$ahi + + lg $nlo,0($j,$np) + mlgr $nhi,$mn0 # np[j]*m1 + algr $nlo,$NHI + lghi $NHI,0 + alcgr $nhi,$NHI + algr $nlo,$alo # +="tp[j]" + alcgr $NHI,$nhi + + stg $nlo,160-8($j,$sp) # tp[j-1]= + la $j,8($j) # j++ + brct $count,.Linner + + algr $NHI,$AHI + lghi $AHI,0 + alcgr $AHI,$AHI + alg $NHI,160($j,$sp)# accumulate previous upmost overflow bit + lghi $ahi,0 + alcgr $AHI,$ahi # new upmost overflow bit + stg $NHI,160-8($j,$sp) + stg $AHI,160($j,$sp) + + la $bp,8($bp) # bp++ + clg $bp,160+8+32($j,$sp) # compare to &bp[num] + jne .Louter + + lg $rp,160+8+16($j,$sp) # reincarnate rp + la $ap,160($sp) + ahi $num,1 # restore $num, incidentally clears "borrow" + + la $j,0(%r0) + lr $count,$num +.Lsub: lg $alo,0($j,$ap) + slbg $alo,0($j,$np) + stg $alo,0($j,$rp) + la $j,8($j) + brct $count,.Lsub + lghi $ahi,0 + slbgr $AHI,$ahi # handle upmost carry + + ngr $ap,$AHI + lghi $np,-1 + xgr $np,$AHI + ngr $np,$rp + ogr $ap,$np # ap=borrow?tp:rp + + la $j,0(%r0) + lgr $count,$num +.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh + stg $j,160($j,$sp) # zap tp + stg $alo,0($j,$rp) + la $j,8($j) + brct $count,.Lcopy + + la %r1,160+8+48($j,$sp) + lmg %r6,%r15,0(%r1) + lghi %r2,1 # signal "processed" + br %r14 +.size 
bn_mul_mont,.-bn_mul_mont +.string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>" +___ + +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/s390x.S b/openssl/crypto/bn/asm/s390x.S new file mode 100755 index 00000000..43fcb79b --- /dev/null +++ b/openssl/crypto/bn/asm/s390x.S @@ -0,0 +1,678 @@ +.ident "s390x.S, version 1.1" +// ==================================================================== +// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +// project. +// +// Rights for redistribution and usage in source and binary forms are +// granted according to the OpenSSL license. Warranty of any kind is +// disclaimed. +// ==================================================================== + +.text + +#define zero %r0 + +// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5); +.globl bn_mul_add_words +.type bn_mul_add_words,@function +.align 4 +bn_mul_add_words: + lghi zero,0 // zero = 0 + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0; + ltgfr %r4,%r4 + bler %r14 // if (len<=0) return 0; + + stmg %r6,%r10,48(%r15) + lghi %r10,3 + lghi %r8,0 // carry = 0 + nr %r10,%r4 // len%4 + sra %r4,2 // cnt=len/4 + jz .Loop1_madd // carry is incidentally cleared if branch taken + algr zero,zero // clear carry + +.Loop4_madd: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + alcgr %r7,%r8 // +=carry + alcgr %r6,zero + alg %r7,0(%r2,%r1) // +=rp[i] + stg %r7,0(%r2,%r1) // rp[i]= + + lg %r9,8(%r2,%r3) + mlgr %r8,%r5 + alcgr %r9,%r6 + alcgr %r8,zero + alg %r9,8(%r2,%r1) + stg %r9,8(%r2,%r1) + + lg %r7,16(%r2,%r3) + mlgr %r6,%r5 + alcgr %r7,%r8 + alcgr %r6,zero + alg %r7,16(%r2,%r1) + stg %r7,16(%r2,%r1) + + lg %r9,24(%r2,%r3) + mlgr %r8,%r5 + alcgr %r9,%r6 + alcgr %r8,zero + alg %r9,24(%r2,%r1) + stg %r9,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r4,.Loop4_madd + + la %r10,1(%r10) // see if len%4 is zero ... + brct %r10,.Loop1_madd // without touching condition code:-) + +.Lend_madd: + alcgr %r8,zero // collect carry bit + lgr %r2,%r8 + lmg %r6,%r10,48(%r15) + br %r14 + +.Loop1_madd: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + alcgr %r7,%r8 // +=carry + alcgr %r6,zero + alg %r7,0(%r2,%r1) // +=rp[i] + stg %r7,0(%r2,%r1) // rp[i]= + + lgr %r8,%r6 + la %r2,8(%r2) // i++ + brct %r10,.Loop1_madd + + j .Lend_madd +.size bn_mul_add_words,.-bn_mul_add_words + +// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5); +.globl bn_mul_words +.type bn_mul_words,@function +.align 4 +bn_mul_words: + lghi zero,0 // zero = 0 + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0; + ltgfr %r4,%r4 + bler %r14 // if (len<=0) return 0; + + stmg %r6,%r10,48(%r15) + lghi %r10,3 + lghi %r8,0 // carry = 0 + nr %r10,%r4 // len%4 + sra %r4,2 // cnt=len/4 + jz .Loop1_mul // carry is incidentally cleared if branch taken + algr zero,zero // clear carry + +.Loop4_mul: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + alcgr %r7,%r8 // +=carry + stg %r7,0(%r2,%r1) // rp[i]= + + lg %r9,8(%r2,%r3) + mlgr %r8,%r5 + alcgr %r9,%r6 + stg %r9,8(%r2,%r1) + + lg %r7,16(%r2,%r3) + mlgr %r6,%r5 + alcgr %r7,%r8 + stg %r7,16(%r2,%r1) + + lg %r9,24(%r2,%r3) + mlgr %r8,%r5 + alcgr %r9,%r6 + stg %r9,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r4,.Loop4_mul + + la %r10,1(%r10) // see if len%4 is zero ... 
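The control-flow shape of bn_mul_add_words above, in C — a sketch under the earlier BN_ULONG/__int128 assumptions. The assembly runs len/4 four-wide spins and then len%4 single spins; the "la %r10,1(%r10)" plus brct pair re-enters the single-spin loop without touching the condition code, so the carry chain (kept in the flags and the alternating %r6/%r8 high words) survives the transition:

typedef unsigned long long BN_ULONG;

/* one multiply-accumulate step: mlgr (128-bit product) + alcgr/alg + stg */
static BN_ULONG madd_step(BN_ULONG *rp, const BN_ULONG *ap, int i,
                          BN_ULONG w, BN_ULONG carry)
{
    unsigned __int128 t = (unsigned __int128)ap[i] * w + carry + rp[i];
    rp[i] = (BN_ULONG)t;
    return (BN_ULONG)(t >> 64);
}

static BN_ULONG bn_mul_add_words_shape(BN_ULONG *rp, const BN_ULONG *ap,
                                       int len, BN_ULONG w)
{
    BN_ULONG carry = 0;
    int i = 0, quads = len >> 2;

    while (quads-- > 0) {                /* .Loop4_madd: 4 limbs per spin */
        carry = madd_step(rp, ap, i + 0, w, carry);
        carry = madd_step(rp, ap, i + 1, w, carry);
        carry = madd_step(rp, ap, i + 2, w, carry);
        carry = madd_step(rp, ap, i + 3, w, carry);
        i += 4;
    }
    for (; i < len; i++)                 /* .Loop1_madd: the len%4 tail */
        carry = madd_step(rp, ap, i, w, carry);
    return carry;                        /* .Lend_madd: collect carry bit */
}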
+ brct %r10,.Loop1_mul // without touching condition code:-) + +.Lend_mul: + alcgr %r8,zero // collect carry bit + lgr %r2,%r8 + lmg %r6,%r10,48(%r15) + br %r14 + +.Loop1_mul: + lg %r7,0(%r2,%r3) // ap[i] + mlgr %r6,%r5 // *=w + alcgr %r7,%r8 // +=carry + stg %r7,0(%r2,%r1) // rp[i]= + + lgr %r8,%r6 + la %r2,8(%r2) // i++ + brct %r10,.Loop1_mul + + j .Lend_mul +.size bn_mul_words,.-bn_mul_words + +// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r2,int r4) +.globl bn_sqr_words +.type bn_sqr_words,@function +.align 4 +bn_sqr_words: + ltgfr %r4,%r4 + bler %r14 + + stmg %r6,%r7,48(%r15) + srag %r1,%r4,2 // cnt=len/4 + jz .Loop1_sqr + +.Loop4_sqr: + lg %r7,0(%r3) + mlgr %r6,%r7 + stg %r7,0(%r2) + stg %r6,8(%r2) + + lg %r7,8(%r3) + mlgr %r6,%r7 + stg %r7,16(%r2) + stg %r6,24(%r2) + + lg %r7,16(%r3) + mlgr %r6,%r7 + stg %r7,32(%r2) + stg %r6,40(%r2) + + lg %r7,24(%r3) + mlgr %r6,%r7 + stg %r7,48(%r2) + stg %r6,56(%r2) + + la %r3,32(%r3) + la %r2,64(%r2) + brct %r1,.Loop4_sqr + + lghi %r1,3 + nr %r4,%r1 // cnt=len%4 + jz .Lend_sqr + +.Loop1_sqr: + lg %r7,0(%r3) + mlgr %r6,%r7 + stg %r7,0(%r2) + stg %r6,8(%r2) + + la %r3,8(%r3) + la %r2,16(%r2) + brct %r4,.Loop1_sqr + +.Lend_sqr: + lmg %r6,%r7,48(%r15) + br %r14 +.size bn_sqr_words,.-bn_sqr_words + +// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d); +.globl bn_div_words +.type bn_div_words,@function +.align 4 +bn_div_words: + dlgr %r2,%r4 + lgr %r2,%r3 + br %r14 +.size bn_div_words,.-bn_div_words + +// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5); +.globl bn_add_words +.type bn_add_words,@function +.align 4 +bn_add_words: + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0 + ltgfr %r5,%r5 + bler %r14 // if (len<=0) return 0; + + stg %r6,48(%r15) + lghi %r6,3 + nr %r6,%r5 // len%4 + sra %r5,2 // len/4, use sra because it sets condition code + jz .Loop1_add // carry is incidentally cleared if branch taken + algr %r2,%r2 // clear carry + +.Loop4_add: + lg %r0,0(%r2,%r3) + alcg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + lg %r0,8(%r2,%r3) + alcg %r0,8(%r2,%r4) + stg %r0,8(%r2,%r1) + lg %r0,16(%r2,%r3) + alcg %r0,16(%r2,%r4) + stg %r0,16(%r2,%r1) + lg %r0,24(%r2,%r3) + alcg %r0,24(%r2,%r4) + stg %r0,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r5,.Loop4_add + + la %r6,1(%r6) // see if len%4 is zero ... 
+ brct %r6,.Loop1_add // without touching condition code:-) + +.Lexit_add: + lghi %r2,0 + alcgr %r2,%r2 + lg %r6,48(%r15) + br %r14 + +.Loop1_add: + lg %r0,0(%r2,%r3) + alcg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + + la %r2,8(%r2) // i++ + brct %r6,.Loop1_add + + j .Lexit_add +.size bn_add_words,.-bn_add_words + +// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5); +.globl bn_sub_words +.type bn_sub_words,@function +.align 4 +bn_sub_words: + la %r1,0(%r2) // put rp aside + lghi %r2,0 // i=0 + ltgfr %r5,%r5 + bler %r14 // if (len<=0) return 0; + + stg %r6,48(%r15) + lghi %r6,3 + nr %r6,%r5 // len%4 + sra %r5,2 // len/4, use sra because it sets condition code + jnz .Loop4_sub // borrow is incidentally cleared if branch taken + slgr %r2,%r2 // clear borrow + +.Loop1_sub: + lg %r0,0(%r2,%r3) + slbg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + + la %r2,8(%r2) // i++ + brct %r6,.Loop1_sub + j .Lexit_sub + +.Loop4_sub: + lg %r0,0(%r2,%r3) + slbg %r0,0(%r2,%r4) + stg %r0,0(%r2,%r1) + lg %r0,8(%r2,%r3) + slbg %r0,8(%r2,%r4) + stg %r0,8(%r2,%r1) + lg %r0,16(%r2,%r3) + slbg %r0,16(%r2,%r4) + stg %r0,16(%r2,%r1) + lg %r0,24(%r2,%r3) + slbg %r0,24(%r2,%r4) + stg %r0,24(%r2,%r1) + + la %r2,32(%r2) // i+=4 + brct %r5,.Loop4_sub + + la %r6,1(%r6) // see if len%4 is zero ... + brct %r6,.Loop1_sub // without touching condition code:-) + +.Lexit_sub: + lghi %r2,0 + slbgr %r2,%r2 + lcgr %r2,%r2 + lg %r6,48(%r15) + br %r14 +.size bn_sub_words,.-bn_sub_words + +#define c1 %r1 +#define c2 %r5 +#define c3 %r8 + +#define mul_add_c(ai,bi,c1,c2,c3) \ + lg %r7,ai*8(%r3); \ + mlg %r6,bi*8(%r4); \ + algr c1,%r7; \ + alcgr c2,%r6; \ + alcgr c3,zero + +// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4); +.globl bn_mul_comba8 +.type bn_mul_comba8,@function +.align 4 +bn_mul_comba8: + stmg %r6,%r8,48(%r15) + + lghi c1,0 + lghi c2,0 + lghi c3,0 + lghi zero,0 + + mul_add_c(0,0,c1,c2,c3); + stg c1,0*8(%r2) + lghi c1,0 + + mul_add_c(0,1,c2,c3,c1); + mul_add_c(1,0,c2,c3,c1); + stg c2,1*8(%r2) + lghi c2,0 + + mul_add_c(2,0,c3,c1,c2); + mul_add_c(1,1,c3,c1,c2); + mul_add_c(0,2,c3,c1,c2); + stg c3,2*8(%r2) + lghi c3,0 + + mul_add_c(0,3,c1,c2,c3); + mul_add_c(1,2,c1,c2,c3); + mul_add_c(2,1,c1,c2,c3); + mul_add_c(3,0,c1,c2,c3); + stg c1,3*8(%r2) + lghi c1,0 + + mul_add_c(4,0,c2,c3,c1); + mul_add_c(3,1,c2,c3,c1); + mul_add_c(2,2,c2,c3,c1); + mul_add_c(1,3,c2,c3,c1); + mul_add_c(0,4,c2,c3,c1); + stg c2,4*8(%r2) + lghi c2,0 + + mul_add_c(0,5,c3,c1,c2); + mul_add_c(1,4,c3,c1,c2); + mul_add_c(2,3,c3,c1,c2); + mul_add_c(3,2,c3,c1,c2); + mul_add_c(4,1,c3,c1,c2); + mul_add_c(5,0,c3,c1,c2); + stg c3,5*8(%r2) + lghi c3,0 + + mul_add_c(6,0,c1,c2,c3); + mul_add_c(5,1,c1,c2,c3); + mul_add_c(4,2,c1,c2,c3); + mul_add_c(3,3,c1,c2,c3); + mul_add_c(2,4,c1,c2,c3); + mul_add_c(1,5,c1,c2,c3); + mul_add_c(0,6,c1,c2,c3); + stg c1,6*8(%r2) + lghi c1,0 + + mul_add_c(0,7,c2,c3,c1); + mul_add_c(1,6,c2,c3,c1); + mul_add_c(2,5,c2,c3,c1); + mul_add_c(3,4,c2,c3,c1); + mul_add_c(4,3,c2,c3,c1); + mul_add_c(5,2,c2,c3,c1); + mul_add_c(6,1,c2,c3,c1); + mul_add_c(7,0,c2,c3,c1); + stg c2,7*8(%r2) + lghi c2,0 + + mul_add_c(7,1,c3,c1,c2); + mul_add_c(6,2,c3,c1,c2); + mul_add_c(5,3,c3,c1,c2); + mul_add_c(4,4,c3,c1,c2); + mul_add_c(3,5,c3,c1,c2); + mul_add_c(2,6,c3,c1,c2); + mul_add_c(1,7,c3,c1,c2); + stg c3,8*8(%r2) + lghi c3,0 + + mul_add_c(2,7,c1,c2,c3); + mul_add_c(3,6,c1,c2,c3); + mul_add_c(4,5,c1,c2,c3); + mul_add_c(5,4,c1,c2,c3); + mul_add_c(6,3,c1,c2,c3); + mul_add_c(7,2,c1,c2,c3); + stg c1,9*8(%r2) + lghi c1,0 + + mul_add_c(7,3,c2,c3,c1); + 
mul_add_c(6,4,c2,c3,c1);
+	mul_add_c(5,5,c2,c3,c1);
+	mul_add_c(4,6,c2,c3,c1);
+	mul_add_c(3,7,c2,c3,c1);
+	stg	c2,10*8(%r2)
+	lghi	c2,0
+
+	mul_add_c(4,7,c3,c1,c2);
+	mul_add_c(5,6,c3,c1,c2);
+	mul_add_c(6,5,c3,c1,c2);
+	mul_add_c(7,4,c3,c1,c2);
+	stg	c3,11*8(%r2)
+	lghi	c3,0
+
+	mul_add_c(7,5,c1,c2,c3);
+	mul_add_c(6,6,c1,c2,c3);
+	mul_add_c(5,7,c1,c2,c3);
+	stg	c1,12*8(%r2)
+	lghi	c1,0
+
+	mul_add_c(6,7,c2,c3,c1);
+	mul_add_c(7,6,c2,c3,c1);
+	stg	c2,13*8(%r2)
+	lghi	c2,0
+
+	mul_add_c(7,7,c3,c1,c2);
+	stg	c3,14*8(%r2)
+	stg	c1,15*8(%r2)
+
+	lmg	%r6,%r8,48(%r15)
+	br	%r14
+.size	bn_mul_comba8,.-bn_mul_comba8
+
+// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
+.globl	bn_mul_comba4
+.type	bn_mul_comba4,@function
+.align	4
+bn_mul_comba4:
+	stmg	%r6,%r8,48(%r15)
+
+	lghi	c1,0
+	lghi	c2,0
+	lghi	c3,0
+	lghi	zero,0
+
+	mul_add_c(0,0,c1,c2,c3);
+	stg	c1,0*8(%r2)
+	lghi	c1,0
+
+	mul_add_c(0,1,c2,c3,c1);
+	mul_add_c(1,0,c2,c3,c1);
+	stg	c2,1*8(%r2)
+	lghi	c2,0
+
+	mul_add_c(2,0,c3,c1,c2);
+	mul_add_c(1,1,c3,c1,c2);
+	mul_add_c(0,2,c3,c1,c2);
+	stg	c3,2*8(%r2)
+	lghi	c3,0
+
+	mul_add_c(0,3,c1,c2,c3);
+	mul_add_c(1,2,c1,c2,c3);
+	mul_add_c(2,1,c1,c2,c3);
+	mul_add_c(3,0,c1,c2,c3);
+	stg	c1,3*8(%r2)
+	lghi	c1,0
+
+	mul_add_c(3,1,c2,c3,c1);
+	mul_add_c(2,2,c2,c3,c1);
+	mul_add_c(1,3,c2,c3,c1);
+	stg	c2,4*8(%r2)
+	lghi	c2,0
+
+	mul_add_c(2,3,c3,c1,c2);
+	mul_add_c(3,2,c3,c1,c2);
+	stg	c3,5*8(%r2)
+	lghi	c3,0
+
+	mul_add_c(3,3,c1,c2,c3);
+	stg	c1,6*8(%r2)
+	stg	c2,7*8(%r2)
+
+	lmg	%r6,%r8,48(%r15)
+	br	%r14
+.size	bn_mul_comba4,.-bn_mul_comba4
+
+#define sqr_add_c(ai,c1,c2,c3) \
+	lg	%r7,ai*8(%r3); \
+	mlgr	%r6,%r7; \
+	algr	c1,%r7; \
+	alcgr	c2,%r6; \
+	alcgr	c3,zero
+
+#define sqr_add_c2(ai,aj,c1,c2,c3) \
+	lg	%r7,ai*8(%r3); \
+	mlg	%r6,aj*8(%r3); \
+	algr	c1,%r7; \
+	alcgr	c2,%r6; \
+	alcgr	c3,zero; \
+	algr	c1,%r7; \
+	alcgr	c2,%r6; \
+	alcgr	c3,zero
+
+// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3);
+.globl	bn_sqr_comba8
+.type	bn_sqr_comba8,@function
+.align	4
+bn_sqr_comba8:
+	stmg	%r6,%r8,48(%r15)
+
+	lghi	c1,0
+	lghi	c2,0
+	lghi	c3,0
+	lghi	zero,0
+
+	sqr_add_c(0,c1,c2,c3);
+	stg	c1,0*8(%r2)
+	lghi	c1,0
+
+	sqr_add_c2(1,0,c2,c3,c1);
+	stg	c2,1*8(%r2)
+	lghi	c2,0
+
+	sqr_add_c(1,c3,c1,c2);
+	sqr_add_c2(2,0,c3,c1,c2);
+	stg	c3,2*8(%r2)
+	lghi	c3,0
+
+	sqr_add_c2(3,0,c1,c2,c3);
+	sqr_add_c2(2,1,c1,c2,c3);
+	stg	c1,3*8(%r2)
+	lghi	c1,0
+
+	sqr_add_c(2,c2,c3,c1);
+	sqr_add_c2(3,1,c2,c3,c1);
+	sqr_add_c2(4,0,c2,c3,c1);
+	stg	c2,4*8(%r2)
+	lghi	c2,0
+
+	sqr_add_c2(5,0,c3,c1,c2);
+	sqr_add_c2(4,1,c3,c1,c2);
+	sqr_add_c2(3,2,c3,c1,c2);
+	stg	c3,5*8(%r2)
+	lghi	c3,0
+
+	sqr_add_c(3,c1,c2,c3);
+	sqr_add_c2(4,2,c1,c2,c3);
+	sqr_add_c2(5,1,c1,c2,c3);
+	sqr_add_c2(6,0,c1,c2,c3);
+	stg	c1,6*8(%r2)
+	lghi	c1,0
+
+	sqr_add_c2(7,0,c2,c3,c1);
+	sqr_add_c2(6,1,c2,c3,c1);
+	sqr_add_c2(5,2,c2,c3,c1);
+	sqr_add_c2(4,3,c2,c3,c1);
+	stg	c2,7*8(%r2)
+	lghi	c2,0
+
+	sqr_add_c(4,c3,c1,c2);
+	sqr_add_c2(5,3,c3,c1,c2);
+	sqr_add_c2(6,2,c3,c1,c2);
+	sqr_add_c2(7,1,c3,c1,c2);
+	stg	c3,8*8(%r2)
+	lghi	c3,0
+
+	sqr_add_c2(7,2,c1,c2,c3);
+	sqr_add_c2(6,3,c1,c2,c3);
+	sqr_add_c2(5,4,c1,c2,c3);
+	stg	c1,9*8(%r2)
+	lghi	c1,0
+
+	sqr_add_c(5,c2,c3,c1);
+	sqr_add_c2(6,4,c2,c3,c1);
+	sqr_add_c2(7,3,c2,c3,c1);
+	stg	c2,10*8(%r2)
+	lghi	c2,0
+
+	sqr_add_c2(7,4,c3,c1,c2);
+	sqr_add_c2(6,5,c3,c1,c2);
+	stg	c3,11*8(%r2)
+	lghi	c3,0
+
+	sqr_add_c(6,c1,c2,c3);
+	sqr_add_c2(7,5,c1,c2,c3);
+	stg	c1,12*8(%r2)
+	lghi	c1,0
+
+	sqr_add_c2(7,6,c2,c3,c1);
+	stg	c2,13*8(%r2)
+	lghi	c2,0
+
+	sqr_add_c(7,c3,c1,c2);
+	stg	c3,14*8(%r2)
+	stg	c1,15*8(%r2)
+
+	lmg	%r6,%r8,48(%r15)
+	br	%r14
+.size	bn_sqr_comba8,.-bn_sqr_comba8
+
+// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3);
+.globl	bn_sqr_comba4
+.type	bn_sqr_comba4,@function
+.align	4
+bn_sqr_comba4:
+	stmg	%r6,%r8,48(%r15)
+
+	lghi	c1,0
+	lghi	c2,0
+	lghi	c3,0
+	lghi	zero,0
+
+	sqr_add_c(0,c1,c2,c3);
+	stg	c1,0*8(%r2)
+	lghi	c1,0
+
+	sqr_add_c2(1,0,c2,c3,c1);
+	stg	c2,1*8(%r2)
+	lghi	c2,0
+
+	sqr_add_c(1,c3,c1,c2);
+	sqr_add_c2(2,0,c3,c1,c2);
+	stg	c3,2*8(%r2)
+	lghi	c3,0
+
+	sqr_add_c2(3,0,c1,c2,c3);
+	sqr_add_c2(2,1,c1,c2,c3);
+	stg	c1,3*8(%r2)
+	lghi	c1,0
+
+	sqr_add_c(2,c2,c3,c1);
+	sqr_add_c2(3,1,c2,c3,c1);
+	stg	c2,4*8(%r2)
+	lghi	c2,0
+
+	sqr_add_c2(3,2,c3,c1,c2);
+	stg	c3,5*8(%r2)
+	lghi	c3,0
+
+	sqr_add_c(3,c1,c2,c3);
+	stg	c1,6*8(%r2)
+	stg	c2,7*8(%r2)
+
+	lmg	%r6,%r8,48(%r15)
+	br	%r14
+.size	bn_sqr_comba4,.-bn_sqr_comba4
diff --git a/openssl/crypto/bn/asm/sparcv8.S b/openssl/crypto/bn/asm/sparcv8.S
new file mode 100644
index 00000000..88c5dc48
--- /dev/null
+++ b/openssl/crypto/bn/asm/sparcv8.S
@@ -0,0 +1,1458 @@
+.ident	"sparcv8.s, Version 1.4"
+.ident	"SPARC v8 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in SuperSPARC ISA replacement for the crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * See bn_asm.sparc.v8plus.S for more details.
+ */
+
+/*
+ * Revision history.
+ * + * 1.1 - new loop unrolling model(*); + * 1.2 - made gas friendly; + * 1.3 - fixed problem with /usr/ccs/lib/cpp; + * 1.4 - some retunes; + * + * (*) see bn_asm.sparc.v8plus.S for details + */ + +.section ".text",#alloc,#execinstr +.file "bn_asm.sparc.v8.S" + +.align 32 + +.global bn_mul_add_words +/* + * BN_ULONG bn_mul_add_words(rp,ap,num,w) + * BN_ULONG *rp,*ap; + * int num; + * BN_ULONG w; + */ +bn_mul_add_words: + cmp %o2,0 + bg,a .L_bn_mul_add_words_proceed + ld [%o1],%g2 + retl + clr %o0 + +.L_bn_mul_add_words_proceed: + andcc %o2,-4,%g0 + bz .L_bn_mul_add_words_tail + clr %o5 + +.L_bn_mul_add_words_loop: + ld [%o0],%o4 + ld [%o1+4],%g3 + umul %o3,%g2,%g2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + st %o4,[%o0] + addx %g1,0,%o5 + + ld [%o0+4],%o4 + ld [%o1+8],%g2 + umul %o3,%g3,%g3 + dec 4,%o2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g3,%o4 + st %o4,[%o0+4] + addx %g1,0,%o5 + + ld [%o0+8],%o4 + ld [%o1+12],%g3 + umul %o3,%g2,%g2 + inc 16,%o1 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + st %o4,[%o0+8] + addx %g1,0,%o5 + + ld [%o0+12],%o4 + umul %o3,%g3,%g3 + inc 16,%o0 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g3,%o4 + st %o4,[%o0-4] + addx %g1,0,%o5 + andcc %o2,-4,%g0 + bnz,a .L_bn_mul_add_words_loop + ld [%o1],%g2 + + tst %o2 + bnz,a .L_bn_mul_add_words_tail + ld [%o1],%g2 +.L_bn_mul_add_words_return: + retl + mov %o5,%o0 + nop + +.L_bn_mul_add_words_tail: + ld [%o0],%o4 + umul %o3,%g2,%g2 + addcc %o4,%o5,%o4 + rd %y,%g1 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_add_words_return + st %o4,[%o0] + + ld [%o1+4],%g2 + ld [%o0+4],%o4 + umul %o3,%g2,%g2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_add_words_return + st %o4,[%o0+4] + + ld [%o1+8],%g2 + ld [%o0+8],%o4 + umul %o3,%g2,%g2 + rd %y,%g1 + addcc %o4,%o5,%o4 + addx %g1,0,%g1 + addcc %o4,%g2,%o4 + st %o4,[%o0+8] + retl + addx %g1,0,%o0 + +.type bn_mul_add_words,#function +.size bn_mul_add_words,(.-bn_mul_add_words) + +.align 32 + +.global bn_mul_words +/* + * BN_ULONG bn_mul_words(rp,ap,num,w) + * BN_ULONG *rp,*ap; + * int num; + * BN_ULONG w; + */ +bn_mul_words: + cmp %o2,0 + bg,a .L_bn_mul_words_proceeed + ld [%o1],%g2 + retl + clr %o0 + +.L_bn_mul_words_proceeed: + andcc %o2,-4,%g0 + bz .L_bn_mul_words_tail + clr %o5 + +.L_bn_mul_words_loop: + ld [%o1+4],%g3 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + addx %g1,0,%o5 + st %g2,[%o0] + + ld [%o1+8],%g2 + umul %o3,%g3,%g3 + addcc %g3,%o5,%g3 + rd %y,%g1 + dec 4,%o2 + addx %g1,0,%o5 + st %g3,[%o0+4] + + ld [%o1+12],%g3 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + inc 16,%o1 + st %g2,[%o0+8] + addx %g1,0,%o5 + + umul %o3,%g3,%g3 + addcc %g3,%o5,%g3 + rd %y,%g1 + inc 16,%o0 + addx %g1,0,%o5 + st %g3,[%o0-4] + andcc %o2,-4,%g0 + nop + bnz,a .L_bn_mul_words_loop + ld [%o1],%g2 + + tst %o2 + bnz,a .L_bn_mul_words_tail + ld [%o1],%g2 +.L_bn_mul_words_return: + retl + mov %o5,%o0 + nop + +.L_bn_mul_words_tail: + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_words_return + st %g2,[%o0] + nop + + ld [%o1+4],%g2 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + addx %g1,0,%o5 + deccc %o2 + bz .L_bn_mul_words_return + st %g2,[%o0+4] + + ld [%o1+8],%g2 + umul %o3,%g2,%g2 + addcc %g2,%o5,%g2 + rd %y,%g1 + st %g2,[%o0+8] + retl + addx %g1,0,%o0 + +.type bn_mul_words,#function +.size bn_mul_words,(.-bn_mul_words) + +.align 32 
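+/*
+ * For reference: a C model of the two multiply routines above (an
+ * illustrative sketch only, not part of the original module; it
+ * assumes a 32-bit BN_ULONG and a 64-bit unsigned long long). The
+ * accumulator "acc" carries the high word of each product into the
+ * next iteration and the final carry word is returned, which is
+ * what the umul/rd %y sequences above implement. bn_mul_words is
+ * identical except that it does not add in the previous rp[i]:
+ *
+ *	BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap,
+ *				  int num, BN_ULONG w)
+ *	{
+ *		unsigned long long acc = 0;
+ *
+ *		while (num-- > 0) {
+ *			acc += (unsigned long long)*(ap++)*w + *rp;
+ *			*(rp++) = (BN_ULONG)acc;
+ *			acc >>= 32;
+ *		}
+ *		return (BN_ULONG)acc;
+ *	}
+ */
+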
+.global bn_sqr_words +/* + * void bn_sqr_words(r,a,n) + * BN_ULONG *r,*a; + * int n; + */ +bn_sqr_words: + cmp %o2,0 + bg,a .L_bn_sqr_words_proceeed + ld [%o1],%g2 + retl + clr %o0 + +.L_bn_sqr_words_proceeed: + andcc %o2,-4,%g0 + bz .L_bn_sqr_words_tail + clr %o5 + +.L_bn_sqr_words_loop: + ld [%o1+4],%g3 + umul %g2,%g2,%o4 + st %o4,[%o0] + rd %y,%o5 + st %o5,[%o0+4] + + ld [%o1+8],%g2 + umul %g3,%g3,%o4 + dec 4,%o2 + st %o4,[%o0+8] + rd %y,%o5 + st %o5,[%o0+12] + nop + + ld [%o1+12],%g3 + umul %g2,%g2,%o4 + st %o4,[%o0+16] + rd %y,%o5 + inc 16,%o1 + st %o5,[%o0+20] + + umul %g3,%g3,%o4 + inc 32,%o0 + st %o4,[%o0-8] + rd %y,%o5 + st %o5,[%o0-4] + andcc %o2,-4,%g2 + bnz,a .L_bn_sqr_words_loop + ld [%o1],%g2 + + tst %o2 + nop + bnz,a .L_bn_sqr_words_tail + ld [%o1],%g2 +.L_bn_sqr_words_return: + retl + clr %o0 + +.L_bn_sqr_words_tail: + umul %g2,%g2,%o4 + st %o4,[%o0] + deccc %o2 + rd %y,%o5 + bz .L_bn_sqr_words_return + st %o5,[%o0+4] + + ld [%o1+4],%g2 + umul %g2,%g2,%o4 + st %o4,[%o0+8] + deccc %o2 + rd %y,%o5 + nop + bz .L_bn_sqr_words_return + st %o5,[%o0+12] + + ld [%o1+8],%g2 + umul %g2,%g2,%o4 + st %o4,[%o0+16] + rd %y,%o5 + st %o5,[%o0+20] + retl + clr %o0 + +.type bn_sqr_words,#function +.size bn_sqr_words,(.-bn_sqr_words) + +.align 32 + +.global bn_div_words +/* + * BN_ULONG bn_div_words(h,l,d) + * BN_ULONG h,l,d; + */ +bn_div_words: + wr %o0,%y + udiv %o1,%o2,%o0 + retl + nop + +.type bn_div_words,#function +.size bn_div_words,(.-bn_div_words) + +.align 32 + +.global bn_add_words +/* + * BN_ULONG bn_add_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_add_words: + cmp %o3,0 + bg,a .L_bn_add_words_proceed + ld [%o1],%o4 + retl + clr %o0 + +.L_bn_add_words_proceed: + andcc %o3,-4,%g0 + bz .L_bn_add_words_tail + clr %g1 + ba .L_bn_add_words_warn_loop + addcc %g0,0,%g0 ! clear carry flag + +.L_bn_add_words_loop: + ld [%o1],%o4 +.L_bn_add_words_warn_loop: + ld [%o2],%o5 + ld [%o1+4],%g3 + ld [%o2+4],%g4 + dec 4,%o3 + addxcc %o5,%o4,%o5 + st %o5,[%o0] + + ld [%o1+8],%o4 + ld [%o2+8],%o5 + inc 16,%o1 + addxcc %g3,%g4,%g3 + st %g3,[%o0+4] + + ld [%o1-4],%g3 + ld [%o2+12],%g4 + inc 16,%o2 + addxcc %o5,%o4,%o5 + st %o5,[%o0+8] + + inc 16,%o0 + addxcc %g3,%g4,%g3 + st %g3,[%o0-4] + addx %g0,0,%g1 + andcc %o3,-4,%g0 + bnz,a .L_bn_add_words_loop + addcc %g1,-1,%g0 + + tst %o3 + bnz,a .L_bn_add_words_tail + ld [%o1],%o4 +.L_bn_add_words_return: + retl + mov %g1,%o0 + +.L_bn_add_words_tail: + addcc %g1,-1,%g0 + ld [%o2],%o5 + addxcc %o5,%o4,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_add_words_return + st %o5,[%o0] + + ld [%o1+4],%o4 + addcc %g1,-1,%g0 + ld [%o2+4],%o5 + addxcc %o5,%o4,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_add_words_return + st %o5,[%o0+4] + + ld [%o1+8],%o4 + addcc %g1,-1,%g0 + ld [%o2+8],%o5 + addxcc %o5,%o4,%o5 + st %o5,[%o0+8] + retl + addx %g0,0,%o0 + +.type bn_add_words,#function +.size bn_add_words,(.-bn_add_words) + +.align 32 + +.global bn_sub_words +/* + * BN_ULONG bn_sub_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_sub_words: + cmp %o3,0 + bg,a .L_bn_sub_words_proceed + ld [%o1],%o4 + retl + clr %o0 + +.L_bn_sub_words_proceed: + andcc %o3,-4,%g0 + bz .L_bn_sub_words_tail + clr %g1 + ba .L_bn_sub_words_warm_loop + addcc %g0,0,%g0 ! 
clear carry flag + +.L_bn_sub_words_loop: + ld [%o1],%o4 +.L_bn_sub_words_warm_loop: + ld [%o2],%o5 + ld [%o1+4],%g3 + ld [%o2+4],%g4 + dec 4,%o3 + subxcc %o4,%o5,%o5 + st %o5,[%o0] + + ld [%o1+8],%o4 + ld [%o2+8],%o5 + inc 16,%o1 + subxcc %g3,%g4,%g4 + st %g4,[%o0+4] + + ld [%o1-4],%g3 + ld [%o2+12],%g4 + inc 16,%o2 + subxcc %o4,%o5,%o5 + st %o5,[%o0+8] + + inc 16,%o0 + subxcc %g3,%g4,%g4 + st %g4,[%o0-4] + addx %g0,0,%g1 + andcc %o3,-4,%g0 + bnz,a .L_bn_sub_words_loop + addcc %g1,-1,%g0 + + tst %o3 + nop + bnz,a .L_bn_sub_words_tail + ld [%o1],%o4 +.L_bn_sub_words_return: + retl + mov %g1,%o0 + +.L_bn_sub_words_tail: + addcc %g1,-1,%g0 + ld [%o2],%o5 + subxcc %o4,%o5,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_sub_words_return + st %o5,[%o0] + nop + + ld [%o1+4],%o4 + addcc %g1,-1,%g0 + ld [%o2+4],%o5 + subxcc %o4,%o5,%o5 + addx %g0,0,%g1 + deccc %o3 + bz .L_bn_sub_words_return + st %o5,[%o0+4] + + ld [%o1+8],%o4 + addcc %g1,-1,%g0 + ld [%o2+8],%o5 + subxcc %o4,%o5,%o5 + st %o5,[%o0+8] + retl + addx %g0,0,%o0 + +.type bn_sub_words,#function +.size bn_sub_words,(.-bn_sub_words) + +#define FRAME_SIZE -96 + +/* + * Here is register usage map for *all* routines below. + */ +#define t_1 %o0 +#define t_2 %o1 +#define c_1 %o2 +#define c_2 %o3 +#define c_3 %o4 + +#define ap(I) [%i1+4*I] +#define bp(I) [%i2+4*I] +#define rp(I) [%i0+4*I] + +#define a_0 %l0 +#define a_1 %l1 +#define a_2 %l2 +#define a_3 %l3 +#define a_4 %l4 +#define a_5 %l5 +#define a_6 %l6 +#define a_7 %l7 + +#define b_0 %i3 +#define b_1 %i4 +#define b_2 %i5 +#define b_3 %o5 +#define b_4 %g1 +#define b_5 %g2 +#define b_6 %g3 +#define b_7 %g4 + +.align 32 +.global bn_mul_comba8 +/* + * void bn_mul_comba8(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba8: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + ld bp(0),b_0 + umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3); + ld bp(1),b_1 + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1); + ld ap(1),a_1 + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc %g0,t_2,c_3 != + addx %g0,%g0,c_1 + ld ap(2),a_2 + umul a_1,b_0,t_1 !mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + st c_2,rp(1) !r[1]=c2; + addx c_1,%g0,c_1 != + + umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx %g0,%g0,c_2 + ld bp(2),b_2 + umul a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + ld bp(3),b_3 + addx c_2,%g0,c_2 != + umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + st c_3,rp(2) !r[2]=c3; + + umul a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + umul a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + ld ap(3),a_3 + umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + ld ap(4),a_4 + umul a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!= + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_2,t_1,c_2 + rd 
%y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + ld bp(4),b_4 + umul a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + ld bp(5),b_5 + umul a_0,b_4,t_1 !=!mul_add_c(a[0],b[4],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + st c_2,rp(4) !r[4]=c2; + + umul a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + umul a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_2,b_3,t_1 !=!mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + ld ap(5),a_5 + umul a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + ld ap(6),a_6 + addx c_2,%g0,c_2 != + umul a_5,b_0,t_1 !mul_add_c(a[5],b[0],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + st c_3,rp(5) !r[5]=c3; + + umul a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + umul a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_4,b_2,t_1 !mul_add_c(a[4],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_2,b_4,t_1 !mul_add_c(a[2],b[4],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + ld bp(6),b_6 + addx c_3,%g0,c_3 != + umul a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + ld bp(7),b_7 + umul a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + st c_1,rp(6) !r[6]=c1; + addx c_3,%g0,c_3 != + + umul a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx %g0,%g0,c_1 + umul a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_3,b_4,t_1 !=!mul_add_c(a[3],b[4],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + umul a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + ld ap(7),a_7 + umul a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + umul a_7,b_0,t_1 !mul_add_c(a[7],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + st c_2,rp(7) !r[7]=c2; + + umul a_7,b_1,t_1 !mul_add_c(a[7],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + umul a_6,b_2,t_1 !=!mul_add_c(a[6],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx 
c_2,%g0,c_2 + umul a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_2,b_6,t_1 !=!mul_add_c(a[2],b[6],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 ! + addx c_2,%g0,c_2 + st c_3,rp(8) !r[8]=c3; + + umul a_2,b_7,t_1 !mul_add_c(a[2],b[7],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + umul a_3,b_6,t_1 !=!mul_add_c(a[3],b[6],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + umul a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_7,b_2,t_1 !=!mul_add_c(a[7],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(9) !r[9]=c1; + + umul a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + umul a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_5,b_5,t_1 !=!mul_add_c(a[5],b[5],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + umul a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + st c_2,rp(10) !r[10]=c2; + + umul a_4,b_7,t_1 !=!mul_add_c(a[4],b[7],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 != + umul a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + umul a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(11) !r[11]=c3; + addx c_2,%g0,c_2 != + + umul a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx %g0,%g0,c_3 + umul a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + st c_1,rp(12) !r[12]=c1; + addx c_3,%g0,c_3 != + + umul a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 != + addx %g0,%g0,c_1 + umul a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + st c_2,rp(13) !r[13]=c2; + + umul a_7,b_7,t_1 !=!mul_add_c(a[7],b[7],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + nop != + st c_3,rp(14) !r[14]=c3; + st c_1,rp(15) !r[15]=c1; + + ret + restore %g0,%g0,%o0 + +.type bn_mul_comba8,#function +.size bn_mul_comba8,(.-bn_mul_comba8) + +.align 32 + +.global bn_mul_comba4 +/* + * void 
bn_mul_comba4(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba4: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + ld bp(0),b_0 + umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3); + ld bp(1),b_1 + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1); + ld ap(1),a_1 + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc %g0,t_2,c_3 + addx %g0,%g0,c_1 + ld ap(2),a_2 + umul a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + st c_2,rp(1) !r[1]=c2; + + umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + ld bp(2),b_2 + umul a_1,b_1,t_1 !=!mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + ld bp(3),b_3 + umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + st c_3,rp(2) !r[2]=c3; + + umul a_0,b_3,t_1 !=!mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 != + umul a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + ld ap(3),a_3 + umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + umul a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + umul a_1,b_3,t_1 !=!mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + st c_2,rp(4) !r[4]=c2; + + umul a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(5) !r[5]=c3; + addx c_2,%g0,c_2 != + + umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + st c_1,rp(6) !r[6]=c1; + st c_2,rp(7) !r[7]=c2; + + ret + restore %g0,%g0,%o0 + +.type bn_mul_comba4,#function +.size bn_mul_comba4,(.-bn_mul_comba4) + +.align 32 + +.global bn_sqr_comba8 +bn_sqr_comba8: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + ld ap(1),a_1 + umul a_0,a_0,c_1 !=!sqr_add_c(a,0,c1,c2,c3); + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + ld ap(2),a_2 + umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc %g0,t_2,c_3 + addx %g0,%g0,c_1 != + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 + st c_2,rp(1) !r[1]=c2; + addx c_1,%g0,c_1 != + + umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + ld ap(3),a_3 + umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + st c_3,rp(2) !r[2]=c3; + + umul a_0,a_3,t_1 !=!sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 != + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + ld ap(4),a_4 + addx c_3,%g0,c_3 != + umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 
!= + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + ld ap(5),a_5 + umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + st c_2,rp(4) !r[4]=c2; + addx c_1,%g0,c_1 != + + umul a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + umul a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + ld ap(6),a_6 + umul a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + st c_3,rp(5) !r[5]=c3; + + umul a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + umul a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3); + addcc c_1,t_1,c_1 != + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 != + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 + ld ap(7),a_7 + umul a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(6) !r[6]=c1; + + umul a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + st c_2,rp(7) !r[7]=c2; + + umul a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + addcc c_3,t_1,c_3 != + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(8) !r[8]=c3; + addx c_2,%g0,c_2 != + + umul a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != 
+ addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(9) !r[9]=c1; + + umul a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 != + addx c_1,%g0,c_1 + umul a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 + st c_2,rp(10) !r[10]=c2; + + umul a_4,a_7,t_1 !=!sqr_add_c2(a,7,4,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 != + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 + umul a_5,a_6,t_1 !=!sqr_add_c2(a,6,5,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 + addx c_2,%g0,c_2 != + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 + st c_3,rp(11) !r[11]=c3; + addx c_2,%g0,c_2 != + + umul a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + st c_1,rp(12) !r[12]=c1; + + umul a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1); + addcc c_2,t_1,c_2 != + rd %y,t_2 + addxcc c_3,t_2,c_3 + addx %g0,%g0,c_1 + addcc c_2,t_1,c_2 != + addxcc c_3,t_2,c_3 + st c_2,rp(13) !r[13]=c2; + addx c_1,%g0,c_1 != + + umul a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 + addxcc c_1,t_2,c_1 != + st c_3,rp(14) !r[14]=c3; + st c_1,rp(15) !r[15]=c1; + + ret + restore %g0,%g0,%o0 + +.type bn_sqr_comba8,#function +.size bn_sqr_comba8,(.-bn_sqr_comba8) + +.align 32 + +.global bn_sqr_comba4 +/* + * void bn_sqr_comba4(r,a) + * BN_ULONG *r,*a; + */ +bn_sqr_comba4: + save %sp,FRAME_SIZE,%sp + ld ap(0),a_0 + umul a_0,a_0,c_1 !sqr_add_c(a,0,c1,c2,c3); + ld ap(1),a_1 != + rd %y,c_2 + st c_1,rp(0) !r[0]=c1; + + ld ap(2),a_2 + umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 + addxcc %g0,t_2,c_3 + addx %g0,%g0,c_1 != + addcc c_2,t_1,c_2 + addxcc c_3,t_2,c_3 + addx c_1,%g0,c_1 != + st c_2,rp(1) !r[1]=c2; + + umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_3,t_1,c_3 + rd %y,t_2 != + addxcc c_1,t_2,c_1 + addx %g0,%g0,c_2 + addcc c_3,t_1,c_3 + addxcc c_1,t_2,c_1 != + addx c_2,%g0,c_2 + ld ap(3),a_3 + umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_3,t_1,c_3 != + rd %y,t_2 + addxcc c_1,t_2,c_1 + st c_3,rp(2) !r[2]=c3; + addx c_2,%g0,c_2 != + + umul a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx %g0,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_1,t_1,c_1 + rd %y,t_2 + addxcc c_2,t_2,c_2 != + addx c_3,%g0,c_3 + addcc c_1,t_1,c_1 + addxcc c_2,t_2,c_2 + addx c_3,%g0,c_3 != + st c_1,rp(3) !r[3]=c1; + + umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_2,t_1,c_2 + rd %y,t_2 != + addxcc c_3,t_2,c_3 + 
addx	%g0,%g0,c_1
+	addcc	c_2,t_1,c_2
+	addxcc	c_3,t_2,c_3	!=
+	addx	c_1,%g0,c_1
+	umul	a_2,a_2,t_1	!sqr_add_c(a,2,c2,c3,c1);
+	addcc	c_2,t_1,c_2
+	rd	%y,t_2		!=
+	addxcc	c_3,t_2,c_3
+	addx	c_1,%g0,c_1
+	st	c_2,rp(4)	!r[4]=c2;
+
+	umul	a_2,a_3,t_1	!=!sqr_add_c2(a,3,2,c3,c1,c2);
+	addcc	c_3,t_1,c_3
+	rd	%y,t_2
+	addxcc	c_1,t_2,c_1
+	addx	%g0,%g0,c_2	!=
+	addcc	c_3,t_1,c_3
+	addxcc	c_1,t_2,c_1
+	st	c_3,rp(5)	!r[5]=c3;
+	addx	c_2,%g0,c_2	!=
+
+	umul	a_3,a_3,t_1	!sqr_add_c(a,3,c1,c2,c3);
+	addcc	c_1,t_1,c_1
+	rd	%y,t_2
+	addxcc	c_2,t_2,c_2	!=
+	st	c_1,rp(6)	!r[6]=c1;
+	st	c_2,rp(7)	!r[7]=c2;
+
+	ret
+	restore	%g0,%g0,%o0
+
+.type	bn_sqr_comba4,#function
+.size	bn_sqr_comba4,(.-bn_sqr_comba4)
+
+.align	32
diff --git a/openssl/crypto/bn/asm/sparcv8plus.S b/openssl/crypto/bn/asm/sparcv8plus.S
new file mode 100644
index 00000000..63de1860
--- /dev/null
+++ b/openssl/crypto/bn/asm/sparcv8plus.S
@@ -0,0 +1,1558 @@
+.ident	"sparcv8plus.s, Version 1.4"
+.ident	"SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in UltraSPARC ISA replacement for the crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * Questions-n-answers.
+ *
+ * Q. How to compile?
+ * A. With SC4.x/SC5.x:
+ *
+ *	cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ *	and with gcc:
+ *
+ *	gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ *	or if the above fails (it does if you have gas installed):
+ *
+ *	gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
+ *
+ *	Quick-n-dirty way to fuse the module into the library.
+ *	Provided that the library is already configured and built
+ *	(in the 0.9.2 case, with the no-asm option):
+ *
+ *	# cd crypto/bn
+ *	# cp /some/place/bn_asm.sparc.v8plus.S .
+ *	# cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *	# make
+ *	# cd ../..
+ *	# make; make test
+ *
+ *	Quick-n-dirty way to get rid of it:
+ *
+ *	# cd crypto/bn
+ *	# touch bn_asm.c
+ *	# make
+ *	# cd ../..
+ *	# make; make test
+ *
+ * Q. V8plus architecture? What kind of beast is that?
+ * A. Well, it's rather a programming model than an architecture...
+ *	It's actually v9-compliant, i.e. it runs on *any* UltraSPARC
+ *	CPU, under special conditions, namely when the kernel doesn't
+ *	preserve the upper 32 bits of otherwise 64-bit registers
+ *	during a context switch.
+ *
+ * Q. Why just UltraSPARC? What about SuperSPARC?
+ * A. The original release did target UltraSPARC only. Now a
+ *	SuperSPARC version is provided alongside. Both versions share
+ *	the bn_*comba[48] implementations (see the comment later in
+ *	the code for an explanation). But what's so special about this
+ *	UltraSPARC implementation? Why didn't I let the compiler do
+ *	the job? The trouble is that most of the available compilers
+ *	(well, SC5.0 is the only exception) don't attempt to take
+ *	advantage of UltraSPARC's 64-bitness under 32-bit kernels even
+ *	though it's perfectly possible (see the next question).
+ *
+ * Q. 64-bit registers under 32-bit kernels? Didn't you just say it
+ *	doesn't work?
+ * A. You can't address *all* registers as 64-bit wide:-( The catch is
+ *	that you actually may rely upon %o0-%o5 and %g1-%g4 being fully
+ *	preserved if you're in a leaf function, i.e. one that never
+ *	calls any other functions. All functions in this module are
+ *	leaf, and 10 registers is a handful. And as a matter of fact
+ *	the non-"comba" routines don't even require that much, and I
+ *	could even afford not to allocate a stack frame for 'em:-)
+ *
+ * Q. What about 64-bit kernels?
+ * A. What about 'em? Just kidding:-) A pure 64-bit version is
+ *	currently under evaluation and development...
+ *
+ * Q. What about shared libraries?
+ * A. What about 'em? Kidding again:-) The code does *not* contain
+ *	any position dependencies and it's safe to include it into a
+ *	shared library as is.
+ *
+ * Q. How much faster does it go?
+ * A. Do you have a good benchmark? In either case, below is what I
+ *	experience with the crypto/bn/expspeed.c test program:
+ *
+ *	v8plus module on U10/300MHz against bn_asm.c compiled with:
+ *
+ *	cc-5.0 -xarch=v8plus -xO5 -xdepend	+7-12%
+ *	cc-4.2 -xarch=v8plus -xO5 -xdepend	+25-35%
+ *	egcs-1.1.2 -mcpu=ultrasparc -O3		+35-45%
+ *
+ *	v8 module on SS10/60MHz against bn_asm.c compiled with:
+ *
+ *	cc-5.0 -xarch=v8 -xO5 -xdepend		+7-10%
+ *	cc-4.2 -xarch=v8 -xO5 -xdepend		+10%
+ *	egcs-1.1.2 -mv8 -O3			+35-45%
+ *
+ *	As you can see it's damn hard to beat the new Sun C compiler,
+ *	and it's first and foremost GNU C users who will appreciate
+ *	this assembler implementation:-)
+ */
+
+/*
+ * Revision history.
+ *
+ * 1.0 - initial release;
+ * 1.1 - new loop unrolling model(*);
+ *	- some more fine tuning;
+ * 1.2 - made gas friendly;
+ *	- updates to documentation concerning v9;
+ *	- new performance comparison matrix;
+ * 1.3 - fixed problem with /usr/ccs/lib/cpp;
+ * 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient)
+ *	resulting in slight overall performance kick;
+ *	- some retunes;
+ *	- support for GNU as added;
+ *
+ * (*) Originally the unrolled loop looked like this:
+ *	for (;;) {
+ *		op(p+0); if (--n==0) break;
+ *		op(p+1); if (--n==0) break;
+ *		op(p+2); if (--n==0) break;
+ *		op(p+3); if (--n==0) break;
+ *		p+=4;
+ *	}
+ *	I unroll according to the following:
+ *	while (n&~3) {
+ *		op(p+0); op(p+1); op(p+2); op(p+3);
+ *		p+=4; n-=4;
+ *	}
+ *	if (n) {
+ *		op(p+0); if (--n==0) return;
+ *		op(p+1); if (--n==0) return;
+ *		op(p+2); return;
+ *	}
+ */
+
+#if defined(__SUNPRO_C) && defined(__sparcv9)
+  /* They've said -xarch=v9 at command line */
+  .register	%g2,#scratch
+  .register	%g3,#scratch
+# define	FRAME_SIZE	-192
+#elif defined(__GNUC__) && defined(__arch64__)
+  /* They've said -m64 at command line */
+  .register	%g2,#scratch
+  .register	%g3,#scratch
+# define	FRAME_SIZE	-192
+#else
+# define	FRAME_SIZE	-96
+#endif
+/*
+ * GNU assembler can't stand stuw:-(
+ */
+#define stuw st
+
+.section	".text",#alloc,#execinstr
+.file		"bn_asm.sparc.v8plus.S"
+
+.align	32
+
+.global bn_mul_add_words
+/*
+ * BN_ULONG bn_mul_add_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_add_words:
+	sra	%o2,%g0,%o2	! signx %o2
+	brgz,a	%o2,.L_bn_mul_add_words_proceed
+	lduw	[%o1],%g2
+	retl
+	clr	%o0
+	nop
+	nop
+	nop
+
+.L_bn_mul_add_words_proceed:
+	srl	%o3,%g0,%o3	! clruw %o3
+	andcc	%o2,-4,%g0
+	bz,pn	%icc,.L_bn_mul_add_words_tail
+	clr	%o5
+
+.L_bn_mul_add_words_loop:	! wow! 32 aligned!
+ lduw [%o0],%g1 + lduw [%o1+4],%g3 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + nop + add %o4,%g2,%o4 + stuw %o4,[%o0] + srlx %o4,32,%o5 + + lduw [%o0+4],%g1 + lduw [%o1+8],%g2 + mulx %o3,%g3,%g3 + add %g1,%o5,%o4 + dec 4,%o2 + add %o4,%g3,%o4 + stuw %o4,[%o0+4] + srlx %o4,32,%o5 + + lduw [%o0+8],%g1 + lduw [%o1+12],%g3 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + inc 16,%o1 + add %o4,%g2,%o4 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + + lduw [%o0+12],%g1 + mulx %o3,%g3,%g3 + add %g1,%o5,%o4 + inc 16,%o0 + add %o4,%g3,%o4 + andcc %o2,-4,%g0 + stuw %o4,[%o0-4] + srlx %o4,32,%o5 + bnz,a,pt %icc,.L_bn_mul_add_words_loop + lduw [%o1],%g2 + + brnz,a,pn %o2,.L_bn_mul_add_words_tail + lduw [%o1],%g2 +.L_bn_mul_add_words_return: + retl + mov %o5,%o0 + +.L_bn_mul_add_words_tail: + lduw [%o0],%g1 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + dec %o2 + add %o4,%g2,%o4 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_add_words_return + stuw %o4,[%o0] + + lduw [%o1+4],%g2 + lduw [%o0+4],%g1 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + dec %o2 + add %o4,%g2,%o4 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_add_words_return + stuw %o4,[%o0+4] + + lduw [%o1+8],%g2 + lduw [%o0+8],%g1 + mulx %o3,%g2,%g2 + add %g1,%o5,%o4 + add %o4,%g2,%o4 + stuw %o4,[%o0+8] + retl + srlx %o4,32,%o0 + +.type bn_mul_add_words,#function +.size bn_mul_add_words,(.-bn_mul_add_words) + +.align 32 + +.global bn_mul_words +/* + * BN_ULONG bn_mul_words(rp,ap,num,w) + * BN_ULONG *rp,*ap; + * int num; + * BN_ULONG w; + */ +bn_mul_words: + sra %o2,%g0,%o2 ! signx %o2 + brgz,a %o2,.L_bn_mul_words_proceeed + lduw [%o1],%g2 + retl + clr %o0 + nop + nop + nop + +.L_bn_mul_words_proceeed: + srl %o3,%g0,%o3 ! clruw %o3 + andcc %o2,-4,%g0 + bz,pn %icc,.L_bn_mul_words_tail + clr %o5 + +.L_bn_mul_words_loop: ! wow! 32 aligned! + lduw [%o1+4],%g3 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + nop + stuw %o4,[%o0] + srlx %o4,32,%o5 + + lduw [%o1+8],%g2 + mulx %o3,%g3,%g3 + add %g3,%o5,%o4 + dec 4,%o2 + stuw %o4,[%o0+4] + srlx %o4,32,%o5 + + lduw [%o1+12],%g3 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + inc 16,%o1 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + + mulx %o3,%g3,%g3 + add %g3,%o5,%o4 + inc 16,%o0 + stuw %o4,[%o0-4] + srlx %o4,32,%o5 + andcc %o2,-4,%g0 + bnz,a,pt %icc,.L_bn_mul_words_loop + lduw [%o1],%g2 + nop + nop + + brnz,a,pn %o2,.L_bn_mul_words_tail + lduw [%o1],%g2 +.L_bn_mul_words_return: + retl + mov %o5,%o0 + +.L_bn_mul_words_tail: + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + dec %o2 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_words_return + stuw %o4,[%o0] + + lduw [%o1+4],%g2 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + dec %o2 + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_mul_words_return + stuw %o4,[%o0+4] + + lduw [%o1+8],%g2 + mulx %o3,%g2,%g2 + add %g2,%o5,%o4 + stuw %o4,[%o0+8] + retl + srlx %o4,32,%o0 + +.type bn_mul_words,#function +.size bn_mul_words,(.-bn_mul_words) + +.align 32 +.global bn_sqr_words +/* + * void bn_sqr_words(r,a,n) + * BN_ULONG *r,*a; + * int n; + */ +bn_sqr_words: + sra %o2,%g0,%o2 ! signx %o2 + brgz,a %o2,.L_bn_sqr_words_proceeed + lduw [%o1],%g2 + retl + clr %o0 + nop + nop + nop + +.L_bn_sqr_words_proceeed: + andcc %o2,-4,%g0 + nop + bz,pn %icc,.L_bn_sqr_words_tail + nop + +.L_bn_sqr_words_loop: ! wow! 32 aligned! 
+ lduw [%o1+4],%g3 + mulx %g2,%g2,%o4 + stuw %o4,[%o0] + srlx %o4,32,%o5 + stuw %o5,[%o0+4] + nop + + lduw [%o1+8],%g2 + mulx %g3,%g3,%o4 + dec 4,%o2 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + stuw %o5,[%o0+12] + + lduw [%o1+12],%g3 + mulx %g2,%g2,%o4 + srlx %o4,32,%o5 + stuw %o4,[%o0+16] + inc 16,%o1 + stuw %o5,[%o0+20] + + mulx %g3,%g3,%o4 + inc 32,%o0 + stuw %o4,[%o0-8] + srlx %o4,32,%o5 + andcc %o2,-4,%g2 + stuw %o5,[%o0-4] + bnz,a,pt %icc,.L_bn_sqr_words_loop + lduw [%o1],%g2 + nop + + brnz,a,pn %o2,.L_bn_sqr_words_tail + lduw [%o1],%g2 +.L_bn_sqr_words_return: + retl + clr %o0 + +.L_bn_sqr_words_tail: + mulx %g2,%g2,%o4 + dec %o2 + stuw %o4,[%o0] + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_sqr_words_return + stuw %o5,[%o0+4] + + lduw [%o1+4],%g2 + mulx %g2,%g2,%o4 + dec %o2 + stuw %o4,[%o0+8] + srlx %o4,32,%o5 + brz,pt %o2,.L_bn_sqr_words_return + stuw %o5,[%o0+12] + + lduw [%o1+8],%g2 + mulx %g2,%g2,%o4 + srlx %o4,32,%o5 + stuw %o4,[%o0+16] + stuw %o5,[%o0+20] + retl + clr %o0 + +.type bn_sqr_words,#function +.size bn_sqr_words,(.-bn_sqr_words) + +.align 32 +.global bn_div_words +/* + * BN_ULONG bn_div_words(h,l,d) + * BN_ULONG h,l,d; + */ +bn_div_words: + sllx %o0,32,%o0 + or %o0,%o1,%o0 + udivx %o0,%o2,%o0 + retl + srl %o0,%g0,%o0 ! clruw %o0 + +.type bn_div_words,#function +.size bn_div_words,(.-bn_div_words) + +.align 32 + +.global bn_add_words +/* + * BN_ULONG bn_add_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_add_words: + sra %o3,%g0,%o3 ! signx %o3 + brgz,a %o3,.L_bn_add_words_proceed + lduw [%o1],%o4 + retl + clr %o0 + +.L_bn_add_words_proceed: + andcc %o3,-4,%g0 + bz,pn %icc,.L_bn_add_words_tail + addcc %g0,0,%g0 ! clear carry flag + +.L_bn_add_words_loop: ! wow! 32 aligned! + dec 4,%o3 + lduw [%o2],%o5 + lduw [%o1+4],%g1 + lduw [%o2+4],%g2 + lduw [%o1+8],%g3 + lduw [%o2+8],%g4 + addccc %o5,%o4,%o5 + stuw %o5,[%o0] + + lduw [%o1+12],%o4 + lduw [%o2+12],%o5 + inc 16,%o1 + addccc %g1,%g2,%g1 + stuw %g1,[%o0+4] + + inc 16,%o2 + addccc %g3,%g4,%g3 + stuw %g3,[%o0+8] + + inc 16,%o0 + addccc %o5,%o4,%o5 + stuw %o5,[%o0-4] + and %o3,-4,%g1 + brnz,a,pt %g1,.L_bn_add_words_loop + lduw [%o1],%o4 + + brnz,a,pn %o3,.L_bn_add_words_tail + lduw [%o1],%o4 +.L_bn_add_words_return: + clr %o0 + retl + movcs %icc,1,%o0 + nop + +.L_bn_add_words_tail: + lduw [%o2],%o5 + dec %o3 + addccc %o5,%o4,%o5 + brz,pt %o3,.L_bn_add_words_return + stuw %o5,[%o0] + + lduw [%o1+4],%o4 + lduw [%o2+4],%o5 + dec %o3 + addccc %o5,%o4,%o5 + brz,pt %o3,.L_bn_add_words_return + stuw %o5,[%o0+4] + + lduw [%o1+8],%o4 + lduw [%o2+8],%o5 + addccc %o5,%o4,%o5 + stuw %o5,[%o0+8] + clr %o0 + retl + movcs %icc,1,%o0 + +.type bn_add_words,#function +.size bn_add_words,(.-bn_add_words) + +.global bn_sub_words +/* + * BN_ULONG bn_sub_words(rp,ap,bp,n) + * BN_ULONG *rp,*ap,*bp; + * int n; + */ +bn_sub_words: + sra %o3,%g0,%o3 ! signx %o3 + brgz,a %o3,.L_bn_sub_words_proceed + lduw [%o1],%o4 + retl + clr %o0 + +.L_bn_sub_words_proceed: + andcc %o3,-4,%g0 + bz,pn %icc,.L_bn_sub_words_tail + addcc %g0,0,%g0 ! clear carry flag + +.L_bn_sub_words_loop: ! wow! 32 aligned! 
+ dec 4,%o3 + lduw [%o2],%o5 + lduw [%o1+4],%g1 + lduw [%o2+4],%g2 + lduw [%o1+8],%g3 + lduw [%o2+8],%g4 + subccc %o4,%o5,%o5 + stuw %o5,[%o0] + + lduw [%o1+12],%o4 + lduw [%o2+12],%o5 + inc 16,%o1 + subccc %g1,%g2,%g2 + stuw %g2,[%o0+4] + + inc 16,%o2 + subccc %g3,%g4,%g4 + stuw %g4,[%o0+8] + + inc 16,%o0 + subccc %o4,%o5,%o5 + stuw %o5,[%o0-4] + and %o3,-4,%g1 + brnz,a,pt %g1,.L_bn_sub_words_loop + lduw [%o1],%o4 + + brnz,a,pn %o3,.L_bn_sub_words_tail + lduw [%o1],%o4 +.L_bn_sub_words_return: + clr %o0 + retl + movcs %icc,1,%o0 + nop + +.L_bn_sub_words_tail: ! wow! 32 aligned! + lduw [%o2],%o5 + dec %o3 + subccc %o4,%o5,%o5 + brz,pt %o3,.L_bn_sub_words_return + stuw %o5,[%o0] + + lduw [%o1+4],%o4 + lduw [%o2+4],%o5 + dec %o3 + subccc %o4,%o5,%o5 + brz,pt %o3,.L_bn_sub_words_return + stuw %o5,[%o0+4] + + lduw [%o1+8],%o4 + lduw [%o2+8],%o5 + subccc %o4,%o5,%o5 + stuw %o5,[%o0+8] + clr %o0 + retl + movcs %icc,1,%o0 + +.type bn_sub_words,#function +.size bn_sub_words,(.-bn_sub_words) + +/* + * Code below depends on the fact that upper parts of the %l0-%l7 + * and %i0-%i7 are zeroed by kernel after context switch. In + * previous versions this comment stated that "the trouble is that + * it's not feasible to implement the mumbo-jumbo in less V9 + * instructions:-(" which apparently isn't true thanks to + * 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement + * results not from the shorter code, but from elimination of + * multicycle none-pairable 'rd %y,%rd' instructions. + * + * Andy. + */ + +/* + * Here is register usage map for *all* routines below. + */ +#define t_1 %o0 +#define t_2 %o1 +#define c_12 %o2 +#define c_3 %o3 + +#define ap(I) [%i1+4*I] +#define bp(I) [%i2+4*I] +#define rp(I) [%i0+4*I] + +#define a_0 %l0 +#define a_1 %l1 +#define a_2 %l2 +#define a_3 %l3 +#define a_4 %l4 +#define a_5 %l5 +#define a_6 %l6 +#define a_7 %l7 + +#define b_0 %i3 +#define b_1 %i4 +#define b_2 %i5 +#define b_3 %o4 +#define b_4 %o5 +#define b_5 %o7 +#define b_6 %g1 +#define b_7 %g4 + +.align 32 +.global bn_mul_comba8 +/* + * void bn_mul_comba8(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba8: + save %sp,FRAME_SIZE,%sp + mov 1,t_2 + lduw ap(0),a_0 + sllx t_2,32,t_2 + lduw bp(0),b_0 != + lduw bp(1),b_1 + mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !=!r[0]=c1; + + lduw ap(1),a_1 + mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(2),a_2 + mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(2),b_2 != + mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(3),b_3 + mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) !r[2]=c3; + or c_12,c_3,c_12 != + + mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(4),a_4 + mulx a_3,b_0,t_1 
!=!mul_add_c(a[3],b[0],c1,c2,c3);!= + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(3) !r[3]=c1; + or c_12,c_3,c_12 + + mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(4),b_4 != + mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(5),b_5 + mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !r[4]=c2; + or c_12,c_3,c_12 != + + mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(5),a_5 + mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(6),a_6 + mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(5) !r[5]=c3; + or c_12,c_3,c_12 + + mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(6),b_6 != + mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(7),b_7 + mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(6) !r[6]=c1; + or c_12,c_3,c_12 != + + mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(7),a_7 + mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(7) !r[7]=c2; + or c_12,c_3,c_12 + + mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 
+ clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + srlx t_1,32,c_12 + stuw t_1,rp(8) !r[8]=c3; + or c_12,c_3,c_12 + + mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(9) !r[9]=c1; + or c_12,c_3,c_12 != + + mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(10) !r[10]=c2; + or c_12,c_3,c_12 != + + mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(11) !r[11]=c3; + or c_12,c_3,c_12 != + + mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(12) !r[12]=c1; + or c_12,c_3,c_12 != + + mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + st t_1,rp(13) !r[13]=c2; + or c_12,c_3,c_12 != + + mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 != + stuw t_1,rp(14) !r[14]=c3; + stuw c_12,rp(15) 
!r[15]=c1; + + ret + restore %g0,%g0,%o0 != + +.type bn_mul_comba8,#function +.size bn_mul_comba8,(.-bn_mul_comba8) + +.align 32 + +.global bn_mul_comba4 +/* + * void bn_mul_comba4(r,a,b) + * BN_ULONG *r,*a,*b; + */ +bn_mul_comba4: + save %sp,FRAME_SIZE,%sp + lduw ap(0),a_0 + mov 1,t_2 + lduw bp(0),b_0 + sllx t_2,32,t_2 != + lduw bp(1),b_1 + mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !=!r[0]=c1; + + lduw ap(1),a_1 + mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(2),a_2 + mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 != + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2); + addcc c_12,t_1,c_12 != + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw bp(2),b_2 != + mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 != + lduw bp(3),b_3 + mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) !r[2]=c3; + or c_12,c_3,c_12 != + + mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 != + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!= + addcc c_12,t_1,t_1 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(3) !=!r[3]=c1; + or c_12,c_3,c_12 + + mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1); + addcc c_12,t_1,c_12 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1); + addcc c_12,t_1,t_1 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !=!r[4]=c2; + or c_12,c_3,c_12 + + mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2); + addcc c_12,t_1,t_1 != + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(5) !=!r[5]=c3; + or c_12,c_3,c_12 + + mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 != + stuw t_1,rp(6) !r[6]=c1; + stuw c_12,rp(7) !r[7]=c2; + + ret + restore %g0,%g0,%o0 + +.type bn_mul_comba4,#function +.size bn_mul_comba4,(.-bn_mul_comba4) + +.align 32 + +.global bn_sqr_comba8 +bn_sqr_comba8: + save %sp,FRAME_SIZE,%sp + mov 1,t_2 + lduw ap(0),a_0 + sllx t_2,32,t_2 + lduw ap(1),a_1 + mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !r[0]=c1; + + lduw ap(2),a_2 + mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) 
!r[2]=c3; + or c_12,c_3,c_12 + + mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(4),a_4 + mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + st t_1,rp(3) !r[3]=c1; + or c_12,c_3,c_12 + + mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(5),a_5 + mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !r[4]=c2; + or c_12,c_3,c_12 + + mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(6),a_6 + mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(5) !r[5]=c3; + or c_12,c_3,c_12 + + mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(7),a_7 + mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(6) !r[6]=c1; + or c_12,c_3,c_12 + + mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(7) !r[7]=c2; + or c_12,c_3,c_12 + + mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx 
t_1,32,c_12 + stuw t_1,rp(8) !r[8]=c3; + or c_12,c_3,c_12 + + mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(9) !r[9]=c1; + or c_12,c_3,c_12 + + mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(10) !r[10]=c2; + or c_12,c_3,c_12 + + mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(11) !r[11]=c3; + or c_12,c_3,c_12 + + mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(12) !r[12]=c1; + or c_12,c_3,c_12 + + mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(13) !r[13]=c2; + or c_12,c_3,c_12 + + mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 + stuw t_1,rp(14) !r[14]=c3; + stuw c_12,rp(15) !r[15]=c1; + + ret + restore %g0,%g0,%o0 + +.type bn_sqr_comba8,#function +.size bn_sqr_comba8,(.-bn_sqr_comba8) + +.align 32 + +.global bn_sqr_comba4 +/* + * void bn_sqr_comba4(r,a) + * BN_ULONG *r,*a; + */ +bn_sqr_comba4: + save %sp,FRAME_SIZE,%sp + mov 1,t_2 + lduw ap(0),a_0 + sllx t_2,32,t_2 + lduw ap(1),a_1 + mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3); + srlx t_1,32,c_12 + stuw t_1,rp(0) !r[0]=c1; + + lduw ap(2),a_2 + mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(1) !r[1]=c2; + or c_12,c_3,c_12 + + mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + lduw ap(3),a_3 + mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(2) !r[2]=c3; + or c_12,c_3,c_12 + + mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3); + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc 
c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(3) !r[3]=c1; + or c_12,c_3,c_12 + + mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,c_12 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1); + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(4) !r[4]=c2; + or c_12,c_3,c_12 + + mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2); + addcc c_12,t_1,c_12 + clr c_3 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + addcc c_12,t_1,t_1 + bcs,a %xcc,.+8 + add c_3,t_2,c_3 + srlx t_1,32,c_12 + stuw t_1,rp(5) !r[5]=c3; + or c_12,c_3,c_12 + + mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3); + addcc c_12,t_1,t_1 + srlx t_1,32,c_12 + stuw t_1,rp(6) !r[6]=c1; + stuw c_12,rp(7) !r[7]=c2; + + ret + restore %g0,%g0,%o0 + +.type bn_sqr_comba4,#function +.size bn_sqr_comba4,(.-bn_sqr_comba4) + +.align 32 diff --git a/openssl/crypto/bn/asm/sparcv9-mont.pl b/openssl/crypto/bn/asm/sparcv9-mont.pl new file mode 100644 index 00000000..b8fb1e8a --- /dev/null +++ b/openssl/crypto/bn/asm/sparcv9-mont.pl @@ -0,0 +1,606 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# December 2005 +# +# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons +# for undertaking this effort are multiple. First of all, UltraSPARC is not +# the whole SPARCv9 universe and other VIS-free implementations deserve +# optimized code as much. Secondly, the newly introduced UltraSPARC T1, +# a.k.a. Niagara, has a shared FPU, and concurrent FPU-intensive paths, +# such as sparcv9a-mont, will simply sink it. Yes, T1 is equipped with +# several integrated RSA/DSA accelerator circuits accessible through +# kernel driver [only(*)], but having a decent user-land software +# implementation is important too. Finally, there was the desire to +# experiment with a dedicated squaring procedure. Yes, this module +# implements one, because it was easiest to draft it in SPARCv9 +# instructions... + +# (*) Engine accessing the driver in question is on my TODO list. +# For reference, the accelerator is estimated to give 6 to 10 times +# improvement on single-threaded RSA sign. It should be noted +# that the 6-10x improvement coefficient does not actually mean +# something extraordinary in terms of absolute [single-threaded] +# performance, as the SPARCv9 instruction set is by all means least +# suitable for high performance crypto among other 64 bit +# platforms. The 6-10x factor simply places T1 in the same performance +# domain as, say, AMD64 and IA-64. Improvement of RSA verify doesn't +# appear impressive at all, but it's the sign operation which is +# far more critical/interesting. + +# You might notice that the inner loops are modulo-scheduled:-) This has +# essentially negligible impact on UltraSPARC performance, it's +# Fujitsu SPARC64 V users who should notice and hopefully appreciate +# the advantage... Currently this module surpasses sparcv9a-mont.pl +# by ~20% on UltraSPARC-III and later cores, but recall that the sparcv9a +# module still has hidden potential [see TODO list there], which is +# estimated to be larger than 20%...
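[Editorial note -- not part of the diff. The !mul_add_c/!sqr_add_c annotations in the comba routines above name the word-multiply-accumulate macros of OpenSSL's portable C code, and the *-mont.pl modules in this directory all implement the same bn_mul_mont contract. For orientation, below is a minimal C sketch of the word-serial Montgomery multiplication these files hand-optimize; bn_mul_mont_ref, the C99 variable-length array and the branching final selection are illustrative assumptions, not code taken from this diff.]

#include <stdint.h>

typedef uint32_t BN_ULONG;

/* Reference sketch: rp[] = ap[] * bp[] * 2^(-32*num) mod np[];
 * assumes num >= 4, as the assembler versions do. */
static int bn_mul_mont_ref(BN_ULONG *rp, const BN_ULONG *ap,
                           const BN_ULONG *bp, const BN_ULONG *np,
                           const BN_ULONG *n0p, int num)
{
    BN_ULONG tp[num + 2];      /* the asm keeps this scratch on the stack too */
    BN_ULONG n0 = n0p[0];      /* n0 = -np[0]^(-1) mod 2^32 */
    int i, j;

    for (j = 0; j < num + 2; j++)
        tp[j] = 0;

    for (i = 0; i < num; i++) {
        uint64_t c = 0;
        BN_ULONG m;

        /* tp += ap * bp[i]: one word of bp per outer iteration */
        for (j = 0; j < num; j++) {
            c += (uint64_t)tp[j] + (uint64_t)ap[j] * bp[i];
            tp[j] = (BN_ULONG)c;
            c >>= 32;
        }
        c += tp[num];
        tp[num] = (BN_ULONG)c;
        tp[num + 1] = (BN_ULONG)(c >> 32);

        /* m is chosen so that adding m*np zeroes the low word of tp */
        m = tp[0] * n0;
        c = 0;
        for (j = 0; j < num; j++) {
            c += (uint64_t)tp[j] + (uint64_t)np[j] * m;
            tp[j] = (BN_ULONG)c;
            c >>= 32;
        }
        c += tp[num];
        tp[num] = (BN_ULONG)c;
        tp[num + 1] += (BN_ULONG)(c >> 32);

        /* divide by 2^32, i.e. drop the now-zero least significant word */
        for (j = 0; j <= num; j++)
            tp[j] = tp[j + 1];
        tp[num + 1] = 0;
    }

    /* tp < 2*np here; subtract np once and keep whichever result fits */
    {
        int64_t borrow = 0;
        for (j = 0; j < num; j++) {
            int64_t d = (int64_t)tp[j] - np[j] - borrow;
            rp[j] = (BN_ULONG)d;
            borrow = d < 0;
        }
        if (tp[num] < (BN_ULONG)borrow)    /* underflow: tp was < np */
            for (j = 0; j < num; j++)
                rp[j] = tp[j];             /* the asm does this select branchlessly */
    }
    return 1;
}

The integer-only module below follows exactly this shape, carrying 32-bit words in 64-bit registers, while sparcv9a-mont.pl further splits each word into 16-bit limbs so that the fully pipelined FPU can produce the partial products.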
+ +# int bn_mul_mont( +$rp="%i0"; # BN_ULONG *rp, +$ap="%i1"; # const BN_ULONG *ap, +$bp="%i2"; # const BN_ULONG *bp, +$np="%i3"; # const BN_ULONG *np, +$n0="%i4"; # const BN_ULONG *n0, +$num="%i5"; # int num); + +$bits=32; +for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); } +if ($bits==64) { $bias=2047; $frame=192; } +else { $bias=0; $frame=128; } + +$car0="%o0"; +$car1="%o1"; +$car2="%o2"; # 1 bit +$acc0="%o3"; +$acc1="%o4"; +$mask="%g1"; # 32 bits, what a waste... +$tmp0="%g4"; +$tmp1="%g5"; + +$i="%l0"; +$j="%l1"; +$mul0="%l2"; +$mul1="%l3"; +$tp="%l4"; +$apj="%l5"; +$npj="%l6"; +$tpj="%l7"; + +$fname="bn_mul_mont_int"; + +$code=<<___; +.section ".text",#alloc,#execinstr + +.global $fname +.align 32 +$fname: + cmp %o5,4 ! 128 bits minimum + bge,pt %icc,.Lenter + sethi %hi(0xffffffff),$mask + retl + clr %o0 +.align 32 +.Lenter: + save %sp,-$frame,%sp + sll $num,2,$num ! num*=4 + or $mask,%lo(0xffffffff),$mask + ld [$n0],$n0 + cmp $ap,$bp + and $num,$mask,$num + ld [$bp],$mul0 ! bp[0] + nop + + add %sp,$bias,%o7 ! real top of stack + ld [$ap],$car0 ! ap[0] ! redundant in squaring context + sub %o7,$num,%o7 + ld [$ap+4],$apj ! ap[1] + and %o7,-1024,%o7 + ld [$np],$car1 ! np[0] + sub %o7,$bias,%sp ! alloca + ld [$np+4],$npj ! np[1] + be,pt `$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont + mov 12,$j + + mulx $car0,$mul0,$car0 ! ap[0]*bp[0] + mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0] + and $car0,$mask,$acc0 + add %sp,$bias+$frame,$tp + ld [$ap+8],$apj !prologue! + + mulx $n0,$acc0,$mul1 ! "t[0]"*n0 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0 + mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + ld [$np+8],$npj !prologue! + srlx $car1,32,$car1 + mov $tmp0,$acc0 !prologue! + +.L1st: + mulx $apj,$mul0,$tmp0 + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + add $acc1,$car1,$car1 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + add $j,4,$j ! j++ + mov $tmp0,$acc0 + st $car1,[$tp] + cmp $j,$num + mov $tmp1,$acc1 + srlx $car1,32,$car1 + bl %icc,.L1st + add $tp,4,$tp ! tp++ +!.L1st + + mulx $apj,$mul0,$tmp0 !epilogue! + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 + and $car0,$mask,$acc0 + add $acc1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $tmp0,$car0,$car0 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car1 + + add $car0,$car1,$car1 + st $car1,[$tp+8] + srlx $car1,32,$car2 + + mov 4,$i ! i++ + ld [$bp+4],$mul0 ! bp[1] +.Louter: + add %sp,$bias+$frame,$tp + ld [$ap],$car0 ! ap[0] + ld [$ap+4],$apj ! ap[1] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! np[1] + ld [$tp],$tmp1 ! tp[0] + ld [$tp+4],$tpj ! tp[1] + mov 12,$j + + mulx $car0,$mul0,$car0 + mulx $apj,$mul0,$tmp0 !prologue! + add $tmp1,$car0,$car0 + ld [$ap+8],$apj !prologue! + and $car0,$mask,$acc0 + + mulx $n0,$acc0,$mul1 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 + mulx $npj,$mul1,$acc1 !prologue! + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + ld [$np+8],$npj !prologue! + srlx $car1,32,$car1 + mov $tmp0,$acc0 !prologue! + +.Linner: + mulx $apj,$mul0,$tmp0 + mulx $npj,$mul1,$tmp1 + add $tpj,$car0,$car0 + ld [$ap+$j],$apj ! ap[j] + add $acc0,$car0,$car0 + add $acc1,$car1,$car1 + ld [$np+$j],$npj ! np[j] + and $car0,$mask,$acc0 + ld [$tp+8],$tpj ! tp[j] + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + add $j,4,$j ! 
j++ + mov $tmp0,$acc0 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + mov $tmp1,$acc1 + cmp $j,$num + bl %icc,.Linner + add $tp,4,$tp ! tp++ +!.Linner + + mulx $apj,$mul0,$tmp0 !epilogue! + mulx $npj,$mul1,$tmp1 + add $tpj,$car0,$car0 + add $acc0,$car0,$car0 + ld [$tp+8],$tpj ! tp[j] + and $car0,$mask,$acc0 + add $acc1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + + add $tpj,$car0,$car0 + add $tmp0,$car0,$car0 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + add $acc0,$car1,$car1 + st $car1,[$tp+4] ! tp[j-1] + srlx $car0,32,$car0 + add $i,4,$i ! i++ + srlx $car1,32,$car1 + + add $car0,$car1,$car1 + cmp $i,$num + add $car2,$car1,$car1 + st $car1,[$tp+8] + + srlx $car1,32,$car2 + bl,a %icc,.Louter + ld [$bp+$i],$mul0 ! bp[i] +!.Louter + + add $tp,12,$tp + +.Ltail: + add $np,$num,$np + add $rp,$num,$rp + mov $tp,$ap + sub %g0,$num,%o7 ! k=-num + ba .Lsub + subcc %g0,%g0,%g0 ! clear %icc.c +.align 16 +.Lsub: + ld [$tp+%o7],%o0 + ld [$np+%o7],%o1 + subccc %o0,%o1,%o1 ! tp[j]-np[j] + add $rp,%o7,$i + add %o7,4,%o7 + brnz %o7,.Lsub + st %o1,[$i] + subc $car2,0,$car2 ! handle upmost overflow bit + and $tp,$car2,$ap + andn $rp,$car2,$np + or $ap,$np,$ap + sub %g0,$num,%o7 + +.Lcopy: + ld [$ap+%o7],%o0 ! copy or in-place refresh + st %g0,[$tp+%o7] ! zap tp + st %o0,[$rp+%o7] + add %o7,4,%o7 + brnz %o7,.Lcopy + nop + mov 1,%i0 + ret + restore +___ + +######## +######## .Lbn_sqr_mont gives up to 20% *overall* improvement over +######## code without following dedicated squaring procedure. +######## +$sbit="%i2"; # re-use $bp! + +$code.=<<___; +.align 32 +.Lbn_sqr_mont: + mulx $mul0,$mul0,$car0 ! ap[0]*ap[0] + mulx $apj,$mul0,$tmp0 !prologue! + and $car0,$mask,$acc0 + add %sp,$bias+$frame,$tp + ld [$ap+8],$apj !prologue! + + mulx $n0,$acc0,$mul1 ! "t[0]"*n0 + srlx $car0,32,$car0 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0 + mulx $npj,$mul1,$acc1 !prologue! + and $car0,1,$sbit + ld [$np+8],$npj !prologue! + srlx $car0,1,$car0 + add $acc0,$car1,$car1 + srlx $car1,32,$car1 + mov $tmp0,$acc0 !prologue! + +.Lsqr_1st: + mulx $apj,$mul0,$tmp0 + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 ! ap[j]*a0+c0 + add $acc1,$car1,$car1 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + mov $tmp1,$acc1 + srlx $acc0,32,$sbit + add $j,4,$j ! j++ + and $acc0,$mask,$acc0 + cmp $j,$num + add $acc0,$car1,$car1 + st $car1,[$tp] + mov $tmp0,$acc0 + srlx $car1,32,$car1 + bl %icc,.Lsqr_1st + add $tp,4,$tp ! tp++ +!.Lsqr_1st + + mulx $apj,$mul0,$tmp0 ! epilogue + mulx $npj,$mul1,$tmp1 + add $acc0,$car0,$car0 ! ap[j]*a0+c0 + add $acc1,$car1,$car1 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $tmp0,$car0,$car0 ! ap[j]*a0+c0 + add $tmp1,$car1,$car1 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + st $car1,[$tp+8] + srlx $car1,32,$car2 + + ld [%sp+$bias+$frame],$tmp0 ! tp[0] + ld [%sp+$bias+$frame+4],$tmp1 ! tp[1] + ld [%sp+$bias+$frame+8],$tpj ! tp[2] + ld [$ap+4],$mul0 ! ap[1] + ld [$ap+8],$apj ! ap[2] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! 
np[1] + mulx $n0,$tmp0,$mul1 + + mulx $mul0,$mul0,$car0 + and $mul1,$mask,$mul1 + + mulx $car1,$mul1,$car1 + mulx $npj,$mul1,$acc1 + add $tmp0,$car1,$car1 + and $car0,$mask,$acc0 + ld [$np+8],$npj ! np[2] + srlx $car1,32,$car1 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add $acc0,$car1,$car1 + and $car0,1,$sbit + add $acc1,$car1,$car1 + srlx $car0,1,$car0 + mov 12,$j + st $car1,[%sp+$bias+$frame] ! tp[0]= + srlx $car1,32,$car1 + add %sp,$bias+$frame+4,$tp + +.Lsqr_2nd: + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $acc0,$car0,$car0 + add $tpj,$car1,$car1 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc1,$car1,$car1 + ld [$tp+8],$tpj ! tp[j] + add $acc0,$acc0,$acc0 + add $j,4,$j ! j++ + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + cmp $j,$num + add $acc0,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + bl %icc,.Lsqr_2nd + add $tp,4,$tp ! tp++ +!.Lsqr_2nd + + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $acc0,$car0,$car0 + add $tpj,$car1,$car1 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc1,$car1,$car1 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + add $car2,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car2 + + ld [%sp+$bias+$frame],$tmp1 ! tp[0] + ld [%sp+$bias+$frame+4],$tpj ! tp[1] + ld [$ap+8],$mul0 ! ap[2] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! np[1] + mulx $n0,$tmp1,$mul1 + and $mul1,$mask,$mul1 + mov 8,$i + + mulx $mul0,$mul0,$car0 + mulx $car1,$mul1,$car1 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add %sp,$bias+$frame,$tp + srlx $car1,32,$car1 + and $car0,1,$sbit + srlx $car0,1,$car0 + mov 4,$j + +.Lsqr_outer: +.Lsqr_inner1: + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $j,4,$j + ld [$tp+8],$tpj + cmp $j,$i + add $acc1,$car1,$car1 + ld [$np+$j],$npj + st $car1,[$tp] + srlx $car1,32,$car1 + bl %icc,.Lsqr_inner1 + add $tp,4,$tp +!.Lsqr_inner1 + + add $j,4,$j + ld [$ap+$j],$apj ! ap[j] + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + ld [$np+$j],$npj ! np[j] + add $acc0,$car1,$car1 + ld [$tp+8],$tpj ! tp[j] + add $acc1,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $j,4,$j + cmp $j,$num + be,pn %icc,.Lsqr_no_inner2 + add $tp,4,$tp + +.Lsqr_inner2: + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $acc0,$car0,$car0 + ld [$ap+$j],$apj ! ap[j] + and $car0,$mask,$acc0 + ld [$np+$j],$npj ! np[j] + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + ld [$tp+8],$tpj ! tp[j] + or $sbit,$acc0,$acc0 + add $j,4,$j ! j++ + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + cmp $j,$num + add $acc0,$car1,$car1 + add $acc1,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + bl %icc,.Lsqr_inner2 + add $tp,4,$tp ! tp++ + +.Lsqr_no_inner2: + mulx $apj,$mul0,$acc0 + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $acc0,$car0,$car0 + and $car0,$mask,$acc0 + srlx $car0,32,$car0 + add $acc0,$acc0,$acc0 + or $sbit,$acc0,$acc0 + srlx $acc0,32,$sbit + and $acc0,$mask,$acc0 + add $acc0,$car1,$car1 + add $acc1,$car1,$car1 + st $car1,[$tp] ! tp[j-1] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + add $car2,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car2 + + add $i,4,$i ! i++ + ld [%sp+$bias+$frame],$tmp1 ! tp[0] + ld [%sp+$bias+$frame+4],$tpj ! 
tp[1] + ld [$ap+$i],$mul0 ! ap[j] + ld [$np],$car1 ! np[0] + ld [$np+4],$npj ! np[1] + mulx $n0,$tmp1,$mul1 + and $mul1,$mask,$mul1 + add $i,4,$tmp0 + + mulx $mul0,$mul0,$car0 + mulx $car1,$mul1,$car1 + and $car0,$mask,$acc0 + add $tmp1,$car1,$car1 + srlx $car0,32,$car0 + add %sp,$bias+$frame,$tp + srlx $car1,32,$car1 + and $car0,1,$sbit + srlx $car0,1,$car0 + + cmp $tmp0,$num ! i<num-1 + bl %icc,.Lsqr_outer + mov 4,$j + +.Lsqr_last: + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $j,4,$j + ld [$tp+8],$tpj + cmp $j,$i + add $acc1,$car1,$car1 + ld [$np+$j],$npj + st $car1,[$tp] + srlx $car1,32,$car1 + bl %icc,.Lsqr_last + add $tp,4,$tp +!.Lsqr_last + + mulx $npj,$mul1,$acc1 + add $tpj,$car1,$car1 + add $acc0,$car1,$car1 + add $acc1,$car1,$car1 + st $car1,[$tp] + srlx $car1,32,$car1 + + add $car0,$car0,$car0 ! recover $car0 + or $sbit,$car0,$car0 + add $car0,$car1,$car1 + add $car2,$car1,$car1 + st $car1,[$tp+4] + srlx $car1,32,$car2 + + ba .Ltail + add $tp,8,$tp +.type $fname,#function +.size $fname,(.-$fname) +.asciz "Montgomery Multipltication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>" +.align 32 +___ +$code =~ s/\`([^\`]*)\`/eval($1)/gem; +print $code; +close STDOUT; diff --git a/openssl/crypto/bn/asm/sparcv9a-mont.pl b/openssl/crypto/bn/asm/sparcv9a-mont.pl new file mode 100755 index 00000000..a14205f2 --- /dev/null +++ b/openssl/crypto/bn/asm/sparcv9a-mont.pl @@ -0,0 +1,882 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005 +# +# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU? +# Because unlike integer multiplier, which simply stalls whole CPU, +# FPU is fully pipelined and can effectively emit 48 bit partial +# product every cycle. Why not blended SPARC v9? One can argue that +# making this module dependent on UltraSPARC VIS extension limits its +# binary compatibility. Well yes, it does exclude SPARC64 prior-V(!) +# implementations from compatibility matrix. But the rest, whole Sun +# UltraSPARC family and brand new Fujitsu's SPARC64 V, all support +# VIS extension instructions used in this module. This is considered +# good enough to not care about HAL SPARC64 users [if any] who have +# integer-only pure SPARCv9 module to "fall down" to. + +# USI&II cores currently exhibit uniform 2x improvement [over pre- +# bn_mul_mont codebase] for all key lengths and benchmarks. On USIII +# performance improves few percents for shorter keys and worsens few +# percents for longer keys. This is because USIII integer multiplier +# is >3x faster than USI&II one, which is harder to match [but see +# TODO list below]. It should also be noted that SPARC64 V features +# out-of-order execution, which *might* mean that integer multiplier +# is pipelined, which in turn *might* be impossible to match... On +# additional note, SPARC64 V implements FP Multiply-Add instruction, +# which is perfectly usable in this context... 
In other words, as far +# as Fujitsu SPARC64 V goes, talk to the author:-) + +# The implementation implies the following "non-natural" limitations on +# input arguments: +# - num may not be less than 4; +# - num has to be even; +# Failure to meet either condition has no fatal effects; it simply +# doesn't give any performance gain. + +# TODO: +# - modulo-schedule inner loop for better performance (on in-order +# execution core such as UltraSPARC this shall result in further +# noticeable(!) improvement); +# - dedicated squaring procedure[?]; + +###################################################################### +# November 2006 +# +# Modulo-scheduled inner loops make it possible to interleave floating +# point and integer instructions and minimize Read-After-Write penalties. +# This results in *further* 20-50% performance improvement [depending on +# key length, more for longer keys] on USI&II cores and 30-80% - on +# USIII&IV. + +$fname="bn_mul_mont_fpu"; +$bits=32; +for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); } + +if ($bits==64) { + $bias=2047; + $frame=192; +} else { + $bias=0; + $frame=128; # 96 rounded up to largest known cache-line +} +$locals=64; + +# In order to provide for 32-/64-bit ABI duality, I keep integers wider +# than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used +# exclusively for pointers, indexes and other small values... +# int bn_mul_mont( +$rp="%i0"; # BN_ULONG *rp, +$ap="%i1"; # const BN_ULONG *ap, +$bp="%i2"; # const BN_ULONG *bp, +$np="%i3"; # const BN_ULONG *np, +$n0="%i4"; # const BN_ULONG *n0, +$num="%i5"; # int num); + +$tp="%l0"; # t[num] +$ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved +$ap_h="%l2"; # to these four vectors as double-precision FP values. +$np_l="%l3"; # This way a bunch of fxtods are eliminated in second +$np_h="%l4"; # loop and L1-cache aliasing is minimized... +$i="%l5"; +$j="%l6"; +$mask="%l7"; # 16-bit mask, 0xffff + +$n0="%g4"; # reassigned(!) to "64-bit" register +$carry="%i4"; # %i4 reused(!) for a carry bit + +# FP register naming chart +# +# ..HILO +# dcba +# -------- +# LOa +# LOb +# LOc +# LOd +# HIa +# HIb +# HIc +# HId +# ..a +# ..b +$ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6"; +$na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14"; +$alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19"; +$nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23"; + +$dota="%f24"; $dotb="%f26"; + +$aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38"; +$ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46"; +$nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54"; +$nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62"; + +$ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load + +$code=<<___; +.section ".text",#alloc,#execinstr + +.global $fname +.align 32 +$fname: + save %sp,-$frame-$locals,%sp + + cmp $num,4 + bl,a,pn %icc,.Lret + clr %i0 + andcc $num,1,%g0 ! $num has to be even... + bnz,a,pn %icc,.Lret + clr %i0 ! signal "unsupported input value" + + srl $num,1,$num + sethi %hi(0xffff),$mask + ld [%i4+0],$n0 ! $n0 reassigned, remember? + or $mask,%lo(0xffff),$mask + ld [%i4+4],%o0 + sllx %o0,32,%o0 + or %o0,$n0,$n0 ! $n0=n0[1].n0[0] + + sll $num,3,$num ! num*=8 + + add %sp,$bias,%o0 ! real top of stack + sll $num,2,%o1 + add %o1,$num,%o1 ! %o1=num*5 + sub %o0,%o1,%o0 + and %o0,-2048,%o0 ! optimize TLB utilization + sub %o0,$bias,%sp ! alloca(5*num*8) + + rd %asi,%o7 ! save %asi + add %sp,$bias+$frame+$locals,$tp + add $tp,$num,$ap_l + add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
+ add $ap_l,$num,$ap_h + add $ap_h,$num,$np_l + add $np_l,$num,$np_h + + wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads + + add $rp,$num,$rp ! readjust input pointers to point + add $ap,$num,$ap ! at the ends too... + add $bp,$num,$bp + add $np,$num,$np + + stx %o7,[%sp+$bias+$frame+48] ! save %asi + + sub %g0,$num,$i ! i=-num + sub %g0,$num,$j ! j=-num + + add $ap,$j,%o3 + add $bp,$i,%o4 + + ld [%o3+4],%g1 ! bp[0] + ld [%o3+0],%o0 + ld [%o4+4],%g5 ! ap[0] + sllx %g1,32,%g1 + ld [%o4+0],%o1 + sllx %g5,32,%g5 + or %g1,%o0,%o0 + or %g5,%o1,%o1 + + add $np,$j,%o5 + + mulx %o1,%o0,%o0 ! ap[0]*bp[0] + mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0 + stx %o0,[%sp+$bias+$frame+0] + + ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words + fzeros $alo + ld [%o3+4],$ahi_ + fzeros $ahi + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words + fzeros $nlo + ld [%o5+4],$nhi_ + fzeros $nhi + + ! transfer b[i] to FPU as 4x16-bit values + ldda [%o4+2]%asi,$ba + fxtod $alo,$alo + ldda [%o4+0]%asi,$bb + fxtod $ahi,$ahi + ldda [%o4+6]%asi,$bc + fxtod $nlo,$nlo + ldda [%o4+4]%asi,$bd + fxtod $nhi,$nhi + + ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values + ldda [%sp+$bias+$frame+6]%asi,$na + fxtod $ba,$ba + ldda [%sp+$bias+$frame+4]%asi,$nb + fxtod $bb,$bb + ldda [%sp+$bias+$frame+2]%asi,$nc + fxtod $bc,$bc + ldda [%sp+$bias+$frame+0]%asi,$nd + fxtod $bd,$bd + + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + fxtod $na,$na + std $ahi,[$ap_h+$j] + fxtod $nb,$nb + std $nlo,[$np_l+$j] ! save smashed np[j] in double format + fxtod $nc,$nc + std $nhi,[$np_h+$j] + fxtod $nd,$nd + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + fmuld $alo,$bd,$alod + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + fmuld $ahi,$ba,$ahia + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + fmuld $ahi,$bb,$ahib + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + fmuld $ahi,$bc,$ahic + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + fmuld $ahi,$bd,$ahid + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + add $j,8,$j + std $nlob,[%sp+$bias+$frame+8] + add $ap,$j,%o4 + std $nloc,[%sp+$bias+$frame+16] + add $np,$j,%o5 + std $nlod,[%sp+$bias+$frame+24] + + ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words + fzeros $alo + ld [%o4+4],$ahi_ + fzeros $ahi + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words + fzeros $nlo + ld [%o5+4],$nhi_ + fzeros $nhi + + fxtod $alo,$alo + fxtod $ahi,$ahi + fxtod $nlo,$nlo + fxtod $nhi,$nhi + + ldx [%sp+$bias+$frame+0],%o0 + fmuld $alo,$ba,$aloa + ldx [%sp+$bias+$frame+8],%o1 + fmuld $nlo,$na,$nloa + ldx [%sp+$bias+$frame+16],%o2 + fmuld $alo,$bb,$alob + ldx [%sp+$bias+$frame+24],%o3 + fmuld $nlo,$nb,$nlob + + srlx %o0,16,%o7 + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + fmuld $alo,$bc,$aloc + add %o7,%o1,%o1 + std $ahi,[$ap_h+$j] + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + srlx %o1,16,%o7 + std $nlo,[$np_l+$j] ! save smashed np[j] in double format + fmuld $alo,$bd,$alod + add %o7,%o2,%o2 + std $nhi,[$np_h+$j] + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + srlx %o2,16,%o7 + fmuld $ahi,$ba,$ahia + add %o7,%o3,%o3 ! 
%o3.%o2[0..15].%o1[0..15].%o0[0..15] + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + !and %o0,$mask,%o0 + !and %o1,$mask,%o1 + !and %o2,$mask,%o2 + !sllx %o1,16,%o1 + !sllx %o2,32,%o2 + !sllx %o3,48,%o7 + !or %o1,%o0,%o0 + !or %o2,%o0,%o0 + !or %o7,%o0,%o0 ! 64-bit result + srlx %o3,16,%g1 ! 34-bit carry + fmuld $ahi,$bb,$ahib + + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + fmuld $ahi,$bc,$ahic + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + fmuld $ahi,$bd,$ahid + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + + faddd $dota,$nloa,$nloa + faddd $dotb,$nlob,$nlob + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + addcc $j,8,$j + std $nloc,[%sp+$bias+$frame+16] + bz,pn %icc,.L1stskip + std $nlod,[%sp+$bias+$frame+24] + +.align 32 ! incidentally already aligned ! +.L1st: + add $ap,$j,%o4 + add $np,$j,%o5 + ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words + fzeros $alo + ld [%o4+4],$ahi_ + fzeros $ahi + ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words + fzeros $nlo + ld [%o5+4],$nhi_ + fzeros $nhi + + fxtod $alo,$alo + fxtod $ahi,$ahi + fxtod $nlo,$nlo + fxtod $nhi,$nhi + + ldx [%sp+$bias+$frame+0],%o0 + fmuld $alo,$ba,$aloa + ldx [%sp+$bias+$frame+8],%o1 + fmuld $nlo,$na,$nloa + ldx [%sp+$bias+$frame+16],%o2 + fmuld $alo,$bb,$alob + ldx [%sp+$bias+$frame+24],%o3 + fmuld $nlo,$nb,$nlob + + srlx %o0,16,%o7 + std $alo,[$ap_l+$j] ! save smashed ap[j] in double format + fmuld $alo,$bc,$aloc + add %o7,%o1,%o1 + std $ahi,[$ap_h+$j] + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + srlx %o1,16,%o7 + std $nlo,[$np_l+$j] ! save smashed np[j] in double format + fmuld $alo,$bd,$alod + add %o7,%o2,%o2 + std $nhi,[$np_h+$j] + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + srlx %o2,16,%o7 + fmuld $ahi,$ba,$ahia + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + and %o1,$mask,%o1 + and %o2,$mask,%o2 + fmuld $ahi,$bb,$ahib + sllx %o1,16,%o1 + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + sllx %o2,32,%o2 + fmuld $ahi,$bc,$ahic + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + or %o2,%o0,%o0 + fmuld $ahi,$bd,$ahid + or %o7,%o0,%o0 ! 64-bit result + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + addcc %g1,%o0,%o0 + faddd $dota,$nloa,$nloa + srlx %o3,16,%g1 ! 34-bit carry + faddd $dotb,$nlob,$nlob + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! tp[j-1]= + + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + std $nloc,[%sp+$bias+$frame+16] + std $nlod,[%sp+$bias+$frame+24] + + addcc $j,8,$j + bnz,pt %icc,.L1st + add $tp,8,$tp + +.L1stskip: + fdtox $dota,$dota + fdtox $dotb,$dotb + + ldx [%sp+$bias+$frame+0],%o0 + ldx [%sp+$bias+$frame+8],%o1 + ldx [%sp+$bias+$frame+16],%o2 + ldx [%sp+$bias+$frame+24],%o3 + + srlx %o0,16,%o7 + std $dota,[%sp+$bias+$frame+32] + add %o7,%o1,%o1 + std $dotb,[%sp+$bias+$frame+40] + srlx %o1,16,%o7 + add %o7,%o2,%o2 + srlx %o2,16,%o7 + add %o7,%o3,%o3 ! 
%o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + and %o1,$mask,%o1 + and %o2,$mask,%o2 + sllx %o1,16,%o1 + sllx %o2,32,%o2 + sllx %o3,48,%o7 + or %o1,%o0,%o0 + or %o2,%o0,%o0 + or %o7,%o0,%o0 ! 64-bit result + ldx [%sp+$bias+$frame+32],%o4 + addcc %g1,%o0,%o0 + ldx [%sp+$bias+$frame+40],%o5 + srlx %o3,16,%g1 ! 34-bit carry + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! tp[j-1]= + add $tp,8,$tp + + srlx %o4,16,%o7 + add %o7,%o5,%o5 + and %o4,$mask,%o4 + sllx %o5,16,%o7 + or %o7,%o4,%o4 + addcc %g1,%o4,%o4 + srlx %o5,48,%g1 + bcs,a %xcc,.+8 + add %g1,1,%g1 + + mov %g1,$carry + stx %o4,[$tp] ! tp[num-1]= + + ba .Louter + add $i,8,$i +.align 32 +.Louter: + sub %g0,$num,$j ! j=-num + add %sp,$bias+$frame+$locals,$tp + + add $ap,$j,%o3 + add $bp,$i,%o4 + + ld [%o3+4],%g1 ! bp[i] + ld [%o3+0],%o0 + ld [%o4+4],%g5 ! ap[0] + sllx %g1,32,%g1 + ld [%o4+0],%o1 + sllx %g5,32,%g5 + or %g1,%o0,%o0 + or %g5,%o1,%o1 + + ldx [$tp],%o2 ! tp[0] + mulx %o1,%o0,%o0 + addcc %o2,%o0,%o0 + mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0 + stx %o0,[%sp+$bias+$frame+0] + + ! transfer b[i] to FPU as 4x16-bit values + ldda [%o4+2]%asi,$ba + ldda [%o4+0]%asi,$bb + ldda [%o4+6]%asi,$bc + ldda [%o4+4]%asi,$bd + + ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values + ldda [%sp+$bias+$frame+6]%asi,$na + fxtod $ba,$ba + ldda [%sp+$bias+$frame+4]%asi,$nb + fxtod $bb,$bb + ldda [%sp+$bias+$frame+2]%asi,$nc + fxtod $bc,$bc + ldda [%sp+$bias+$frame+0]%asi,$nd + fxtod $bd,$bd + ldd [$ap_l+$j],$alo ! load a[j] in double format + fxtod $na,$na + ldd [$ap_h+$j],$ahi + fxtod $nb,$nb + ldd [$np_l+$j],$nlo ! load n[j] in double format + fxtod $nc,$nc + ldd [$np_h+$j],$nhi + fxtod $nd,$nd + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + fmuld $alo,$bd,$alod + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + fmuld $ahi,$ba,$ahia + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + fmuld $ahi,$bb,$ahib + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + fmuld $ahi,$bc,$ahic + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + fmuld $ahi,$bd,$ahid + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + + faddd $ahic,$nhic,$dota ! $nhic + faddd $ahid,$nhid,$dotb ! $nhid + + faddd $nloc,$nhia,$nloc + faddd $nlod,$nhib,$nlod + + fdtox $nloa,$nloa + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + std $nloc,[%sp+$bias+$frame+16] + add $j,8,$j + std $nlod,[%sp+$bias+$frame+24] + + ldd [$ap_l+$j],$alo ! load a[j] in double format + ldd [$ap_h+$j],$ahi + ldd [$np_l+$j],$nlo ! load n[j] in double format + ldd [$np_h+$j],$nhi + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + ldx [%sp+$bias+$frame+0],%o0 + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + ldx [%sp+$bias+$frame+8],%o1 + fmuld $alo,$bd,$alod + ldx [%sp+$bias+$frame+16],%o2 + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + ldx [%sp+$bias+$frame+24],%o3 + fmuld $ahi,$ba,$ahia + + srlx %o0,16,%o7 + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + add %o7,%o1,%o1 + fmuld $ahi,$bb,$ahib + srlx %o1,16,%o7 + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + add %o7,%o2,%o2 + fmuld $ahi,$bc,$ahic + srlx %o2,16,%o7 + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + ! why? 
+ and %o0,$mask,%o0 + fmuld $ahi,$bd,$ahid + and %o1,$mask,%o1 + and %o2,$mask,%o2 + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + sllx %o1,16,%o1 + faddd $dota,$nloa,$nloa + sllx %o2,32,%o2 + faddd $dotb,$nlob,$nlob + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahic,$nhic,$dota ! $nhic + or %o2,%o0,%o0 + faddd $ahid,$nhid,$dotb ! $nhid + or %o7,%o0,%o0 ! 64-bit result + ldx [$tp],%o7 + faddd $nloc,$nhia,$nloc + addcc %o7,%o0,%o0 + ! end-of-why? + faddd $nlod,$nhib,$nlod + srlx %o3,16,%g1 ! 34-bit carry + fdtox $nloa,$nloa + bcs,a %xcc,.+8 + add %g1,1,%g1 + + fdtox $nlob,$nlob + fdtox $nloc,$nloc + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + addcc $j,8,$j + std $nloc,[%sp+$bias+$frame+16] + bz,pn %icc,.Linnerskip + std $nlod,[%sp+$bias+$frame+24] + + ba .Linner + nop +.align 32 +.Linner: + ldd [$ap_l+$j],$alo ! load a[j] in double format + ldd [$ap_h+$j],$ahi + ldd [$np_l+$j],$nlo ! load n[j] in double format + ldd [$np_h+$j],$nhi + + fmuld $alo,$ba,$aloa + fmuld $nlo,$na,$nloa + fmuld $alo,$bb,$alob + fmuld $nlo,$nb,$nlob + fmuld $alo,$bc,$aloc + ldx [%sp+$bias+$frame+0],%o0 + faddd $aloa,$nloa,$nloa + fmuld $nlo,$nc,$nloc + ldx [%sp+$bias+$frame+8],%o1 + fmuld $alo,$bd,$alod + ldx [%sp+$bias+$frame+16],%o2 + faddd $alob,$nlob,$nlob + fmuld $nlo,$nd,$nlod + ldx [%sp+$bias+$frame+24],%o3 + fmuld $ahi,$ba,$ahia + + srlx %o0,16,%o7 + faddd $aloc,$nloc,$nloc + fmuld $nhi,$na,$nhia + add %o7,%o1,%o1 + fmuld $ahi,$bb,$ahib + srlx %o1,16,%o7 + faddd $alod,$nlod,$nlod + fmuld $nhi,$nb,$nhib + add %o7,%o2,%o2 + fmuld $ahi,$bc,$ahic + srlx %o2,16,%o7 + faddd $ahia,$nhia,$nhia + fmuld $nhi,$nc,$nhic + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + fmuld $ahi,$bd,$ahid + and %o1,$mask,%o1 + and %o2,$mask,%o2 + faddd $ahib,$nhib,$nhib + fmuld $nhi,$nd,$nhid + sllx %o1,16,%o1 + faddd $dota,$nloa,$nloa + sllx %o2,32,%o2 + faddd $dotb,$nlob,$nlob + sllx %o3,48,%o7 + or %o1,%o0,%o0 + faddd $ahic,$nhic,$dota ! $nhic + or %o2,%o0,%o0 + faddd $ahid,$nhid,$dotb ! $nhid + or %o7,%o0,%o0 ! 64-bit result + faddd $nloc,$nhia,$nloc + addcc %g1,%o0,%o0 + ldx [$tp+8],%o7 ! tp[j] + faddd $nlod,$nhib,$nlod + srlx %o3,16,%g1 ! 34-bit carry + fdtox $nloa,$nloa + bcs,a %xcc,.+8 + add %g1,1,%g1 + fdtox $nlob,$nlob + addcc %o7,%o0,%o0 + fdtox $nloc,$nloc + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! tp[j-1] + fdtox $nlod,$nlod + + std $nloa,[%sp+$bias+$frame+0] + std $nlob,[%sp+$bias+$frame+8] + std $nloc,[%sp+$bias+$frame+16] + addcc $j,8,$j + std $nlod,[%sp+$bias+$frame+24] + bnz,pt %icc,.Linner + add $tp,8,$tp + +.Linnerskip: + fdtox $dota,$dota + fdtox $dotb,$dotb + + ldx [%sp+$bias+$frame+0],%o0 + ldx [%sp+$bias+$frame+8],%o1 + ldx [%sp+$bias+$frame+16],%o2 + ldx [%sp+$bias+$frame+24],%o3 + + srlx %o0,16,%o7 + std $dota,[%sp+$bias+$frame+32] + add %o7,%o1,%o1 + std $dotb,[%sp+$bias+$frame+40] + srlx %o1,16,%o7 + add %o7,%o2,%o2 + srlx %o2,16,%o7 + add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15] + and %o0,$mask,%o0 + and %o1,$mask,%o1 + and %o2,$mask,%o2 + sllx %o1,16,%o1 + sllx %o2,32,%o2 + sllx %o3,48,%o7 + or %o1,%o0,%o0 + or %o2,%o0,%o0 + ldx [%sp+$bias+$frame+32],%o4 + or %o7,%o0,%o0 ! 64-bit result + ldx [%sp+$bias+$frame+40],%o5 + addcc %g1,%o0,%o0 + ldx [$tp+8],%o7 ! tp[j] + srlx %o3,16,%g1 ! 34-bit carry + bcs,a %xcc,.+8 + add %g1,1,%g1 + + addcc %o7,%o0,%o0 + bcs,a %xcc,.+8 + add %g1,1,%g1 + + stx %o0,[$tp] ! 
tp[j-1] + add $tp,8,$tp + + srlx %o4,16,%o7 + add %o7,%o5,%o5 + and %o4,$mask,%o4 + sllx %o5,16,%o7 + or %o7,%o4,%o4 + addcc %g1,%o4,%o4 + srlx %o5,48,%g1 + bcs,a %xcc,.+8 + add %g1,1,%g1 + + addcc $carry,%o4,%o4 + stx %o4,[$tp] ! tp[num-1] + mov %g1,$carry + bcs,a %xcc,.+8 + add $carry,1,$carry + + addcc $i,8,$i + bnz %icc,.Louter + nop + + add $tp,8,$tp ! adjust tp to point at the end + orn %g0,%g0,%g4 + sub %g0,$num,%o7 ! n=-num + ba .Lsub + subcc %g0,%g0,%g0 ! clear %icc.c + +.align 32 +.Lsub: + ldx [$tp+%o7],%o0 + add $np,%o7,%g1 + ld [%g1+0],%o2 + ld [%g1+4],%o3 + srlx %o0,32,%o1 + subccc %o0,%o2,%o2 + add $rp,%o7,%g1 + subccc %o1,%o3,%o3 + st %o2,[%g1+0] + add %o7,8,%o7 + brnz,pt %o7,.Lsub + st %o3,[%g1+4] + subc $carry,0,%g4 + sub %g0,$num,%o7 ! n=-num + ba .Lcopy + nop + +.align 32 +.Lcopy: + ldx [$tp+%o7],%o0 + add $rp,%o7,%g1 + ld [%g1+0],%o2 + ld [%g1+4],%o3 + stx %g0,[$tp+%o7] + and %o0,%g4,%o0 + srlx %o0,32,%o1 + andn %o2,%g4,%o2 + andn %o3,%g4,%o3 + or %o2,%o0,%o0 + or %o3,%o1,%o1 + st %o0,[%g1+0] + add %o7,8,%o7 + brnz,pt %o7,.Lcopy + st %o1,[%g1+4] + sub %g0,$num,%o7 ! n=-num + +.Lzap: + stx %g0,[$ap_l+%o7] + stx %g0,[$ap_h+%o7] + stx %g0,[$np_l+%o7] + stx %g0,[$np_h+%o7] + add %o7,8,%o7 + brnz,pt %o7,.Lzap + nop + + ldx [%sp+$bias+$frame+48],%o7 + wr %g0,%o7,%asi ! restore %asi + + mov 1,%i0 +.Lret: + ret + restore +.type $fname,#function +.size $fname,(.-$fname) +.asciz "Montgomery Multipltication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>" +.align 32 +___ + +$code =~ s/\`([^\`]*)\`/eval($1)/gem; + +# Below substitution makes it possible to compile without demanding +# VIS extentions on command line, e.g. -xarch=v9 vs. -xarch=v9a. I +# dare to do this, because VIS capability is detected at run-time now +# and this routine is not called on CPU not capable to execute it. Do +# note that fzeros is not the only VIS dependency! Another dependency +# is implicit and is just _a_ numerical value loaded to %asi register, +# which assembler can't recognize as VIS specific... +$code =~ s/fzeros\s+%f([0-9]+)/ + sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1) + /gem; + +print $code; +# flush +close STDOUT; diff --git a/openssl/crypto/bn/asm/via-mont.pl b/openssl/crypto/bn/asm/via-mont.pl new file mode 100644 index 00000000..c046a514 --- /dev/null +++ b/openssl/crypto/bn/asm/via-mont.pl @@ -0,0 +1,242 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# Wrapper around 'rep montmul', VIA-specific instruction accessing +# PadLock Montgomery Multiplier. The wrapper is designed as drop-in +# replacement for OpenSSL bn_mul_mont [first implemented in 0.9.9]. +# +# Below are interleaved outputs from 'openssl speed rsa dsa' for 4 +# different software configurations on 1.5GHz VIA Esther processor. +# Lines marked with "software integer" denote performance of hand- +# coded integer-only assembler found in OpenSSL 0.9.7. "Software SSE2" +# refers to hand-coded SSE2 Montgomery multiplication procedure found +# OpenSSL 0.9.9. "Hardware VIA SDK" refers to padlock_pmm routine from +# Padlock SDK 2.0.1 available for download from VIA, which naturally +# utilizes the magic 'repz montmul' instruction. 
And finally "hardware +# this" refers to *this* implementation which also uses 'repz montmul' +# +# sign verify sign/s verify/s +# rsa 512 bits 0.001720s 0.000140s 581.4 7149.7 software integer +# rsa 512 bits 0.000690s 0.000086s 1450.3 11606.0 software SSE2 +# rsa 512 bits 0.006136s 0.000201s 163.0 4974.5 hardware VIA SDK +# rsa 512 bits 0.000712s 0.000050s 1404.9 19858.5 hardware this +# +# rsa 1024 bits 0.008518s 0.000413s 117.4 2420.8 software integer +# rsa 1024 bits 0.004275s 0.000277s 233.9 3609.7 software SSE2 +# rsa 1024 bits 0.012136s 0.000260s 82.4 3844.5 hardware VIA SDK +# rsa 1024 bits 0.002522s 0.000116s 396.5 8650.9 hardware this +# +# rsa 2048 bits 0.050101s 0.001371s 20.0 729.6 software integer +# rsa 2048 bits 0.030273s 0.001008s 33.0 991.9 software SSE2 +# rsa 2048 bits 0.030833s 0.000976s 32.4 1025.1 hardware VIA SDK +# rsa 2048 bits 0.011879s 0.000342s 84.2 2921.7 hardware this +# +# rsa 4096 bits 0.327097s 0.004859s 3.1 205.8 software integer +# rsa 4096 bits 0.229318s 0.003859s 4.4 259.2 software SSE2 +# rsa 4096 bits 0.233953s 0.003274s 4.3 305.4 hardware VIA SDK +# rsa 4096 bits 0.070493s 0.001166s 14.2 857.6 hardware this +# +# dsa 512 bits 0.001342s 0.001651s 745.2 605.7 software integer +# dsa 512 bits 0.000844s 0.000987s 1185.3 1013.1 software SSE2 +# dsa 512 bits 0.001902s 0.002247s 525.6 444.9 hardware VIA SDK +# dsa 512 bits 0.000458s 0.000524s 2182.2 1909.1 hardware this +# +# dsa 1024 bits 0.003964s 0.004926s 252.3 203.0 software integer +# dsa 1024 bits 0.002686s 0.003166s 372.3 315.8 software SSE2 +# dsa 1024 bits 0.002397s 0.002823s 417.1 354.3 hardware VIA SDK +# dsa 1024 bits 0.000978s 0.001170s 1022.2 855.0 hardware this +# +# dsa 2048 bits 0.013280s 0.016518s 75.3 60.5 software integer +# dsa 2048 bits 0.009911s 0.011522s 100.9 86.8 software SSE2 +# dsa 2048 bits 0.009542s 0.011763s 104.8 85.0 hardware VIA SDK +# dsa 2048 bits 0.002884s 0.003352s 346.8 298.3 hardware this +# +# To give you some other reference point here is output for 2.4GHz P4 +# running hand-coded SSE2 bn_mul_mont found in 0.9.9, i.e. "software +# SSE2" in above terms. +# +# rsa 512 bits 0.000407s 0.000047s 2454.2 21137.0 +# rsa 1024 bits 0.002426s 0.000141s 412.1 7100.0 +# rsa 2048 bits 0.015046s 0.000491s 66.5 2034.9 +# rsa 4096 bits 0.109770s 0.002379s 9.1 420.3 +# dsa 512 bits 0.000438s 0.000525s 2281.1 1904.1 +# dsa 1024 bits 0.001346s 0.001595s 742.7 627.0 +# dsa 2048 bits 0.004745s 0.005582s 210.7 179.1 +# +# Conclusions: +# - VIA SDK leaves a *lot* of room for improvement (which this +# implementation successfully fills:-); +# - 'rep montmul' gives up to >3x performance improvement depending on +# key length; +# - in terms of absolute performance it delivers approximately as much +# as modern out-of-order 32-bit cores [again, for longer keys]. 
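[Editorial note -- not part of the diff. As the stack-layout comments in the code below spell out, the wrapper builds a small control block at the top of the stack and points %esi at it before issuing 'rep montmul', with %ecx holding the operand length converted from words to bits. Pictured as a C struct it would look roughly as follows; the struct and its field types are an illustrative assumption, only the field names and offsets come from the perlasm labels.]

#include <stdint.h>

/* hypothetical view of the frame at %esp when 'rep montmul' runs */
struct padlock_montmul_frame {
    /* the first six dwords are the layout specified by VIA */
    uint32_t  mZeroPrime;   /* *n0 = -np[0]^(-1) mod 2^32        */
    uint32_t *A;            /* padded copy of ap                 */
    uint32_t *B;            /* padded copy of bp                 */
    uint32_t *T;            /* tp work vector                    */
    uint32_t *M;            /* padded copy of np (the modulus)   */
    uint32_t *scratch;      /* 32-byte scratch area              */
    /* the remaining slots are private to this wrapper           */
    uint32_t *rp;           /* caller's result pointer           */
    uint32_t *saved_sp;     /* stack pointer restored on exit    */
    /* the 32-byte scratch area and four padded vectors follow   */
};

Note how the wrapper pads each vector by only $pad bytes and zero-fills the padding instead of reserving the 2K per vector the SDK suggests; the comments below credit this leaner allocation for a good part of the speed advantage over padlock_pmm.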
+ +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],"via-mont.pl"); + +# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num); +$func="bn_mul_mont_padlock"; + +$pad=16*1; # amount of reserved bytes on top of every vector + +# stack layout +$mZeroPrime=&DWP(0,"esp"); # these are specified by VIA +$A=&DWP(4,"esp"); +$B=&DWP(8,"esp"); +$T=&DWP(12,"esp"); +$M=&DWP(16,"esp"); +$scratch=&DWP(20,"esp"); +$rp=&DWP(24,"esp"); # these are mine +$sp=&DWP(28,"esp"); +# &DWP(32,"esp") # 32 byte scratch area +# &DWP(64+(4*$num+$pad)*0,"esp") # padded tp[num] +# &DWP(64+(4*$num+$pad)*1,"esp") # padded copy of ap[num] +# &DWP(64+(4*$num+$pad)*2,"esp") # padded copy of bp[num] +# &DWP(64+(4*$num+$pad)*3,"esp") # padded copy of np[num] +# Note that SDK suggests to unconditionally allocate 2K per vector. This +# has quite an impact on performance. It naturally depends on key length, +# but to give an example 1024 bit private RSA key operations suffer >30% +# penalty. I allocate only as much as actually required... + +&function_begin($func); + &xor ("eax","eax"); + &mov ("ecx",&wparam(5)); # num + # meet VIA's limitations for num [note that the specification + # expresses them in bits, while we work with amount of 32-bit words] + &test ("ecx",3); + &jnz (&label("leave")); # num % 4 != 0 + &cmp ("ecx",8); + &jb (&label("leave")); # num < 8 + &cmp ("ecx",1024); + &ja (&label("leave")); # num > 1024 + + &pushf (); + &cld (); + + &mov ("edi",&wparam(0)); # rp + &mov ("eax",&wparam(1)); # ap + &mov ("ebx",&wparam(2)); # bp + &mov ("edx",&wparam(3)); # np + &mov ("esi",&wparam(4)); # n0 + &mov ("esi",&DWP(0,"esi")); # *n0 + + &lea ("ecx",&DWP($pad,"","ecx",4)); # ecx becomes vector size in bytes + &lea ("ebp",&DWP(64,"","ecx",4)); # allocate 4 vectors + 64 bytes + &neg ("ebp"); + &add ("ebp","esp"); + &and ("ebp",-64); # align to cache-line + &xchg ("ebp","esp"); # alloca + + &mov ($rp,"edi"); # save rp + &mov ($sp,"ebp"); # save esp + + &mov ($mZeroPrime,"esi"); + &lea ("esi",&DWP(64,"esp")); # tp + &mov ($T,"esi"); + &lea ("edi",&DWP(32,"esp")); # scratch area + &mov ($scratch,"edi"); + &mov ("esi","eax"); + + &lea ("ebp",&DWP(-$pad,"ecx")); + &shr ("ebp",2); # restore original num value in ebp + + &xor ("eax","eax"); + + &mov ("ecx","ebp"); + &lea ("ecx",&DWP((32+$pad)/4,"ecx"));# padded tp + scratch + &data_byte(0xf3,0xab); # rep stosl, bzero + + &mov ("ecx","ebp"); + &lea ("edi",&DWP(64+$pad,"esp","ecx",4));# pointer to ap copy + &mov ($A,"edi"); + &data_byte(0xf3,0xa5); # rep movsl, memcpy + &mov ("ecx",$pad/4); + &data_byte(0xf3,0xab); # rep stosl, bzero pad + # edi points at the end of padded ap copy... + + &mov ("ecx","ebp"); + &mov ("esi","ebx"); + &mov ($B,"edi"); + &data_byte(0xf3,0xa5); # rep movsl, memcpy + &mov ("ecx",$pad/4); + &data_byte(0xf3,0xab); # rep stosl, bzero pad + # edi points at the end of padded bp copy... + + &mov ("ecx","ebp"); + &mov ("esi","edx"); + &mov ($M,"edi"); + &data_byte(0xf3,0xa5); # rep movsl, memcpy + &mov ("ecx",$pad/4); + &data_byte(0xf3,0xab); # rep stosl, bzero pad + # edi points at the end of padded np copy... + + # let magic happen... + &mov ("ecx","ebp"); + &mov ("esi","esp"); + &shl ("ecx",5); # convert word counter to bit counter + &align (4); + &data_byte(0xf3,0x0f,0xa6,0xc0);# rep montmul + + &mov ("ecx","ebp"); + &lea ("esi",&DWP(64,"esp")); # tp + # edi still points at the end of padded np copy... 
+ &neg ("ebp"); + &lea ("ebp",&DWP(-$pad,"edi","ebp",4)); # so just "rewind" + &mov ("edi",$rp); # restore rp + &xor ("edx","edx"); # i=0 and clear CF + +&set_label("sub",8); + &mov ("eax",&DWP(0,"esi","edx",4)); + &sbb ("eax",&DWP(0,"ebp","edx",4)); + &mov (&DWP(0,"edi","edx",4),"eax"); # rp[i]=tp[i]-np[i] + &lea ("edx",&DWP(1,"edx")); # i++ + &loop (&label("sub")); # doesn't affect CF! + + &mov ("eax",&DWP(0,"esi","edx",4)); # upmost overflow bit + &sbb ("eax",0); + &and ("esi","eax"); + ¬ ("eax"); + &mov ("ebp","edi"); + &and ("ebp","eax"); + &or ("esi","ebp"); # tp=carry?tp:rp + + &mov ("ecx","edx"); # num + &xor ("edx","edx"); # i=0 + +&set_label("copy",8); + &mov ("eax",&DWP(0,"esi","edx",4)); + &mov (&DWP(64,"esp","edx",4),"ecx"); # zap tp + &mov (&DWP(0,"edi","edx",4),"eax"); + &lea ("edx",&DWP(1,"edx")); # i++ + &loop (&label("copy")); + + &mov ("ebp",$sp); + &xor ("eax","eax"); + + &mov ("ecx",64/4); + &mov ("edi","esp"); # zap frame including scratch area + &data_byte(0xf3,0xab); # rep stosl, bzero + + # zap copies of ap, bp and np + &lea ("edi",&DWP(64+$pad,"esp","edx",4));# pointer to ap + &lea ("ecx",&DWP(3*$pad/4,"edx","edx",2)); + &data_byte(0xf3,0xab); # rep stosl, bzero + + &mov ("esp","ebp"); + &inc ("eax"); # signal "done" + &popf (); +&set_label("leave"); +&function_end($func); + +&asciz("Padlock Montgomery Multiplication, CRYPTOGAMS by <appro\@openssl.org>"); + +&asm_finish(); diff --git a/openssl/crypto/bn/asm/x86-mont.pl b/openssl/crypto/bn/asm/x86-mont.pl new file mode 100755 index 00000000..5cd3cd2e --- /dev/null +++ b/openssl/crypto/bn/asm/x86-mont.pl @@ -0,0 +1,591 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005 +# +# This is a "teaser" code, as it can be improved in several ways... +# First of all non-SSE2 path should be implemented (yes, for now it +# performs Montgomery multiplication/convolution only on SSE2-capable +# CPUs such as P4, others fall down to original code). Then inner loop +# can be unrolled and modulo-scheduled to improve ILP and possibly +# moved to 128-bit XMM register bank (though it would require input +# rearrangement and/or increase bus bandwidth utilization). Dedicated +# squaring procedure should give further performance improvement... +# Yet, for being draft, the code improves rsa512 *sign* benchmark by +# 110%(!), rsa1024 one - by 70% and rsa4096 - by 20%:-) + +# December 2006 +# +# Modulo-scheduling SSE2 loops results in further 15-20% improvement. +# Integer-only code [being equipped with dedicated squaring procedure] +# gives ~40% on rsa512 sign benchmark... + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +$sse2=0; +for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } + +&external_label("OPENSSL_ia32cap_P") if ($sse2); + +&function_begin("bn_mul_mont"); + +$i="edx"; +$j="ecx"; +$ap="esi"; $tp="esi"; # overlapping variables!!! +$rp="edi"; $bp="edi"; # overlapping variables!!! 
+$np="ebp";
+$num="ebx";
+
+$_num=&DWP(4*0,"esp");				# stack top layout
+$_rp=&DWP(4*1,"esp");
+$_ap=&DWP(4*2,"esp");
+$_bp=&DWP(4*3,"esp");
+$_np=&DWP(4*4,"esp");
+$_n0=&DWP(4*5,"esp");	$_n0q=&QWP(4*5,"esp");
+$_sp=&DWP(4*6,"esp");
+$_bpend=&DWP(4*7,"esp");
+$frame=32;					# size of above frame rounded up to 16n
+
+	&xor	("eax","eax");
+	&mov	("edi",&wparam(5));	# int num
+	&cmp	("edi",4);
+	&jl	(&label("just_leave"));
+
+	&lea	("esi",&wparam(0));	# put aside pointer to argument block
+	&lea	("edx",&wparam(1));	# load ap
+	&mov	("ebp","esp");		# saved stack pointer!
+	&add	("edi",2);		# extra two words on top of tp
+	&neg	("edi");
+	&lea	("esp",&DWP(-$frame,"esp","edi",4));	# alloca($frame+4*(num+2))
+	&neg	("edi");
+
+	# minimize cache contention by arranging a 2K window between the
+	# stack pointer and the ap argument [np is also a position-sensitive
+	# vector, but it's assumed to be near ap, as it's allocated at ~same
+	# time].
+	&mov	("eax","esp");
+	&sub	("eax","edx");
+	&and	("eax",2047);
+	&sub	("esp","eax");		# this aligns sp and ap modulo 2048
+
+	&xor	("edx","esp");
+	&and	("edx",2048);
+	&xor	("edx",2048);
+	&sub	("esp","edx");		# this splits them apart modulo 4096
+
+	&and	("esp",-64);		# align to cache line
+
+	################################# load argument block...
+	&mov	("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
+	&mov	("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
+	&mov	("ecx",&DWP(2*4,"esi"));# const BN_ULONG *bp
+	&mov	("edx",&DWP(3*4,"esi"));# const BN_ULONG *np
+	&mov	("esi",&DWP(4*4,"esi"));# const BN_ULONG *n0
+	#&mov	("edi",&DWP(5*4,"esi"));# int num
+
+	&mov	("esi",&DWP(0,"esi"));	# pull n0[0]
+	&mov	($_rp,"eax");		# ... save a copy of argument block
+	&mov	($_ap,"ebx");
+	&mov	($_bp,"ecx");
+	&mov	($_np,"edx");
+	&mov	($_n0,"esi");
+	&lea	($num,&DWP(-3,"edi"));	# num=num-1 to assist modulo-scheduling
+	#&mov	($_num,$num);		# redundant as $num is not reused
+	&mov	($_sp,"ebp");		# saved stack pointer!
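+
+	# the SSE2 path below keeps both running carries in 64-bit MMX
+	# registers, so every 32x32 pmuludq product is folded in with plain
+	# paddq and split with pand/psrlq instead of explicit adc chains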
+ +if($sse2) { +$acc0="mm0"; # mmx register bank layout +$acc1="mm1"; +$car0="mm2"; +$car1="mm3"; +$mul0="mm4"; +$mul1="mm5"; +$temp="mm6"; +$mask="mm7"; + + &picmeup("eax","OPENSSL_ia32cap_P"); + &bt (&DWP(0,"eax"),26); + &jnc (&label("non_sse2")); + + &mov ("eax",-1); + &movd ($mask,"eax"); # mask 32 lower bits + + &mov ($ap,$_ap); # load input pointers + &mov ($bp,$_bp); + &mov ($np,$_np); + + &xor ($i,$i); # i=0 + &xor ($j,$j); # j=0 + + &movd ($mul0,&DWP(0,$bp)); # bp[0] + &movd ($mul1,&DWP(0,$ap)); # ap[0] + &movd ($car1,&DWP(0,$np)); # np[0] + + &pmuludq($mul1,$mul0); # ap[0]*bp[0] + &movq ($car0,$mul1); + &movq ($acc0,$mul1); # I wish movd worked for + &pand ($acc0,$mask); # inter-register transfers + + &pmuludq($mul1,$_n0q); # *=n0 + + &pmuludq($car1,$mul1); # "t[0]"*np[0]*n0 + &paddq ($car1,$acc0); + + &movd ($acc1,&DWP(4,$np)); # np[1] + &movd ($acc0,&DWP(4,$ap)); # ap[1] + + &psrlq ($car0,32); + &psrlq ($car1,32); + + &inc ($j); # j++ +&set_label("1st",16); + &pmuludq($acc0,$mul0); # ap[j]*bp[0] + &pmuludq($acc1,$mul1); # np[j]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1] + &paddq ($car1,$acc0); # +=ap[j]*bp[0]; + &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1] + &psrlq ($car0,32); + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[j-1]= + &psrlq ($car1,32); + + &lea ($j,&DWP(1,$j)); + &cmp ($j,$num); + &jl (&label("1st")); + + &pmuludq($acc0,$mul0); # ap[num-1]*bp[0] + &pmuludq($acc1,$mul1); # np[num-1]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &paddq ($car1,$acc0); # +=ap[num-1]*bp[0]; + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]= + + &psrlq ($car0,32); + &psrlq ($car1,32); + + &paddq ($car1,$car0); + &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1] + + &inc ($i); # i++ +&set_label("outer"); + &xor ($j,$j); # j=0 + + &movd ($mul0,&DWP(0,$bp,$i,4)); # bp[i] + &movd ($mul1,&DWP(0,$ap)); # ap[0] + &movd ($temp,&DWP($frame,"esp")); # tp[0] + &movd ($car1,&DWP(0,$np)); # np[0] + &pmuludq($mul1,$mul0); # ap[0]*bp[i] + + &paddq ($mul1,$temp); # +=tp[0] + &movq ($acc0,$mul1); + &movq ($car0,$mul1); + &pand ($acc0,$mask); + + &pmuludq($mul1,$_n0q); # *=n0 + + &pmuludq($car1,$mul1); + &paddq ($car1,$acc0); + + &movd ($temp,&DWP($frame+4,"esp")); # tp[1] + &movd ($acc1,&DWP(4,$np)); # np[1] + &movd ($acc0,&DWP(4,$ap)); # ap[1] + + &psrlq ($car0,32); + &psrlq ($car1,32); + &paddq ($car0,$temp); # +=tp[1] + + &inc ($j); # j++ + &dec ($num); +&set_label("inner"); + &pmuludq($acc0,$mul0); # ap[j]*bp[i] + &pmuludq($acc1,$mul1); # np[j]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &movd ($temp,&DWP($frame+4,"esp",$j,4));# tp[j+1] + &pand ($acc0,$mask); + &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1] + &paddq ($car1,$acc0); # +=ap[j]*bp[i]+tp[j] + &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1] + &psrlq ($car0,32); + &movd (&DWP($frame-4,"esp",$j,4),$car1);# tp[j-1]= + &psrlq ($car1,32); + &paddq ($car0,$temp); # +=tp[j+1] + + &dec ($num); + &lea ($j,&DWP(1,$j)); # j++ + &jnz (&label("inner")); + + &mov ($num,$j); + &pmuludq($acc0,$mul0); # ap[num-1]*bp[i] + &pmuludq($acc1,$mul1); # np[num-1]*m1 + &paddq ($car0,$acc0); # +=c0 + &paddq ($car1,$acc1); # +=c1 + + &movq ($acc0,$car0); + &pand ($acc0,$mask); + &paddq ($car1,$acc0); # +=ap[num-1]*bp[i]+tp[num-1] + &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]= + &psrlq ($car0,32); + &psrlq ($car1,32); + + &movd 
($temp,&DWP($frame+4,"esp",$num,4)); # += tp[num] + &paddq ($car1,$car0); + &paddq ($car1,$temp); + &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1] + + &lea ($i,&DWP(1,$i)); # i++ + &cmp ($i,$num); + &jle (&label("outer")); + + &emms (); # done with mmx bank + &jmp (&label("common_tail")); + +&set_label("non_sse2",16); +} + +if (0) { + &mov ("esp",$_sp); + &xor ("eax","eax"); # signal "not fast enough [yet]" + &jmp (&label("just_leave")); + # While the below code provides competitive performance for + # all key lengthes on modern Intel cores, it's still more + # than 10% slower for 4096-bit key elsewhere:-( "Competitive" + # means compared to the original integer-only assembler. + # 512-bit RSA sign is better by ~40%, but that's about all + # one can say about all CPUs... +} else { +$inp="esi"; # integer path uses these registers differently +$word="edi"; +$carry="ebp"; + + &mov ($inp,$_ap); + &lea ($carry,&DWP(1,$num)); + &mov ($word,$_bp); + &xor ($j,$j); # j=0 + &mov ("edx",$inp); + &and ($carry,1); # see if num is even + &sub ("edx",$word); # see if ap==bp + &lea ("eax",&DWP(4,$word,$num,4)); # &bp[num] + &or ($carry,"edx"); + &mov ($word,&DWP(0,$word)); # bp[0] + &jz (&label("bn_sqr_mont")); + &mov ($_bpend,"eax"); + &mov ("eax",&DWP(0,$inp)); + &xor ("edx","edx"); + +&set_label("mull",16); + &mov ($carry,"edx"); + &mul ($word); # ap[j]*bp[0] + &add ($carry,"eax"); + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1] + &cmp ($j,$num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("mull")); + + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*bp[0] + &mov ($word,$_n0); + &add ("eax",$carry); + &mov ($inp,$_np); + &adc ("edx",0); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &mov (&DWP($frame,"esp",$num,4),"eax"); # tp[num-1]= + &xor ($j,$j); + &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]= + &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]= + + &mov ("eax",&DWP(0,$inp)); # np[0] + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ("eax",&DWP(4,$inp)); # np[1] + &adc ("edx",0); + &inc ($j); + + &jmp (&label("2ndmadd")); + +&set_label("1stmadd",16); + &mov ($carry,"edx"); + &mul ($word); # ap[j]*bp[i] + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("1stmadd")); + + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*bp[i] + &add ("eax",&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &mov ($word,$_n0); + &adc ("edx",0); + &mov ($inp,$_np); + &add ($carry,"eax"); + &adc ("edx",0); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &xor ($j,$j); + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &mov (&DWP($frame,"esp",$num,4),$carry); # tp[num-1]= + &adc ($j,0); + &mov ("eax",&DWP(0,$inp)); # np[0] + &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]= + &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ("eax",&DWP(4,$inp)); # np[1] + &adc ("edx",0); + &mov ($j,1); + +&set_label("2ndmadd",16); + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+1] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j-1]= + 
&jl (&label("2ndmadd")); + + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &adc ("edx",0); + &add ($carry,"eax"); + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]= + + &xor ("eax","eax"); + &mov ($j,$_bp); # &bp[i] + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1] + &lea ($j,&DWP(4,$j)); + &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]= + &cmp ($j,$_bpend); + &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]= + &je (&label("common_tail")); + + &mov ($word,&DWP(0,$j)); # bp[i+1] + &mov ($inp,$_ap); + &mov ($_bp,$j); # &bp[++i] + &xor ($j,$j); + &xor ("edx","edx"); + &mov ("eax",&DWP(0,$inp)); + &jmp (&label("1stmadd")); + +&set_label("bn_sqr_mont",16); +$sbit=$num; + &mov ($_num,$num); + &mov ($_bp,$j); # i=0 + + &mov ("eax",$word); # ap[0] + &mul ($word); # ap[0]*ap[0] + &mov (&DWP($frame,"esp"),"eax"); # tp[0]= + &mov ($sbit,"edx"); + &shr ("edx",1); + &and ($sbit,1); + &inc ($j); +&set_label("sqr",16); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j] + &mov ($carry,"edx"); + &mul ($word); # ap[j]*ap[0] + &add ("eax",$carry); + &lea ($j,&DWP(1,$j)); + &adc ("edx",0); + &lea ($carry,&DWP(0,$sbit,"eax",2)); + &shr ("eax",31); + &cmp ($j,$_num); + &mov ($sbit,"eax"); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &jl (&label("sqr")); + + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[num-1] + &mov ($carry,"edx"); + &mul ($word); # ap[num-1]*ap[0] + &add ("eax",$carry); + &mov ($word,$_n0); + &adc ("edx",0); + &mov ($inp,$_np); + &lea ($carry,&DWP(0,$sbit,"eax",2)); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + &shr ("eax",31); + &mov (&DWP($frame,"esp",$j,4),$carry); # tp[num-1]= + + &lea ($carry,&DWP(0,"eax","edx",2)); + &mov ("eax",&DWP(0,$inp)); # np[0] + &shr ("edx",31); + &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num]= + &mov (&DWP($frame+8,"esp",$j,4),"edx"); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &mov ($num,$j); + &adc ("edx",0); + &mov ("eax",&DWP(4,$inp)); # np[1] + &mov ($j,1); + +&set_label("3rdmadd",16); + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(4,$inp,$j,4)); # np[j+1] + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j-1]= + + &mov ($carry,"edx"); + &mul ($word); # np[j+1]*m + &add ($carry,&DWP($frame+4,"esp",$j,4)); # +=tp[j+1] + &lea ($j,&DWP(2,$j)); + &adc ("edx",0); + &add ($carry,"eax"); + &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+2] + &adc ("edx",0); + &cmp ($j,$num); + &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j]= + &jl (&label("3rdmadd")); + + &mov ($carry,"edx"); + &mul ($word); # np[j]*m + &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1] + &adc ("edx",0); + &add ($carry,"eax"); + &adc ("edx",0); + &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]= + + &mov ($j,$_bp); # i + &xor ("eax","eax"); + &mov ($inp,$_ap); + &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num] + &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1] + &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]= + &cmp ($j,$num); + &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]= + &je (&label("common_tail")); + + &mov ($word,&DWP(4,$inp,$j,4)); # ap[i] + &lea ($j,&DWP(1,$j)); + &mov ("eax",$word); + &mov ($_bp,$j); # ++i + &mul ($word); # ap[i]*ap[i] + &add ("eax",&DWP($frame,"esp",$j,4)); # +=tp[i] + &adc ("edx",0); + &mov 
(&DWP($frame,"esp",$j,4),"eax"); # tp[i]= + &xor ($carry,$carry); + &cmp ($j,$num); + &lea ($j,&DWP(1,$j)); + &je (&label("sqrlast")); + + &mov ($sbit,"edx"); # zaps $num + &shr ("edx",1); + &and ($sbit,1); +&set_label("sqradd",16); + &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j] + &mov ($carry,"edx"); + &mul ($word); # ap[j]*ap[i] + &add ("eax",$carry); + &lea ($carry,&DWP(0,"eax","eax")); + &adc ("edx",0); + &shr ("eax",31); + &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j] + &lea ($j,&DWP(1,$j)); + &adc ("eax",0); + &add ($carry,$sbit); + &adc ("eax",0); + &cmp ($j,$_num); + &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]= + &mov ($sbit,"eax"); + &jle (&label("sqradd")); + + &mov ($carry,"edx"); + &lea ("edx",&DWP(0,$sbit,"edx",2)); + &shr ($carry,31); +&set_label("sqrlast"); + &mov ($word,$_n0); + &mov ($inp,$_np); + &imul ($word,&DWP($frame,"esp")); # n0*tp[0] + + &add ("edx",&DWP($frame,"esp",$j,4)); # +=tp[num] + &mov ("eax",&DWP(0,$inp)); # np[0] + &adc ($carry,0); + &mov (&DWP($frame,"esp",$j,4),"edx"); # tp[num]= + &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num+1]= + + &mul ($word); # np[0]*m + &add ("eax",&DWP($frame,"esp")); # +=tp[0] + &lea ($num,&DWP(-1,$j)); + &adc ("edx",0); + &mov ($j,1); + &mov ("eax",&DWP(4,$inp)); # np[1] + + &jmp (&label("3rdmadd")); +} + +&set_label("common_tail",16); + &mov ($np,$_np); # load modulus pointer + &mov ($rp,$_rp); # load result pointer + &lea ($tp,&DWP($frame,"esp")); # [$ap and $bp are zapped] + + &mov ("eax",&DWP(0,$tp)); # tp[0] + &mov ($j,$num); # j=num-1 + &xor ($i,$i); # i=0 and clear CF! + +&set_label("sub",16); + &sbb ("eax",&DWP(0,$np,$i,4)); + &mov (&DWP(0,$rp,$i,4),"eax"); # rp[i]=tp[i]-np[i] + &dec ($j); # doesn't affect CF! + &mov ("eax",&DWP(4,$tp,$i,4)); # tp[i+1] + &lea ($i,&DWP(1,$i)); # i++ + &jge (&label("sub")); + + &sbb ("eax",0); # handle upmost overflow bit + &and ($tp,"eax"); + ¬ ("eax"); + &mov ($np,$rp); + &and ($np,"eax"); + &or ($tp,$np); # tp=carry?tp:rp + +&set_label("copy",16); # copy or in-place refresh + &mov ("eax",&DWP(0,$tp,$num,4)); + &mov (&DWP(0,$rp,$num,4),"eax"); # rp[i]=tp[i] + &mov (&DWP($frame,"esp",$num,4),$j); # zap temporary vector + &dec ($num); + &jge (&label("copy")); + + &mov ("esp",$_sp); # pull saved stack pointer + &mov ("eax",1); +&set_label("just_leave"); +&function_end("bn_mul_mont"); + +&asciz("Montgomery Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>"); + +&asm_finish(); diff --git a/openssl/crypto/bn/asm/x86.pl b/openssl/crypto/bn/asm/x86.pl new file mode 100644 index 00000000..1bc4f1bb --- /dev/null +++ b/openssl/crypto/bn/asm/x86.pl @@ -0,0 +1,28 @@ +#!/usr/local/bin/perl + +push(@INC,"perlasm","../../perlasm"); +require "x86asm.pl"; + +require("x86/mul_add.pl"); +require("x86/mul.pl"); +require("x86/sqr.pl"); +require("x86/div.pl"); +require("x86/add.pl"); +require("x86/sub.pl"); +require("x86/comba.pl"); + +&asm_init($ARGV[0],$0); + +&bn_mul_add_words("bn_mul_add_words"); +&bn_mul_words("bn_mul_words"); +&bn_sqr_words("bn_sqr_words"); +&bn_div_words("bn_div_words"); +&bn_add_words("bn_add_words"); +&bn_sub_words("bn_sub_words"); +&bn_mul_comba("bn_mul_comba8",8); +&bn_mul_comba("bn_mul_comba4",4); +&bn_sqr_comba("bn_sqr_comba8",8); +&bn_sqr_comba("bn_sqr_comba4",4); + +&asm_finish(); + diff --git a/openssl/crypto/bn/asm/x86/add.pl b/openssl/crypto/bn/asm/x86/add.pl new file mode 100644 index 00000000..0b5cf583 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/add.pl @@ -0,0 +1,76 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_add_words + { + local($name)=@_; + + 
&function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &add($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &add($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *a + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/comba.pl b/openssl/crypto/bn/asm/x86/comba.pl new file mode 100644 index 00000000..22912536 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/comba.pl @@ -0,0 +1,277 @@ +#!/usr/local/bin/perl +# x86 assember + +sub mul_add_c + { + local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("mul a[$ai]*b[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + &mul("edx"); + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # laod next a + &mov("eax",&wparam(0)) if $pos > 0; # load r[] + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0; # laod next b + &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1; # laod next b + ### + &adc($c2,0); + # is pos > 1, it means it is the last loop + &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # laod next a + } + +sub sqr_add_c + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded. + # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$b,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add($c0,"eax"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + ### + &adc($c1,"edx"); + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb); + ### + &adc($c2,0); + # is pos > 1, it means it is the last loop + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + } + +sub sqr_add_c2 + { + local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_; + + # pos == -1 if eax and edx are pre-loaded, 0 to load from next + # words, and 1 if load return value + + &comment("sqr a[$ai]*a[$bi]"); + + # "eax" and "edx" will always be pre-loaded. 
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ; + # &mov("edx",&DWP($bi*4,$a,"",0)); + + if ($ai == $bi) + { &mul("eax");} + else + { &mul("edx");} + &add("eax","eax"); + ### + &adc("edx","edx"); + ### + &adc($c2,0); + &add($c0,"eax"); + &adc($c1,"edx"); + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a + &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b + &adc($c2,0); + &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; + &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb); + ### + } + +sub bn_mul_comba + { + local($name,$num)=@_; + local($a,$b,$c0,$c1,$c2); + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($tot,$end); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $b="edi"; + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + &push("esi"); + &mov($a,&wparam(1)); + &push("edi"); + &mov($b,&wparam(2)); + &push("ebp"); + &push("ebx"); + + &xor($c0,$c0); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + &xor($c1,$c1); + &mov("edx",&DWP(0,$b,"",0)); # load the first second + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("################## Calculate word $i"); + + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($j+1) == $end) + { + $v=1; + $v=2 if (($i+1) == $tot); + } + else + { $v=0; } + if (($j+1) != $end) + { + $na=($ai-1); + $nb=($bi+1); + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } +#printf STDERR "[$ai,$bi] -> [$na,$nb]\n"; + &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb); + if ($v) + { + &comment("saved r[$i]"); + # &mov("eax",&wparam(0)); + # &mov(&DWP($i*4,"eax","",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &comment("save r[$i]"); + # &mov("eax",&wparam(0)); + &mov(&DWP($i*4,"eax","",0),$c0); + + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + +sub bn_sqr_comba + { + local($name,$num)=@_; + local($r,$a,$c0,$c1,$c2)=@_; + local($i,$as,$ae,$bs,$be,$ai,$bi); + local($b,$tot,$end,$half); + + &function_begin_B($name,""); + + $c0="ebx"; + $c1="ecx"; + $c2="ebp"; + $a="esi"; + $r="edi"; + + &push("esi"); + &push("edi"); + &push("ebp"); + &push("ebx"); + &mov($r,&wparam(0)); + &mov($a,&wparam(1)); + &xor($c0,$c0); + &xor($c1,$c1); + &mov("eax",&DWP(0,$a,"",0)); # load the first word + + $as=0; + $ae=0; + $bs=0; + $be=0; + $tot=$num+$num-1; + + for ($i=0; $i<$tot; $i++) + { + $ai=$as; + $bi=$bs; + $end=$be+1; + + &comment("############### Calculate word $i"); + for ($j=$bs; $j<$end; $j++) + { + &xor($c2,$c2) if ($j == $bs); + if (($ai-1) < ($bi+1)) + { + $v=1; + $v=2 if ($i+1) == $tot; + } + else + { $v=0; } + if (!$v) + { + $na=$ai-1; + $nb=$bi+1; + } + else + { + $na=$as+($i < ($num-1)); + $nb=$bs+($i >= ($num-1)); + } + if ($ai == $bi) + { + &sqr_add_c($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + else + { + &sqr_add_c2($r,$a,$ai,$bi, + $c0,$c1,$c2,$v,$i,$na,$nb); + } + if ($v) + { + &comment("saved r[$i]"); + #&mov(&DWP($i*4,$r,"",0),$c0); + ($c0,$c1,$c2)=($c1,$c2,$c0); + last; + } + $ai--; + $bi++; + } + $as++ if ($i < ($num-1)); + $ae++ if ($i >= ($num-1)); + + $bs++ if ($i >= ($num-1)); + $be++ if ($i < ($num-1)); + } + &mov(&DWP($i*4,$r,"",0),$c0); + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/div.pl 
b/openssl/crypto/bn/asm/x86/div.pl new file mode 100644 index 00000000..0e90152c --- /dev/null +++ b/openssl/crypto/bn/asm/x86/div.pl @@ -0,0 +1,15 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_div_words + { + local($name)=@_; + + &function_begin($name,""); + &mov("edx",&wparam(0)); # + &mov("eax",&wparam(1)); # + &mov("ebx",&wparam(2)); # + &div("ebx"); + &function_end($name); + } +1; diff --git a/openssl/crypto/bn/asm/x86/f b/openssl/crypto/bn/asm/x86/f new file mode 100644 index 00000000..22e41122 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/f @@ -0,0 +1,3 @@ +#!/usr/local/bin/perl +# x86 assember + diff --git a/openssl/crypto/bn/asm/x86/mul.pl b/openssl/crypto/bn/asm/x86/mul.pl new file mode 100644 index 00000000..674cb9b0 --- /dev/null +++ b/openssl/crypto/bn/asm/x86/mul.pl @@ -0,0 +1,77 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_mul_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ecx"; + $r="edi"; + $c="esi"; + $num="ebp"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + &mov($w,&wparam(3)); # + + &and($num,0xfffffff8); # num / 8 + &jz(&label("mw_finish")); + + &set_label("mw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a,"",0)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t); + + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &add($a,32); + &add($r,32); + &sub($num,8); + &jz(&label("mw_finish")); + &jmp(&label("mw_loop")); + + &set_label("mw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jnz(&label("mw_finish2")); + &jmp(&label("mw_end")); + + &set_label("mw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0));# *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + # XXX + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t); + &mov($c,"edx"); # c= H(t); + &dec($num) if ($i != 7-1); + &jz(&label("mw_end")) if ($i != 7-1); + } + &set_label("mw_end",0); + &mov("eax",$c); + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/mul_add.pl b/openssl/crypto/bn/asm/x86/mul_add.pl new file mode 100644 index 00000000..61830d3a --- /dev/null +++ b/openssl/crypto/bn/asm/x86/mul_add.pl @@ -0,0 +1,87 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_mul_add_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $Low="eax"; + $High="edx"; + $a="ebx"; + $w="ebp"; + $r="edi"; + $c="esi"; + + &xor($c,$c); # clear carry + &mov($r,&wparam(0)); # + + &mov("ecx",&wparam(2)); # + &mov($a,&wparam(1)); # + + &and("ecx",0xfffffff8); # num / 8 + &mov($w,&wparam(3)); # + + &push("ecx"); # Up the stack for a tmp variable + + &jz(&label("maw_finish")); + + &set_label("maw_loop",0); + + &mov(&swtmp(0),"ecx"); # + + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + + &mov("eax",&DWP($i,$a,"",0)); # *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+= *r + &mov($c,&DWP($i,$r,"",0)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &add("eax",$c); # L(t)+=c + &adc("edx",0); # H(t)+=carry + &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + } + + &comment(""); + &mov("ecx",&swtmp(0)); # + &add($a,32); + &add($r,32); + &sub("ecx",8); + &jnz(&label("maw_loop")); + + &set_label("maw_finish",0); + &mov("ecx",&wparam(2)); # get num + &and("ecx",7); + &jnz(&label("maw_finish2")); 
# helps branch prediction + &jmp(&label("maw_end")); + + &set_label("maw_finish2",1); + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0));# *a + &mul($w); # *a * w + &add("eax",$c); # L(t)+=c + &mov($c,&DWP($i*4,$r,"",0)); # L(t)+= *r + &adc("edx",0); # H(t)+=carry + &add("eax",$c); + &adc("edx",0); # H(t)+=carry + &dec("ecx") if ($i != 7-1); + &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t); + &mov($c,"edx"); # c= H(t); + &jz(&label("maw_end")) if ($i != 7-1); + } + &set_label("maw_end",0); + &mov("eax",$c); + + &pop("ecx"); # clear variable from + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/sqr.pl b/openssl/crypto/bn/asm/x86/sqr.pl new file mode 100644 index 00000000..1f90993c --- /dev/null +++ b/openssl/crypto/bn/asm/x86/sqr.pl @@ -0,0 +1,60 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_sqr_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $r="esi"; + $a="edi"; + $num="ebx"; + + &mov($r,&wparam(0)); # + &mov($a,&wparam(1)); # + &mov($num,&wparam(2)); # + + &and($num,0xfffffff8); # num / 8 + &jz(&label("sw_finish")); + + &set_label("sw_loop",0); + for ($i=0; $i<32; $i+=4) + { + &comment("Round $i"); + &mov("eax",&DWP($i,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*2,$r,"",0),"eax"); # + &mov(&DWP($i*2+4,$r,"",0),"edx");# + } + + &comment(""); + &add($a,32); + &add($r,64); + &sub($num,8); + &jnz(&label("sw_loop")); + + &set_label("sw_finish",0); + &mov($num,&wparam(2)); # get num + &and($num,7); + &jz(&label("sw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov("eax",&DWP($i*4,$a,"",0)); # *a + # XXX + &mul("eax"); # *a * *a + &mov(&DWP($i*8,$r,"",0),"eax"); # + &dec($num) if ($i != 7-1); + &mov(&DWP($i*8+4,$r,"",0),"edx"); + &jz(&label("sw_end")) if ($i != 7-1); + } + &set_label("sw_end",0); + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86/sub.pl b/openssl/crypto/bn/asm/x86/sub.pl new file mode 100644 index 00000000..837b0e1b --- /dev/null +++ b/openssl/crypto/bn/asm/x86/sub.pl @@ -0,0 +1,76 @@ +#!/usr/local/bin/perl +# x86 assember + +sub bn_sub_words + { + local($name)=@_; + + &function_begin($name,""); + + &comment(""); + $a="esi"; + $b="edi"; + $c="eax"; + $r="ebx"; + $tmp1="ecx"; + $tmp2="edx"; + $num="ebp"; + + &mov($r,&wparam(0)); # get r + &mov($a,&wparam(1)); # get a + &mov($b,&wparam(2)); # get b + &mov($num,&wparam(3)); # get num + &xor($c,$c); # clear carry + &and($num,0xfffffff8); # num / 8 + + &jz(&label("aw_finish")); + + &set_label("aw_loop",0); + for ($i=0; $i<8; $i++) + { + &comment("Round $i"); + + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0)); # *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *r + } + + &comment(""); + &add($a,32); + &add($b,32); + &add($r,32); + &sub($num,8); + &jnz(&label("aw_loop")); + + &set_label("aw_finish",0); + &mov($num,&wparam(3)); # get num + &and($num,7); + &jz(&label("aw_end")); + + for ($i=0; $i<7; $i++) + { + &comment("Tail Round $i"); + &mov($tmp1,&DWP($i*4,$a,"",0)); # *a + &mov($tmp2,&DWP($i*4,$b,"",0));# *b + &sub($tmp1,$c); + &mov($c,0); + &adc($c,$c); + &sub($tmp1,$tmp2); + &adc($c,0); + &dec($num) if ($i != 6); + &mov(&DWP($i*4,$r,"",0),$tmp1); # *a + &jz(&label("aw_end")) if ($i != 6); + } + &set_label("aw_end",0); + +# &mov("eax",$c); # $c is "eax" + + &function_end($name); + } + +1; diff --git a/openssl/crypto/bn/asm/x86_64-gcc.c b/openssl/crypto/bn/asm/x86_64-gcc.c 
new file mode 100644
index 00000000..acb0b401
--- /dev/null
+++ b/openssl/crypto/bn/asm/x86_64-gcc.c
@@ -0,0 +1,606 @@
+#include "../bn_lcl.h"
+#if !(defined(__GNUC__) && __GNUC__>=2)
+# include "../bn_asm.c"	/* kind of dirty hack for Sun Studio */
+#else
+/*
+ * x86_64 BIGNUM accelerator version 0.1, December 2002.
+ *
+ * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ *
+ * Q. Version 0.1? It doesn't sound like Andy; he used to assign real
+ *    versions, like 1.0...
+ * A. Well, that's because this code is basically a quick-n-dirty
+ *    proof-of-concept hack. As you can see it's implemented with
+ *    inline assembler, which means that you're bound to GCC and that
+ *    there might be enough room for further improvement.
+ *
+ * Q. Why inline assembler?
+ * A. x86_64 features its own ABI, which I'm not familiar with. This is
+ *    why I decided to let the compiler take care of subroutine
+ *    prologue/epilogue as well as register allocation. For reference,
+ *    Win64 implements a different ABI for AMD64 than Linux does.
+ *
+ * Q. How much faster does it get?
+ * A. 'apps/openssl speed rsa dsa' output with no-asm:
+ *
+ *	sign	verify	sign/s	verify/s
+ * rsa  512 bits   0.0006s   0.0001s   1683.8  18456.2
+ * rsa 1024 bits   0.0028s   0.0002s    356.0   6407.0
+ * rsa 2048 bits   0.0172s   0.0005s     58.0   1957.8
+ * rsa 4096 bits   0.1155s   0.0018s      8.7    555.6
+ *	sign	verify	sign/s	verify/s
+ * dsa  512 bits   0.0005s   0.0006s   2100.8   1768.3
+ * dsa 1024 bits   0.0014s   0.0018s    692.3    559.2
+ * dsa 2048 bits   0.0049s   0.0061s    204.7    165.0
+ *
+ *    'apps/openssl speed rsa dsa' output with this module:
+ *
+ *	sign	verify	sign/s	verify/s
+ * rsa  512 bits   0.0004s   0.0000s   2767.1  33297.9
+ * rsa 1024 bits   0.0012s   0.0001s    867.4  14674.7
+ * rsa 2048 bits   0.0061s   0.0002s    164.0   5270.0
+ * rsa 4096 bits   0.0384s   0.0006s     26.1   1650.8
+ *	sign	verify	sign/s	verify/s
+ * dsa  512 bits   0.0002s   0.0003s   4442.2   3786.3
+ * dsa 1024 bits   0.0005s   0.0007s   1835.1   1497.4
+ * dsa 2048 bits   0.0016s   0.0020s    620.4    504.6
+ *
+ *    For reference, the IA-32 assembler implementation performs
+ *    very much like 64-bit code compiled with no-asm on the same
+ *    machine.
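+ *
+ * (A note on the macros further down: every product goes through mulq,
+ * which fixes the double-word result in %rax:%rdx; addq/adcq pairs then
+ * fold it into the two- or three-word software accumulators.)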
+ */ + +#ifdef _WIN64 +#define BN_ULONG unsigned long long +#else +#define BN_ULONG unsigned long +#endif + +#undef mul +#undef mul_add +#undef sqr + +/* + * "m"(a), "+m"(r) is the way to favor DirectPath µ-code; + * "g"(0) let the compiler to decide where does it + * want to keep the value of zero; + */ +#define mul_add(r,a,word,carry) do { \ + register BN_ULONG high,low; \ + asm ("mulq %3" \ + : "=a"(low),"=d"(high) \ + : "a"(word),"m"(a) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(carry),"+d"(high)\ + : "a"(low),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+m"(r),"+d"(high) \ + : "r"(carry),"g"(0) \ + : "cc"); \ + carry=high; \ + } while (0) + +#define mul(r,a,word,carry) do { \ + register BN_ULONG high,low; \ + asm ("mulq %3" \ + : "=a"(low),"=d"(high) \ + : "a"(word),"g"(a) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(carry),"+d"(high)\ + : "a"(low),"g"(0) \ + : "cc"); \ + (r)=carry, carry=high; \ + } while (0) + +#define sqr(r0,r1,a) \ + asm ("mulq %2" \ + : "=a"(r0),"=d"(r1) \ + : "a"(a) \ + : "cc"); + +BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) + { + BN_ULONG c1=0; + + if (num <= 0) return(c1); + + while (num&~3) + { + mul_add(rp[0],ap[0],w,c1); + mul_add(rp[1],ap[1],w,c1); + mul_add(rp[2],ap[2],w,c1); + mul_add(rp[3],ap[3],w,c1); + ap+=4; rp+=4; num-=4; + } + if (num) + { + mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1; + mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1; + mul_add(rp[2],ap[2],w,c1); return c1; + } + + return(c1); + } + +BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) + { + BN_ULONG c1=0; + + if (num <= 0) return(c1); + + while (num&~3) + { + mul(rp[0],ap[0],w,c1); + mul(rp[1],ap[1],w,c1); + mul(rp[2],ap[2],w,c1); + mul(rp[3],ap[3],w,c1); + ap+=4; rp+=4; num-=4; + } + if (num) + { + mul(rp[0],ap[0],w,c1); if (--num == 0) return c1; + mul(rp[1],ap[1],w,c1); if (--num == 0) return c1; + mul(rp[2],ap[2],w,c1); + } + return(c1); + } + +void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) + { + if (n <= 0) return; + + while (n&~3) + { + sqr(r[0],r[1],a[0]); + sqr(r[2],r[3],a[1]); + sqr(r[4],r[5],a[2]); + sqr(r[6],r[7],a[3]); + a+=4; r+=8; n-=4; + } + if (n) + { + sqr(r[0],r[1],a[0]); if (--n == 0) return; + sqr(r[2],r[3],a[1]); if (--n == 0) return; + sqr(r[4],r[5],a[2]); + } + } + +BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) +{ BN_ULONG ret,waste; + + asm ("divq %4" + : "=a"(ret),"=d"(waste) + : "a"(l),"d"(h),"g"(d) + : "cc"); + + return ret; +} + +BN_ULONG bn_add_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n) +{ BN_ULONG ret=0,i=0; + + if (n <= 0) return 0; + + asm ( + " subq %2,%2 \n" + ".p2align 4 \n" + "1: movq (%4,%2,8),%0 \n" + " adcq (%5,%2,8),%0 \n" + " movq %0,(%3,%2,8) \n" + " leaq 1(%2),%2 \n" + " loop 1b \n" + " sbbq %0,%0 \n" + : "=&a"(ret),"+c"(n),"=&r"(i) + : "r"(rp),"r"(ap),"r"(bp) + : "cc" + ); + + return ret&1; +} + +#ifndef SIMICS +BN_ULONG bn_sub_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n) +{ BN_ULONG ret=0,i=0; + + if (n <= 0) return 0; + + asm ( + " subq %2,%2 \n" + ".p2align 4 \n" + "1: movq (%4,%2,8),%0 \n" + " sbbq (%5,%2,8),%0 \n" + " movq %0,(%3,%2,8) \n" + " leaq 1(%2),%2 \n" + " loop 1b \n" + " sbbq %0,%0 \n" + : "=&a"(ret),"+c"(n),"=&r"(i) + : "r"(rp),"r"(ap),"r"(bp) + : "cc" + ); + + return ret&1; +} +#else +/* Simics 1.4<7 has buggy sbbq:-( */ +#define BN_MASK2 0xffffffffffffffffL +BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n) + { + BN_ULONG 
t1,t2; + int c=0; + + if (n <= 0) return((BN_ULONG)0); + + for (;;) + { + t1=a[0]; t2=b[0]; + r[0]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if (--n <= 0) break; + + t1=a[1]; t2=b[1]; + r[1]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if (--n <= 0) break; + + t1=a[2]; t2=b[2]; + r[2]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if (--n <= 0) break; + + t1=a[3]; t2=b[3]; + r[3]=(t1-t2-c)&BN_MASK2; + if (t1 != t2) c=(t1 < t2); + if (--n <= 0) break; + + a+=4; + b+=4; + r+=4; + } + return(c); + } +#endif + +/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */ +/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */ +/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */ +/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */ + +#if 0 +/* original macros are kept for reference purposes */ +#define mul_add_c(a,b,c0,c1,c2) { \ + BN_ULONG ta=(a),tb=(b); \ + t1 = ta * tb; \ + t2 = BN_UMULT_HIGH(ta,tb); \ + c0 += t1; t2 += (c0<t1)?1:0; \ + c1 += t2; c2 += (c1<t2)?1:0; \ + } + +#define mul_add_c2(a,b,c0,c1,c2) { \ + BN_ULONG ta=(a),tb=(b),t0; \ + t1 = BN_UMULT_HIGH(ta,tb); \ + t0 = ta * tb; \ + t2 = t1+t1; c2 += (t2<t1)?1:0; \ + t1 = t0+t0; t2 += (t1<t0)?1:0; \ + c0 += t1; t2 += (c0<t1)?1:0; \ + c1 += t2; c2 += (c1<t2)?1:0; \ + } +#else +#define mul_add_c(a,b,c0,c1,c2) do { \ + asm ("mulq %3" \ + : "=a"(t1),"=d"(t2) \ + : "a"(a),"m"(b) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c0),"+d"(t2) \ + : "a"(t1),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c1),"+r"(c2) \ + : "d"(t2),"g"(0) \ + : "cc"); \ + } while (0) + +#define sqr_add_c(a,i,c0,c1,c2) do { \ + asm ("mulq %2" \ + : "=a"(t1),"=d"(t2) \ + : "a"(a[i]) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c0),"+d"(t2) \ + : "a"(t1),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c1),"+r"(c2) \ + : "d"(t2),"g"(0) \ + : "cc"); \ + } while (0) + +#define mul_add_c2(a,b,c0,c1,c2) do { \ + asm ("mulq %3" \ + : "=a"(t1),"=d"(t2) \ + : "a"(a),"m"(b) \ + : "cc"); \ + asm ("addq %0,%0; adcq %2,%1" \ + : "+d"(t2),"+r"(c2) \ + : "g"(0) \ + : "cc"); \ + asm ("addq %0,%0; adcq %2,%1" \ + : "+a"(t1),"+d"(t2) \ + : "g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c0),"+d"(t2) \ + : "a"(t1),"g"(0) \ + : "cc"); \ + asm ("addq %2,%0; adcq %3,%1" \ + : "+r"(c1),"+r"(c2) \ + : "d"(t2),"g"(0) \ + : "cc"); \ + } while (0) +#endif + +#define sqr_add_c2(a,i,j,c0,c1,c2) \ + mul_add_c2((a)[i],(a)[j],c0,c1,c2) + +void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + mul_add_c(a[0],b[0],c1,c2,c3); + r[0]=c1; + c1=0; + mul_add_c(a[0],b[1],c2,c3,c1); + mul_add_c(a[1],b[0],c2,c3,c1); + r[1]=c2; + c2=0; + mul_add_c(a[2],b[0],c3,c1,c2); + mul_add_c(a[1],b[1],c3,c1,c2); + mul_add_c(a[0],b[2],c3,c1,c2); + r[2]=c3; + c3=0; + mul_add_c(a[0],b[3],c1,c2,c3); + mul_add_c(a[1],b[2],c1,c2,c3); + mul_add_c(a[2],b[1],c1,c2,c3); + mul_add_c(a[3],b[0],c1,c2,c3); + r[3]=c1; + c1=0; + mul_add_c(a[4],b[0],c2,c3,c1); + mul_add_c(a[3],b[1],c2,c3,c1); + mul_add_c(a[2],b[2],c2,c3,c1); + mul_add_c(a[1],b[3],c2,c3,c1); + mul_add_c(a[0],b[4],c2,c3,c1); + r[4]=c2; + c2=0; + mul_add_c(a[0],b[5],c3,c1,c2); + mul_add_c(a[1],b[4],c3,c1,c2); + mul_add_c(a[2],b[3],c3,c1,c2); + mul_add_c(a[3],b[2],c3,c1,c2); + mul_add_c(a[4],b[1],c3,c1,c2); + mul_add_c(a[5],b[0],c3,c1,c2); + r[5]=c3; + c3=0; + mul_add_c(a[6],b[0],c1,c2,c3); + 
mul_add_c(a[5],b[1],c1,c2,c3); + mul_add_c(a[4],b[2],c1,c2,c3); + mul_add_c(a[3],b[3],c1,c2,c3); + mul_add_c(a[2],b[4],c1,c2,c3); + mul_add_c(a[1],b[5],c1,c2,c3); + mul_add_c(a[0],b[6],c1,c2,c3); + r[6]=c1; + c1=0; + mul_add_c(a[0],b[7],c2,c3,c1); + mul_add_c(a[1],b[6],c2,c3,c1); + mul_add_c(a[2],b[5],c2,c3,c1); + mul_add_c(a[3],b[4],c2,c3,c1); + mul_add_c(a[4],b[3],c2,c3,c1); + mul_add_c(a[5],b[2],c2,c3,c1); + mul_add_c(a[6],b[1],c2,c3,c1); + mul_add_c(a[7],b[0],c2,c3,c1); + r[7]=c2; + c2=0; + mul_add_c(a[7],b[1],c3,c1,c2); + mul_add_c(a[6],b[2],c3,c1,c2); + mul_add_c(a[5],b[3],c3,c1,c2); + mul_add_c(a[4],b[4],c3,c1,c2); + mul_add_c(a[3],b[5],c3,c1,c2); + mul_add_c(a[2],b[6],c3,c1,c2); + mul_add_c(a[1],b[7],c3,c1,c2); + r[8]=c3; + c3=0; + mul_add_c(a[2],b[7],c1,c2,c3); + mul_add_c(a[3],b[6],c1,c2,c3); + mul_add_c(a[4],b[5],c1,c2,c3); + mul_add_c(a[5],b[4],c1,c2,c3); + mul_add_c(a[6],b[3],c1,c2,c3); + mul_add_c(a[7],b[2],c1,c2,c3); + r[9]=c1; + c1=0; + mul_add_c(a[7],b[3],c2,c3,c1); + mul_add_c(a[6],b[4],c2,c3,c1); + mul_add_c(a[5],b[5],c2,c3,c1); + mul_add_c(a[4],b[6],c2,c3,c1); + mul_add_c(a[3],b[7],c2,c3,c1); + r[10]=c2; + c2=0; + mul_add_c(a[4],b[7],c3,c1,c2); + mul_add_c(a[5],b[6],c3,c1,c2); + mul_add_c(a[6],b[5],c3,c1,c2); + mul_add_c(a[7],b[4],c3,c1,c2); + r[11]=c3; + c3=0; + mul_add_c(a[7],b[5],c1,c2,c3); + mul_add_c(a[6],b[6],c1,c2,c3); + mul_add_c(a[5],b[7],c1,c2,c3); + r[12]=c1; + c1=0; + mul_add_c(a[6],b[7],c2,c3,c1); + mul_add_c(a[7],b[6],c2,c3,c1); + r[13]=c2; + c2=0; + mul_add_c(a[7],b[7],c3,c1,c2); + r[14]=c3; + r[15]=c1; + } + +void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + mul_add_c(a[0],b[0],c1,c2,c3); + r[0]=c1; + c1=0; + mul_add_c(a[0],b[1],c2,c3,c1); + mul_add_c(a[1],b[0],c2,c3,c1); + r[1]=c2; + c2=0; + mul_add_c(a[2],b[0],c3,c1,c2); + mul_add_c(a[1],b[1],c3,c1,c2); + mul_add_c(a[0],b[2],c3,c1,c2); + r[2]=c3; + c3=0; + mul_add_c(a[0],b[3],c1,c2,c3); + mul_add_c(a[1],b[2],c1,c2,c3); + mul_add_c(a[2],b[1],c1,c2,c3); + mul_add_c(a[3],b[0],c1,c2,c3); + r[3]=c1; + c1=0; + mul_add_c(a[3],b[1],c2,c3,c1); + mul_add_c(a[2],b[2],c2,c3,c1); + mul_add_c(a[1],b[3],c2,c3,c1); + r[4]=c2; + c2=0; + mul_add_c(a[2],b[3],c3,c1,c2); + mul_add_c(a[3],b[2],c3,c1,c2); + r[5]=c3; + c3=0; + mul_add_c(a[3],b[3],c1,c2,c3); + r[6]=c1; + r[7]=c2; + } + +void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + sqr_add_c(a,0,c1,c2,c3); + r[0]=c1; + c1=0; + sqr_add_c2(a,1,0,c2,c3,c1); + r[1]=c2; + c2=0; + sqr_add_c(a,1,c3,c1,c2); + sqr_add_c2(a,2,0,c3,c1,c2); + r[2]=c3; + c3=0; + sqr_add_c2(a,3,0,c1,c2,c3); + sqr_add_c2(a,2,1,c1,c2,c3); + r[3]=c1; + c1=0; + sqr_add_c(a,2,c2,c3,c1); + sqr_add_c2(a,3,1,c2,c3,c1); + sqr_add_c2(a,4,0,c2,c3,c1); + r[4]=c2; + c2=0; + sqr_add_c2(a,5,0,c3,c1,c2); + sqr_add_c2(a,4,1,c3,c1,c2); + sqr_add_c2(a,3,2,c3,c1,c2); + r[5]=c3; + c3=0; + sqr_add_c(a,3,c1,c2,c3); + sqr_add_c2(a,4,2,c1,c2,c3); + sqr_add_c2(a,5,1,c1,c2,c3); + sqr_add_c2(a,6,0,c1,c2,c3); + r[6]=c1; + c1=0; + sqr_add_c2(a,7,0,c2,c3,c1); + sqr_add_c2(a,6,1,c2,c3,c1); + sqr_add_c2(a,5,2,c2,c3,c1); + sqr_add_c2(a,4,3,c2,c3,c1); + r[7]=c2; + c2=0; + sqr_add_c(a,4,c3,c1,c2); + sqr_add_c2(a,5,3,c3,c1,c2); + sqr_add_c2(a,6,2,c3,c1,c2); + sqr_add_c2(a,7,1,c3,c1,c2); + r[8]=c3; + c3=0; + sqr_add_c2(a,7,2,c1,c2,c3); + sqr_add_c2(a,6,3,c1,c2,c3); + sqr_add_c2(a,5,4,c1,c2,c3); + r[9]=c1; + c1=0; + sqr_add_c(a,5,c2,c3,c1); + sqr_add_c2(a,6,4,c2,c3,c1); + 
sqr_add_c2(a,7,3,c2,c3,c1); + r[10]=c2; + c2=0; + sqr_add_c2(a,7,4,c3,c1,c2); + sqr_add_c2(a,6,5,c3,c1,c2); + r[11]=c3; + c3=0; + sqr_add_c(a,6,c1,c2,c3); + sqr_add_c2(a,7,5,c1,c2,c3); + r[12]=c1; + c1=0; + sqr_add_c2(a,7,6,c2,c3,c1); + r[13]=c2; + c2=0; + sqr_add_c(a,7,c3,c1,c2); + r[14]=c3; + r[15]=c1; + } + +void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) + { + BN_ULONG t1,t2; + BN_ULONG c1,c2,c3; + + c1=0; + c2=0; + c3=0; + sqr_add_c(a,0,c1,c2,c3); + r[0]=c1; + c1=0; + sqr_add_c2(a,1,0,c2,c3,c1); + r[1]=c2; + c2=0; + sqr_add_c(a,1,c3,c1,c2); + sqr_add_c2(a,2,0,c3,c1,c2); + r[2]=c3; + c3=0; + sqr_add_c2(a,3,0,c1,c2,c3); + sqr_add_c2(a,2,1,c1,c2,c3); + r[3]=c1; + c1=0; + sqr_add_c(a,2,c2,c3,c1); + sqr_add_c2(a,3,1,c2,c3,c1); + r[4]=c2; + c2=0; + sqr_add_c2(a,3,2,c3,c1,c2); + r[5]=c3; + c3=0; + sqr_add_c(a,3,c1,c2,c3); + r[6]=c1; + r[7]=c2; + } +#endif diff --git a/openssl/crypto/bn/asm/x86_64-mont.pl b/openssl/crypto/bn/asm/x86_64-mont.pl new file mode 100755 index 00000000..3b7a6f24 --- /dev/null +++ b/openssl/crypto/bn/asm/x86_64-mont.pl @@ -0,0 +1,330 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# October 2005. +# +# Montgomery multiplication routine for x86_64. While it gives modest +# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more +# than twice, >2x, as fast. Most common rsa1024 sign is improved by +# respectful 50%. It remains to be seen if loop unrolling and +# dedicated squaring routine can provide further improvement... + +$flavour = shift; +$output = shift; +if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } + +$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or +die "can't locate x86_64-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour $output"; + +# int bn_mul_mont( +$rp="%rdi"; # BN_ULONG *rp, +$ap="%rsi"; # const BN_ULONG *ap, +$bp="%rdx"; # const BN_ULONG *bp, +$np="%rcx"; # const BN_ULONG *np, +$n0="%r8"; # const BN_ULONG *n0, +$num="%r9"; # int num); +$lo0="%r10"; +$hi0="%r11"; +$bp="%r12"; # reassign $bp +$hi1="%r13"; +$i="%r14"; +$j="%r15"; +$m0="%rbx"; +$m1="%rbp"; + +$code=<<___; +.text + +.globl bn_mul_mont +.type bn_mul_mont,\@function,6 +.align 16 +bn_mul_mont: + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + + mov ${num}d,${num}d + lea 2($num),%r10 + mov %rsp,%r11 + neg %r10 + lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2)) + and \$-1024,%rsp # minimize TLB usage + + mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp +.Lprologue: + mov %rdx,$bp # $bp reassigned, remember? 
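+
+	# operand-scanning main body: for each word bp[i] the two inner
+	# loops accumulate ap[j]*bp[i] and np[j]*m1 into tp[] on the stack,
+	# with the running carries kept in $hi0 and $hi1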
+ + mov ($n0),$n0 # pull n0[0] value + + xor $i,$i # i=0 + xor $j,$j # j=0 + + mov ($bp),$m0 # m0=bp[0] + mov ($ap),%rax + mulq $m0 # ap[0]*bp[0] + mov %rax,$lo0 + mov %rdx,$hi0 + + imulq $n0,%rax # "tp[0]"*n0 + mov %rax,$m1 + + mulq ($np) # np[0]*m1 + add $lo0,%rax # discarded + adc \$0,%rdx + mov %rdx,$hi1 + + lea 1($j),$j # j++ +.L1st: + mov ($ap,$j,8),%rax + mulq $m0 # ap[j]*bp[0] + add $hi0,%rax + adc \$0,%rdx + mov %rax,$lo0 + mov ($np,$j,8),%rax + mov %rdx,$hi0 + + mulq $m1 # np[j]*m1 + add $hi1,%rax + lea 1($j),$j # j++ + adc \$0,%rdx + add $lo0,%rax # np[j]*m1+ap[j]*bp[0] + adc \$0,%rdx + mov %rax,-16(%rsp,$j,8) # tp[j-1] + cmp $num,$j + mov %rdx,$hi1 + jl .L1st + + xor %rdx,%rdx + add $hi0,$hi1 + adc \$0,%rdx + mov $hi1,-8(%rsp,$num,8) + mov %rdx,(%rsp,$num,8) # store upmost overflow bit + + lea 1($i),$i # i++ +.align 4 +.Louter: + xor $j,$j # j=0 + + mov ($bp,$i,8),$m0 # m0=bp[i] + mov ($ap),%rax # ap[0] + mulq $m0 # ap[0]*bp[i] + add (%rsp),%rax # ap[0]*bp[i]+tp[0] + adc \$0,%rdx + mov %rax,$lo0 + mov %rdx,$hi0 + + imulq $n0,%rax # tp[0]*n0 + mov %rax,$m1 + + mulq ($np,$j,8) # np[0]*m1 + add $lo0,%rax # discarded + mov 8(%rsp),$lo0 # tp[1] + adc \$0,%rdx + mov %rdx,$hi1 + + lea 1($j),$j # j++ +.align 4 +.Linner: + mov ($ap,$j,8),%rax + mulq $m0 # ap[j]*bp[i] + add $hi0,%rax + adc \$0,%rdx + add %rax,$lo0 # ap[j]*bp[i]+tp[j] + mov ($np,$j,8),%rax + adc \$0,%rdx + mov %rdx,$hi0 + + mulq $m1 # np[j]*m1 + add $hi1,%rax + lea 1($j),$j # j++ + adc \$0,%rdx + add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j] + adc \$0,%rdx + mov (%rsp,$j,8),$lo0 + cmp $num,$j + mov %rax,-16(%rsp,$j,8) # tp[j-1] + mov %rdx,$hi1 + jl .Linner + + xor %rdx,%rdx + add $hi0,$hi1 + adc \$0,%rdx + add $lo0,$hi1 # pull upmost overflow bit + adc \$0,%rdx + mov $hi1,-8(%rsp,$num,8) + mov %rdx,(%rsp,$num,8) # store upmost overflow bit + + lea 1($i),$i # i++ + cmp $num,$i + jl .Louter + + lea (%rsp),$ap # borrow ap for tp + lea -1($num),$j # j=num-1 + + mov ($ap),%rax # tp[0] + xor $i,$i # i=0 and clear CF! + jmp .Lsub +.align 16 +.Lsub: sbb ($np,$i,8),%rax + mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i] + dec $j # doesn't affect CF! 
+ mov 8($ap,$i,8),%rax # tp[i+1] + lea 1($i),$i # i++ + jge .Lsub + + sbb \$0,%rax # handle upmost overflow bit + and %rax,$ap + not %rax + mov $rp,$np + and %rax,$np + lea -1($num),$j + or $np,$ap # ap=borrow?tp:rp +.align 16 +.Lcopy: # copy or in-place refresh + mov ($ap,$j,8),%rax + mov %rax,($rp,$j,8) # rp[i]=tp[i] + mov $i,(%rsp,$j,8) # zap temporary vector + dec $j + jge .Lcopy + + mov 8(%rsp,$num,8),%rsi # restore %rsp + mov \$1,%rax + mov (%rsi),%r15 + mov 8(%rsi),%r14 + mov 16(%rsi),%r13 + mov 24(%rsi),%r12 + mov 32(%rsi),%rbp + mov 40(%rsi),%rbx + lea 48(%rsi),%rsp +.Lepilogue: + ret +.size bn_mul_mont,.-bn_mul_mont +.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>" +.align 16 +___ + +# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, +# CONTEXT *context,DISPATCHER_CONTEXT *disp) +if ($win64) { +$rec="%rcx"; +$frame="%rdx"; +$context="%r8"; +$disp="%r9"; + +$code.=<<___; +.extern __imp_RtlVirtualUnwind +.type se_handler,\@abi-omnipotent +.align 16 +se_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + lea .Lprologue(%rip),%r10 + cmp %r10,%rbx # context->Rip<.Lprologue + jb .Lin_prologue + + mov 152($context),%rax # pull context->Rsp + + lea .Lepilogue(%rip),%r10 + cmp %r10,%rbx # context->Rip>=.Lepilogue + jae .Lin_prologue + + mov 192($context),%r10 # pull $num + mov 8(%rax,%r10,8),%rax # pull saved stack pointer + lea 48(%rax),%rax + + mov -8(%rax),%rbx + mov -16(%rax),%rbp + mov -24(%rax),%r12 + mov -32(%rax),%r13 + mov -40(%rax),%r14 + mov -48(%rax),%r15 + mov %rbx,144($context) # restore context->Rbx + mov %rbp,160($context) # restore context->Rbp + mov %r12,216($context) # restore context->R12 + mov %r13,224($context) # restore context->R13 + mov %r14,232($context) # restore context->R14 + mov %r15,240($context) # restore context->R15 + +.Lin_prologue: + mov 8(%rax),%rdi + mov 16(%rax),%rsi + mov %rax,152($context) # restore context->Rsp + mov %rsi,168($context) # restore context->Rsi + mov %rdi,176($context) # restore context->Rdi + + mov 40($disp),%rdi # disp->ContextRecord + mov $context,%rsi # context + mov \$154,%ecx # sizeof(CONTEXT) + .long 0xa548f3fc # cld; rep movsq + + mov $disp,%rsi + xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER + mov 8(%rsi),%rdx # arg2, disp->ImageBase + mov 0(%rsi),%r8 # arg3, disp->ControlPc + mov 16(%rsi),%r9 # arg4, disp->FunctionEntry + mov 40(%rsi),%r10 # disp->ContextRecord + lea 56(%rsi),%r11 # &disp->HandlerData + lea 24(%rsi),%r12 # &disp->EstablisherFrame + mov %r10,32(%rsp) # arg5 + mov %r11,40(%rsp) # arg6 + mov %r12,48(%rsp) # arg7 + mov %rcx,56(%rsp) # arg8, (NULL) + call *__imp_RtlVirtualUnwind(%rip) + + mov \$1,%eax # ExceptionContinueSearch + add \$64,%rsp + popfq + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + pop %rdi + pop %rsi + ret +.size se_handler,.-se_handler + +.section .pdata +.align 4 + .rva .LSEH_begin_bn_mul_mont + .rva .LSEH_end_bn_mul_mont + .rva .LSEH_info_bn_mul_mont + +.section .xdata +.align 8 +.LSEH_info_bn_mul_mont: + .byte 9,0,0,0 + .rva se_handler +___ +} + +print $code; +close STDOUT; |
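
For readers who want to check the assembler against something simpler, below is an illustrative, word-level C sketch of the operation all bn_mul_mont variants above implement (the function name and the 1024-word bound are assumptions borrowed from via-mont.pl, not OpenSSL API). It follows the same operand-scanning order as the integer paths: an outer loop over the words of bp, two fused multiply-accumulate passes into a temporary tp[], and the final conditional subtraction seen in the common_tail/.Lsub/.Lcopy fragments:

	#include <stdint.h>

	/* Illustrative only: rp[] = ap[]*bp[]*2^(-32*num) mod np[], where
	 * n0 = -np[0]^(-1) mod 2^32 and all vectors are num 32-bit words.
	 * num is assumed to be at most 1024, matching the via-mont limit. */
	static void mont_mul_sketch(uint32_t *rp, const uint32_t *ap,
	                            const uint32_t *bp, const uint32_t *np,
	                            uint32_t n0, int num)
	{
		uint32_t tp[1026] = {0};	/* tp[num], tp[num+1] hold overflow */
		uint64_t c;
		int i, j;

		for (i = 0; i < num; i++) {
			for (c = 0, j = 0; j < num; j++) {	/* tp[] += ap[]*bp[i] */
				c += (uint64_t)ap[j]*bp[i] + tp[j];
				tp[j] = (uint32_t)c;
				c >>= 32;
			}
			c += tp[num];
			tp[num] = (uint32_t)c;
			tp[num+1] = (uint32_t)(c>>32);

			uint32_t m1 = tp[0]*n0;	/* makes tp[0]+np[0]*m1 == 0 mod 2^32 */
			c = ((uint64_t)np[0]*m1 + tp[0]) >> 32;
			for (j = 1; j < num; j++) {	/* tp[] = (tp[]+np[]*m1)/2^32 */
				c += (uint64_t)np[j]*m1 + tp[j];
				tp[j-1] = (uint32_t)c;
				c >>= 32;
			}
			c += tp[num];
			tp[num-1] = (uint32_t)c;
			tp[num] = tp[num+1] + (uint32_t)(c>>32);
			tp[num+1] = 0;
		}

		for (c = 0, i = 0; i < num; i++) {	/* rp[] = tp[]-np[] */
			c = (uint64_t)tp[i] - np[i] - c;
			rp[i] = (uint32_t)c;
			c = (c>>32)&1;			/* borrow out */
		}
		if (c > tp[num])			/* tp < np: keep tp itself */
			for (i = 0; i < num; i++) rp[i] = tp[i];
	}

The assembler above avoids the final branch: it turns the borrow into an all-ones or all-zero mask and and/or-selects between tp and the difference, which also refreshes rp in place.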