path: root/ics-openvpn-stripped/main/openssl/crypto/bn
author      Parménides GV <parmegv@sdf.org>    2014-11-04 20:45:42 +0100
committer   Parménides GV <parmegv@sdf.org>    2014-11-04 20:45:42 +0100
commit      5304543ebd60778ad46123cd63142e27627fa150 (patch)
tree        b07723b530e20b23ae83de822387f6551ea7f9f4 /ics-openvpn-stripped/main/openssl/crypto/bn
parent      713c3a98f53a6bd1ad94e90f28d3e37d20abfab9 (diff)
Update ics-openvpn to rev 906.
Diffstat (limited to 'ics-openvpn-stripped/main/openssl/crypto/bn')
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/README  27
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/alpha-mont.pl  321
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.S  201
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.pl  281
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.S  579
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.pl  675
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.S  1533
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.pl  774
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-mips.S  2175
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.S  1254
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.pl  287
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64-mont.pl  851
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64.S  1555
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.S  284
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.pl  426
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips.pl  2583
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3-mont.pl  327
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3.S  2201
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.S  1773
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.pl  1497
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2.S  1618
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2W.S  1605
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/parisc-mont.pl  995
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc-mont.pl  334
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc.pl  1998
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc64-mont.pl  1088
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-gf2m.pl  221
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-mont.pl  277
-rwxr-xr-x  ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x.S  678
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8.S  1458
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8plus.S  1558
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9-mont.pl  606
-rwxr-xr-x  ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9a-mont.pl  882
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/via-mont.pl  242
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.S  347
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.pl  313
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.S  460
-rwxr-xr-x  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.pl  593
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86.pl  28
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/add.pl  76
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/comba.pl  277
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/div.pl  15
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/f  3
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul.pl  77
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul_add.pl  87
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sqr.pl  60
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sub.pl  76
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gcc.c  606
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.S  291
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.pl  390
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.S  1374
-rwxr-xr-x  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.pl  1681
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.S  784
-rwxr-xr-x  ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.pl  1071
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn.h  908
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn.mul  19
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_add.c  313
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_asm.c  1030
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_blind.c  385
-rwxr-xr-x  ics-openvpn-stripped/main/openssl/crypto/bn/bn_const.c  402
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_ctx.c  454
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_depr.c  112
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_div.c  448
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_err.c  152
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp.c  1097
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp2.c  312
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_gcd.c  655
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_gf2m.c  1113
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_kron.c  184
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_lcl.h  515
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_lib.c  878
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_mod.c  301
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_mont.c  515
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_mpi.c  130
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_mul.c  1166
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_nist.c  1109
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.c  494
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.h  327
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.pl  119
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_print.c  378
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_rand.c  375
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_recp.c  234
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_shift.c  223
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqr.c  294
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqrt.c  393
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bn_word.c  238
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bnspeed.c  233
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/bntest.c  2013
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/divtest.c  41
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/exp.c  62
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/expspeed.c  353
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/exptest.c  204
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/bn/todo  3
93 files changed, 59955 insertions, 0 deletions
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/README b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/README
new file mode 100644
index 00000000..b0f3a68a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/README
@@ -0,0 +1,27 @@
+<OBSOLETE>
+
+All assembler in this directory is just a version of the file
+crypto/bn/bn_asm.c.
+
+Quite a few of these files are just the assembler output from gcc, since on
+quite a few machines it is about twice as fast as the system compiler's code.
+
+For the x86, I have hand-written assembler because of the bad job all
+compilers seem to do on it. This normally gives a 2x speed-up in the RSA
+routines.
+
+For the DEC Alpha, I also hand-wrote the assembler (except the division, which
+is just the output from the C compiler pasted onto the end of the file).
+On the two Alpha C compilers I had access to, it was not possible to do
+64b x 64b -> 128b calculations (both the long and long long data types
+were 64 bits). So the hand assembler gives access to the 128-bit result and
+a 2x speedup :-).
+
+There are 3 versions of assembler for the HP PA-RISC.
+
+pa-risc.s is the original one, which works fine and was generated using gcc :-)
+
+pa-risc2W.s and pa-risc2.s are 64- and 32-bit PA-RISC 2.0 implementations
+by Chris Ruemmler from HP (with some help from the HP C compiler).
+
+</OBSOLETE>
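
The 64b x 64b -> 128b point is the heart of why these modules exist: the generic
crypto/bn/bn_asm.c code needs a double-width product for every word multiply.
As a rough illustration only (a minimal sketch assuming a compiler with
unsigned __int128; the function and type names are made up here and are not
the actual bn_asm.c source), the kind of primitive every file in this
directory re-implements looks like this:

    /*
     * Illustrative sketch of the word-level primitive the assembler replaces.
     * Assumes unsigned __int128 is available; names are hypothetical.
     */
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t          bn_word;    /* stands in for a 64-bit BN_ULONG */
    typedef unsigned __int128 bn_dword;   /* the 64b x 64b -> 128b product   */

    /* rp[i] += ap[i] * w for num words, returning the final carry word. */
    static bn_word mul_add_words(bn_word *rp, const bn_word *ap,
                                 size_t num, bn_word w)
    {
        bn_word carry = 0;

        for (size_t i = 0; i < num; i++) {
            bn_dword t = (bn_dword)ap[i] * w + rp[i] + carry;
            rp[i] = (bn_word)t;           /* low 64 bits of the product    */
            carry = (bn_word)(t >> 64);   /* high 64 bits become the carry */
        }
        return carry;
    }

With only 64-bit long/long long, as on the old Alpha compilers, the high half
of that product is unreachable from C, which is exactly the gap the
hand-written assembler fills.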
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/alpha-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/alpha-mont.pl
new file mode 100644
index 00000000..03596e20
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/alpha-mont.pl
@@ -0,0 +1,321 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# On 21264 RSA sign performance improves by 70/35/20/15 percent for
+# 512/1024/2048/4096 bit key lengths. This is measured against the vendor
+# compiler's '-tune host' code with in-line assembler. Other
+# benchmarks improve by 15-20%. To anchor it to something else, the
+# code provides approximately the same performance per GHz as AMD64.
+# I.e. if you compare 1GHz 21264 and 2GHz Opteron, you'll observe ~2x
+# difference.
+
+# int bn_mul_mont(
+$rp="a0"; # BN_ULONG *rp,
+$ap="a1"; # const BN_ULONG *ap,
+$bp="a2"; # const BN_ULONG *bp,
+$np="a3"; # const BN_ULONG *np,
+$n0="a4"; # const BN_ULONG *n0,
+$num="a5"; # int num);
+
+$lo0="t0";
+$hi0="t1";
+$lo1="t2";
+$hi1="t3";
+$aj="t4";
+$bi="t5";
+$nj="t6";
+$tp="t7";
+$alo="t8";
+$ahi="t9";
+$nlo="t10";
+$nhi="t11";
+$tj="t12";
+$i="s3";
+$j="s4";
+$m1="s5";
+
+$code=<<___;
+#ifdef __linux__
+#include <asm/regdef.h>
+#else
+#include <asm.h>
+#include <regdef.h>
+#endif
+
+.text
+
+.set noat
+.set noreorder
+
+.globl bn_mul_mont
+.align 5
+.ent bn_mul_mont
+bn_mul_mont:
+ lda sp,-48(sp)
+ stq ra,0(sp)
+ stq s3,8(sp)
+ stq s4,16(sp)
+ stq s5,24(sp)
+ stq fp,32(sp)
+ mov sp,fp
+ .mask 0x0400f000,-48
+ .frame fp,48,ra
+ .prologue 0
+
+ .align 4
+ .set reorder
+ sextl $num,$num
+ mov 0,v0
+ cmplt $num,4,AT
+ bne AT,.Lexit
+
+ ldq $hi0,0($ap) # ap[0]
+ s8addq $num,16,AT
+ ldq $aj,8($ap)
+ subq sp,AT,sp
+ ldq $bi,0($bp) # bp[0]
+ lda AT,-4096(zero) # mov -4096,AT
+ ldq $n0,0($n0)
+ and sp,AT,sp
+
+ mulq $hi0,$bi,$lo0
+ ldq $hi1,0($np) # np[0]
+ umulh $hi0,$bi,$hi0
+ ldq $nj,8($np)
+
+ mulq $lo0,$n0,$m1
+
+ mulq $hi1,$m1,$lo1
+ umulh $hi1,$m1,$hi1
+
+ addq $lo1,$lo0,$lo1
+ cmpult $lo1,$lo0,AT
+ addq $hi1,AT,$hi1
+
+ mulq $aj,$bi,$alo
+ mov 2,$j
+ umulh $aj,$bi,$ahi
+ mov sp,$tp
+
+ mulq $nj,$m1,$nlo
+ s8addq $j,$ap,$aj
+ umulh $nj,$m1,$nhi
+ s8addq $j,$np,$nj
+.align 4
+.L1st:
+ .set noreorder
+ ldq $aj,0($aj)
+ addl $j,1,$j
+ ldq $nj,0($nj)
+ lda $tp,8($tp)
+
+ addq $alo,$hi0,$lo0
+ mulq $aj,$bi,$alo
+ cmpult $lo0,$hi0,AT
+ addq $nlo,$hi1,$lo1
+
+ mulq $nj,$m1,$nlo
+ addq $ahi,AT,$hi0
+ cmpult $lo1,$hi1,v0
+ cmplt $j,$num,$tj
+
+ umulh $aj,$bi,$ahi
+ addq $nhi,v0,$hi1
+ addq $lo1,$lo0,$lo1
+ s8addq $j,$ap,$aj
+
+ umulh $nj,$m1,$nhi
+ cmpult $lo1,$lo0,v0
+ addq $hi1,v0,$hi1
+ s8addq $j,$np,$nj
+
+ stq $lo1,-8($tp)
+ nop
+ unop
+ bne $tj,.L1st
+ .set reorder
+
+ addq $alo,$hi0,$lo0
+ addq $nlo,$hi1,$lo1
+ cmpult $lo0,$hi0,AT
+ cmpult $lo1,$hi1,v0
+ addq $ahi,AT,$hi0
+ addq $nhi,v0,$hi1
+
+ addq $lo1,$lo0,$lo1
+ cmpult $lo1,$lo0,v0
+ addq $hi1,v0,$hi1
+
+ stq $lo1,0($tp)
+
+ addq $hi1,$hi0,$hi1
+ cmpult $hi1,$hi0,AT
+ stq $hi1,8($tp)
+ stq AT,16($tp)
+
+ mov 1,$i
+.align 4
+.Louter:
+ s8addq $i,$bp,$bi
+ ldq $hi0,0($ap)
+ ldq $aj,8($ap)
+ ldq $bi,0($bi)
+ ldq $hi1,0($np)
+ ldq $nj,8($np)
+ ldq $tj,0(sp)
+
+ mulq $hi0,$bi,$lo0
+ umulh $hi0,$bi,$hi0
+
+ addq $lo0,$tj,$lo0
+ cmpult $lo0,$tj,AT
+ addq $hi0,AT,$hi0
+
+ mulq $lo0,$n0,$m1
+
+ mulq $hi1,$m1,$lo1
+ umulh $hi1,$m1,$hi1
+
+ addq $lo1,$lo0,$lo1
+ cmpult $lo1,$lo0,AT
+ mov 2,$j
+ addq $hi1,AT,$hi1
+
+ mulq $aj,$bi,$alo
+ mov sp,$tp
+ umulh $aj,$bi,$ahi
+
+ mulq $nj,$m1,$nlo
+ s8addq $j,$ap,$aj
+ umulh $nj,$m1,$nhi
+.align 4
+.Linner:
+ .set noreorder
+ ldq $tj,8($tp) #L0
+ nop #U1
+ ldq $aj,0($aj) #L1
+ s8addq $j,$np,$nj #U0
+
+ ldq $nj,0($nj) #L0
+ nop #U1
+ addq $alo,$hi0,$lo0 #L1
+ lda $tp,8($tp)
+
+ mulq $aj,$bi,$alo #U1
+ cmpult $lo0,$hi0,AT #L0
+ addq $nlo,$hi1,$lo1 #L1
+ addl $j,1,$j
+
+ mulq $nj,$m1,$nlo #U1
+ addq $ahi,AT,$hi0 #L0
+ addq $lo0,$tj,$lo0 #L1
+ cmpult $lo1,$hi1,v0 #U0
+
+ umulh $aj,$bi,$ahi #U1
+ cmpult $lo0,$tj,AT #L0
+ addq $lo1,$lo0,$lo1 #L1
+ addq $nhi,v0,$hi1 #U0
+
+ umulh $nj,$m1,$nhi #U1
+ s8addq $j,$ap,$aj #L0
+ cmpult $lo1,$lo0,v0 #L1
+ cmplt $j,$num,$tj #U0 # borrow $tj
+
+ addq $hi0,AT,$hi0 #L0
+ addq $hi1,v0,$hi1 #U1
+ stq $lo1,-8($tp) #L1
+ bne $tj,.Linner #U0
+ .set reorder
+
+ ldq $tj,8($tp)
+ addq $alo,$hi0,$lo0
+ addq $nlo,$hi1,$lo1
+ cmpult $lo0,$hi0,AT
+ cmpult $lo1,$hi1,v0
+ addq $ahi,AT,$hi0
+ addq $nhi,v0,$hi1
+
+ addq $lo0,$tj,$lo0
+ cmpult $lo0,$tj,AT
+ addq $hi0,AT,$hi0
+
+ ldq $tj,16($tp)
+ addq $lo1,$lo0,$j
+ cmpult $j,$lo0,v0
+ addq $hi1,v0,$hi1
+
+ addq $hi1,$hi0,$lo1
+ stq $j,0($tp)
+ cmpult $lo1,$hi0,$hi1
+ addq $lo1,$tj,$lo1
+ cmpult $lo1,$tj,AT
+ addl $i,1,$i
+ addq $hi1,AT,$hi1
+ stq $lo1,8($tp)
+ cmplt $i,$num,$tj # borrow $tj
+ stq $hi1,16($tp)
+ bne $tj,.Louter
+
+ s8addq $num,sp,$tj # &tp[num]
+ mov $rp,$bp # put rp aside
+ mov sp,$tp
+ mov sp,$ap
+ mov 0,$hi0 # clear borrow bit
+
+.align 4
+.Lsub: ldq $lo0,0($tp)
+ ldq $lo1,0($np)
+ lda $tp,8($tp)
+ lda $np,8($np)
+ subq $lo0,$lo1,$lo1 # tp[i]-np[i]
+ cmpult $lo0,$lo1,AT
+ subq $lo1,$hi0,$lo0
+ cmpult $lo1,$lo0,$hi0
+ or $hi0,AT,$hi0
+ stq $lo0,0($rp)
+ cmpult $tp,$tj,v0
+ lda $rp,8($rp)
+ bne v0,.Lsub
+
+ subq $hi1,$hi0,$hi0 # handle upmost overflow bit
+ mov sp,$tp
+ mov $bp,$rp # restore rp
+
+ and sp,$hi0,$ap
+ bic $bp,$hi0,$bp
+ bis $bp,$ap,$ap # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: ldq $aj,0($ap) # copy or in-place refresh
+ lda $tp,8($tp)
+ lda $rp,8($rp)
+ lda $ap,8($ap)
+ stq zero,-8($tp) # zap tp
+ cmpult $tp,$tj,AT
+ stq $aj,-8($rp)
+ bne AT,.Lcopy
+ mov 1,v0
+
+.Lexit:
+ .set noreorder
+ mov fp,sp
+ /*ldq ra,0(sp)*/
+ ldq s3,8(sp)
+ ldq s4,16(sp)
+ ldq s5,24(sp)
+ ldq fp,32(sp)
+ lda sp,48(sp)
+ ret (ra)
+.end bn_mul_mont
+.ascii "Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+print $code;
+close STDOUT;
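
For orientation, the loops above (.L1st, .Louter/.Linner, .Lsub, .Lcopy) follow
the classic word-serial Montgomery multiplication. The sketch below is a
simplified C rendering of that algorithm under the same interface assumptions
as the prototype comment in the module (rp = ap*bp*R^-1 mod np with
R = 2^(64*num) and n0 = -np[0]^-1 mod 2^64); it assumes unsigned __int128, is
not the OpenSSL implementation, and ignores the num<4 fallback and the return
value.

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t          word;
    typedef unsigned __int128 dword;

    /* rp = ap * bp * R^-1 mod np, R = 2^(64*num), n0 = -np[0]^-1 mod 2^64. */
    static void mont_mul(word *rp, const word *ap, const word *bp,
                         const word *np, word n0, int num)
    {
        word tp[num + 2];                 /* running accumulator, num+2 words */
        memset(tp, 0, sizeof(tp));

        for (int i = 0; i < num; i++) {
            /* tp += ap[] * bp[i]  (the "a" half of .L1st / .Linner) */
            dword c = 0;
            for (int j = 0; j < num; j++) {
                dword t = (dword)ap[j] * bp[i] + tp[j] + c;
                tp[j] = (word)t;
                c     = t >> 64;
            }
            dword t = (dword)tp[num] + c;
            tp[num]     = (word)t;
            tp[num + 1] = (word)(t >> 64);

            /* m makes tp[0] divisible by 2^64; add m*np and drop one word */
            word m = tp[0] * n0;
            c = ((dword)np[0] * m + tp[0]) >> 64;
            for (int j = 1; j < num; j++) {
                dword u = (dword)np[j] * m + tp[j] + c;
                tp[j - 1] = (word)u;
                c         = u >> 64;
            }
            t = (dword)tp[num] + c;
            tp[num - 1] = (word)t;
            tp[num]     = tp[num + 1] + (word)(t >> 64);
            tp[num + 1] = 0;
        }

        /* conditional final subtraction (the .Lsub / .Lcopy tail) */
        word borrow = 0;
        for (int j = 0; j < num; j++) {
            dword d = (dword)tp[j] - np[j] - borrow;
            rp[j]  = (word)d;
            borrow = (word)(d >> 127);    /* 1 if the subtraction wrapped */
        }
        borrow = (word)(tp[num] < borrow);
        for (int j = 0; j < num; j++)
            rp[j] = borrow ? tp[j] : rp[j];
    }

The assembly fuses the two inner loops into a single pass and keeps the final
copy branch-free with and/bic/bis, but the data flow is the same.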
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.S
new file mode 100644
index 00000000..0fa25b26
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.S
@@ -0,0 +1,201 @@
+#include "arm_arch.h"
+
+.text
+.code 32
+
+#if __ARM_ARCH__>=7
+.fpu neon
+#endif
+.type mul_1x1_ialu,%function
+.align 5
+mul_1x1_ialu:
+ mov r4,#0
+ bic r5,r1,#3<<30 @ a1=a&0x3fffffff
+ str r4,[sp,#0] @ tab[0]=0
+ add r6,r5,r5 @ a2=a1<<1
+ str r5,[sp,#4] @ tab[1]=a1
+ eor r7,r5,r6 @ a1^a2
+ str r6,[sp,#8] @ tab[2]=a2
+ mov r8,r5,lsl#2 @ a4=a1<<2
+ str r7,[sp,#12] @ tab[3]=a1^a2
+ eor r9,r5,r8 @ a1^a4
+ str r8,[sp,#16] @ tab[4]=a4
+ eor r4,r6,r8 @ a2^a4
+ str r9,[sp,#20] @ tab[5]=a1^a4
+ eor r7,r7,r8 @ a1^a2^a4
+ str r4,[sp,#24] @ tab[6]=a2^a4
+ and r8,r12,r0,lsl#2
+ str r7,[sp,#28] @ tab[7]=a1^a2^a4
+
+ and r9,r12,r0,lsr#1
+ ldr r5,[sp,r8] @ tab[b & 0x7]
+ and r8,r12,r0,lsr#4
+ ldr r7,[sp,r9] @ tab[b >> 3 & 0x7]
+ and r9,r12,r0,lsr#7
+ ldr r6,[sp,r8] @ tab[b >> 6 & 0x7]
+ eor r5,r5,r7,lsl#3 @ stall
+ mov r4,r7,lsr#29
+ ldr r7,[sp,r9] @ tab[b >> 9 & 0x7]
+
+ and r8,r12,r0,lsr#10
+ eor r5,r5,r6,lsl#6
+ eor r4,r4,r6,lsr#26
+ ldr r6,[sp,r8] @ tab[b >> 12 & 0x7]
+
+ and r9,r12,r0,lsr#13
+ eor r5,r5,r7,lsl#9
+ eor r4,r4,r7,lsr#23
+ ldr r7,[sp,r9] @ tab[b >> 15 & 0x7]
+
+ and r8,r12,r0,lsr#16
+ eor r5,r5,r6,lsl#12
+ eor r4,r4,r6,lsr#20
+ ldr r6,[sp,r8] @ tab[b >> 18 & 0x7]
+
+ and r9,r12,r0,lsr#19
+ eor r5,r5,r7,lsl#15
+ eor r4,r4,r7,lsr#17
+ ldr r7,[sp,r9] @ tab[b >> 21 & 0x7]
+
+ and r8,r12,r0,lsr#22
+ eor r5,r5,r6,lsl#18
+ eor r4,r4,r6,lsr#14
+ ldr r6,[sp,r8] @ tab[b >> 24 & 0x7]
+
+ and r9,r12,r0,lsr#25
+ eor r5,r5,r7,lsl#21
+ eor r4,r4,r7,lsr#11
+ ldr r7,[sp,r9] @ tab[b >> 27 & 0x7]
+
+ tst r1,#1<<30
+ and r8,r12,r0,lsr#28
+ eor r5,r5,r6,lsl#24
+ eor r4,r4,r6,lsr#8
+ ldr r6,[sp,r8] @ tab[b >> 30 ]
+
+ eorne r5,r5,r0,lsl#30
+ eorne r4,r4,r0,lsr#2
+ tst r1,#1<<31
+ eor r5,r5,r7,lsl#27
+ eor r4,r4,r7,lsr#5
+ eorne r5,r5,r0,lsl#31
+ eorne r4,r4,r0,lsr#1
+ eor r5,r5,r6,lsl#30
+ eor r4,r4,r6,lsr#2
+
+ mov pc,lr
+.size mul_1x1_ialu,.-mul_1x1_ialu
+.global bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,%function
+.align 5
+bn_GF2m_mul_2x2:
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+.Lpic: ldr r12,[pc,r12]
+ tst r12,#1
+ beq .Lialu
+
+ ldr r12, [sp] @ 5th argument
+ vmov.32 d26, r2, r1
+ vmov.32 d27, r12, r3
+ vmov.i64 d28, #0x0000ffffffffffff
+ vmov.i64 d29, #0x00000000ffffffff
+ vmov.i64 d30, #0x000000000000ffff
+
+ vext.8 d2, d26, d26, #1 @ A1
+ vmull.p8 q1, d2, d27 @ F = A1*B
+ vext.8 d0, d27, d27, #1 @ B1
+ vmull.p8 q0, d26, d0 @ E = A*B1
+ vext.8 d4, d26, d26, #2 @ A2
+ vmull.p8 q2, d4, d27 @ H = A2*B
+ vext.8 d16, d27, d27, #2 @ B2
+ vmull.p8 q8, d26, d16 @ G = A*B2
+ vext.8 d6, d26, d26, #3 @ A3
+ veor q1, q1, q0 @ L = E + F
+ vmull.p8 q3, d6, d27 @ J = A3*B
+ vext.8 d0, d27, d27, #3 @ B3
+ veor q2, q2, q8 @ M = G + H
+ vmull.p8 q0, d26, d0 @ I = A*B3
+ veor d2, d2, d3 @ t0 = (L) (P0 + P1) << 8
+ vand d3, d3, d28
+ vext.8 d16, d27, d27, #4 @ B4
+ veor d4, d4, d5 @ t1 = (M) (P2 + P3) << 16
+ vand d5, d5, d29
+ vmull.p8 q8, d26, d16 @ K = A*B4
+ veor q3, q3, q0 @ N = I + J
+ veor d2, d2, d3
+ veor d4, d4, d5
+ veor d6, d6, d7 @ t2 = (N) (P4 + P5) << 24
+ vand d7, d7, d30
+ vext.8 q1, q1, q1, #15
+ veor d16, d16, d17 @ t3 = (K) (P6 + P7) << 32
+ vmov.i64 d17, #0
+ vext.8 q2, q2, q2, #14
+ veor d6, d6, d7
+ vmull.p8 q0, d26, d27 @ D = A*B
+ vext.8 q8, q8, q8, #12
+ vext.8 q3, q3, q3, #13
+ veor q1, q1, q2
+ veor q3, q3, q8
+ veor q0, q0, q1
+ veor q0, q0, q3
+
+ vst1.32 {q0}, [r0]
+ bx lr @ bx lr
+.align 4
+.Lialu:
+#endif
+ stmdb sp!,{r4-r10,lr}
+ mov r10,r0 @ reassign 1st argument
+ mov r0,r3 @ r0=b1
+ ldr r3,[sp,#32] @ load b0
+ mov r12,#7<<2
+ sub sp,sp,#32 @ allocate tab[8]
+
+ bl mul_1x1_ialu @ a1·b1
+ str r5,[r10,#8]
+ str r4,[r10,#12]
+
+ eor r0,r0,r3 @ flip b0 and b1
+ eor r1,r1,r2 @ flip a0 and a1
+ eor r3,r3,r0
+ eor r2,r2,r1
+ eor r0,r0,r3
+ eor r1,r1,r2
+ bl mul_1x1_ialu @ a0·b0
+ str r5,[r10]
+ str r4,[r10,#4]
+
+ eor r1,r1,r2
+ eor r0,r0,r3
+ bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
+ ldmia r10,{r6-r9}
+ eor r5,r5,r4
+ eor r4,r4,r7
+ eor r5,r5,r6
+ eor r4,r4,r8
+ eor r5,r5,r9
+ eor r4,r4,r9
+ str r4,[r10,#8]
+ eor r5,r5,r4
+ add sp,sp,#32 @ destroy tab[8]
+ str r5,[r10,#4]
+
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r10,pc}
+#else
+ ldmia sp!,{r4-r10,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+#if __ARM_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-(.Lpic+8)
+#endif
+.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 5
+
+.comm OPENSSL_armcap_P,4,4
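
The mul_1x1_ialu routine above is a carry-less (GF(2)[x]) 32x32 -> 64
multiplication done with a small on-stack table indexed by 3-bit windows of
one operand, with the top two bits of the other operand handled by conditional
XORs so that no table entry exceeds 32 bits. A hedged C sketch of that
technique follows (illustrative only, with made-up names; this is not
OpenSSL's bn_gf2m.c code):

    #include <stdint.h>

    /* Carry-less 32x32 -> 64 bit multiply via a 3-bit window table,
     * mirroring the tab[8] built on the stack by mul_1x1_ialu above. */
    static uint64_t gf2m_mul_1x1(uint32_t a, uint32_t b)
    {
        uint32_t a1 = a & 0x3fffffff;     /* drop a's top two bits      */
        uint32_t a2 = a1 << 1, a4 = a1 << 2;
        uint32_t tab[8] = {               /* tab[i] = carry-less a1 * i */
            0, a1, a2, a1 ^ a2, a4, a1 ^ a4, a2 ^ a4, a1 ^ a2 ^ a4
        };

        uint64_t r = 0;
        for (int k = 0; k < 32; k += 3)   /* walk b in 3-bit windows    */
            r ^= (uint64_t)tab[(b >> k) & 7] << k;

        /* fold a's top two bits back in as conditional XORs of shifted b */
        if (a & (1u << 30)) r ^= (uint64_t)b << 30;
        if (a & (1u << 31)) r ^= (uint64_t)b << 31;

        return r;
    }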
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.pl
new file mode 100644
index 00000000..3f1f4f67
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-gf2m.pl
@@ -0,0 +1,281 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication
+# used in bn_gf2m.c. It's a fairly low-hanging, mechanical port from
+# C for the time being... except that it has two code paths: pure
+# integer code suitable for any ARMv4 and later CPU, and NEON code
+# suitable for ARMv7. The pure integer 1x1 multiplication subroutine runs
+# in ~45 cycles on a dual-issue core such as Cortex A8, which is ~50%
+# faster than compiler-generated code. For ECDH and ECDSA verify (but
+# not for ECDSA sign) it means 25%-45% improvement depending on key
+# length, more for longer keys. Even though NEON 1x1 multiplication
+# runs in even less cycles, ~30, improvement is measurable only on
+# longer keys. One has to optimize code elsewhere to get NEON glow...
+#
+# April 2014
+#
+# Double bn_GF2m_mul_2x2 performance by using algorithm from paper
+# referred below, which improves ECDH and ECDSA verify benchmarks
+# by 18-40%.
+#
+# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
+# Polynomial Multiplication on ARM Processors using the NEON Engine.
+#
+# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$code=<<___;
+#include "arm_arch.h"
+
+.text
+.code 32
+
+#if __ARM_ARCH__>=7
+.fpu neon
+#endif
+___
+################
+# private interface to mul_1x1_ialu
+#
+$a="r1";
+$b="r0";
+
+($a0,$a1,$a2,$a12,$a4,$a14)=
+($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
+
+$mask="r12";
+
+$code.=<<___;
+.type mul_1x1_ialu,%function
+.align 5
+mul_1x1_ialu:
+ mov $a0,#0
+ bic $a1,$a,#3<<30 @ a1=a&0x3fffffff
+ str $a0,[sp,#0] @ tab[0]=0
+ add $a2,$a1,$a1 @ a2=a1<<1
+ str $a1,[sp,#4] @ tab[1]=a1
+ eor $a12,$a1,$a2 @ a1^a2
+ str $a2,[sp,#8] @ tab[2]=a2
+ mov $a4,$a1,lsl#2 @ a4=a1<<2
+ str $a12,[sp,#12] @ tab[3]=a1^a2
+ eor $a14,$a1,$a4 @ a1^a4
+ str $a4,[sp,#16] @ tab[4]=a4
+ eor $a0,$a2,$a4 @ a2^a4
+ str $a14,[sp,#20] @ tab[5]=a1^a4
+ eor $a12,$a12,$a4 @ a1^a2^a4
+ str $a0,[sp,#24] @ tab[6]=a2^a4
+ and $i0,$mask,$b,lsl#2
+ str $a12,[sp,#28] @ tab[7]=a1^a2^a4
+
+ and $i1,$mask,$b,lsr#1
+ ldr $lo,[sp,$i0] @ tab[b & 0x7]
+ and $i0,$mask,$b,lsr#4
+ ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7]
+ and $i1,$mask,$b,lsr#7
+ ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7]
+ eor $lo,$lo,$t1,lsl#3 @ stall
+ mov $hi,$t1,lsr#29
+ ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7]
+
+ and $i0,$mask,$b,lsr#10
+ eor $lo,$lo,$t0,lsl#6
+ eor $hi,$hi,$t0,lsr#26
+ ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7]
+
+ and $i1,$mask,$b,lsr#13
+ eor $lo,$lo,$t1,lsl#9
+ eor $hi,$hi,$t1,lsr#23
+ ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7]
+
+ and $i0,$mask,$b,lsr#16
+ eor $lo,$lo,$t0,lsl#12
+ eor $hi,$hi,$t0,lsr#20
+ ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7]
+
+ and $i1,$mask,$b,lsr#19
+ eor $lo,$lo,$t1,lsl#15
+ eor $hi,$hi,$t1,lsr#17
+ ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7]
+
+ and $i0,$mask,$b,lsr#22
+ eor $lo,$lo,$t0,lsl#18
+ eor $hi,$hi,$t0,lsr#14
+ ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7]
+
+ and $i1,$mask,$b,lsr#25
+ eor $lo,$lo,$t1,lsl#21
+ eor $hi,$hi,$t1,lsr#11
+ ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7]
+
+ tst $a,#1<<30
+ and $i0,$mask,$b,lsr#28
+ eor $lo,$lo,$t0,lsl#24
+ eor $hi,$hi,$t0,lsr#8
+ ldr $t0,[sp,$i0] @ tab[b >> 30 ]
+
+ eorne $lo,$lo,$b,lsl#30
+ eorne $hi,$hi,$b,lsr#2
+ tst $a,#1<<31
+ eor $lo,$lo,$t1,lsl#27
+ eor $hi,$hi,$t1,lsr#5
+ eorne $lo,$lo,$b,lsl#31
+ eorne $hi,$hi,$b,lsr#1
+ eor $lo,$lo,$t0,lsl#30
+ eor $hi,$hi,$t0,lsr#2
+
+ mov pc,lr
+.size mul_1x1_ialu,.-mul_1x1_ialu
+___
+################
+# void bn_GF2m_mul_2x2(BN_ULONG *r,
+# BN_ULONG a1,BN_ULONG a0,
+# BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
+{
+my ($r,$t0,$t1,$t2,$t3)=map("q$_",(0..3,8..12));
+my ($a,$b,$k48,$k32,$k16)=map("d$_",(26..31));
+
+$code.=<<___;
+.global bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,%function
+.align 5
+bn_GF2m_mul_2x2:
+#if __ARM_ARCH__>=7
+ ldr r12,.LOPENSSL_armcap
+.Lpic: ldr r12,[pc,r12]
+ tst r12,#1
+ beq .Lialu
+
+ ldr r12, [sp] @ 5th argument
+ vmov.32 $a, r2, r1
+ vmov.32 $b, r12, r3
+ vmov.i64 $k48, #0x0000ffffffffffff
+ vmov.i64 $k32, #0x00000000ffffffff
+ vmov.i64 $k16, #0x000000000000ffff
+
+ vext.8 $t0#lo, $a, $a, #1 @ A1
+ vmull.p8 $t0, $t0#lo, $b @ F = A1*B
+ vext.8 $r#lo, $b, $b, #1 @ B1
+ vmull.p8 $r, $a, $r#lo @ E = A*B1
+ vext.8 $t1#lo, $a, $a, #2 @ A2
+ vmull.p8 $t1, $t1#lo, $b @ H = A2*B
+ vext.8 $t3#lo, $b, $b, #2 @ B2
+ vmull.p8 $t3, $a, $t3#lo @ G = A*B2
+ vext.8 $t2#lo, $a, $a, #3 @ A3
+ veor $t0, $t0, $r @ L = E + F
+ vmull.p8 $t2, $t2#lo, $b @ J = A3*B
+ vext.8 $r#lo, $b, $b, #3 @ B3
+ veor $t1, $t1, $t3 @ M = G + H
+ vmull.p8 $r, $a, $r#lo @ I = A*B3
+ veor $t0#lo, $t0#lo, $t0#hi @ t0 = (L) (P0 + P1) << 8
+ vand $t0#hi, $t0#hi, $k48
+ vext.8 $t3#lo, $b, $b, #4 @ B4
+ veor $t1#lo, $t1#lo, $t1#hi @ t1 = (M) (P2 + P3) << 16
+ vand $t1#hi, $t1#hi, $k32
+ vmull.p8 $t3, $a, $t3#lo @ K = A*B4
+ veor $t2, $t2, $r @ N = I + J
+ veor $t0#lo, $t0#lo, $t0#hi
+ veor $t1#lo, $t1#lo, $t1#hi
+ veor $t2#lo, $t2#lo, $t2#hi @ t2 = (N) (P4 + P5) << 24
+ vand $t2#hi, $t2#hi, $k16
+ vext.8 $t0, $t0, $t0, #15
+ veor $t3#lo, $t3#lo, $t3#hi @ t3 = (K) (P6 + P7) << 32
+ vmov.i64 $t3#hi, #0
+ vext.8 $t1, $t1, $t1, #14
+ veor $t2#lo, $t2#lo, $t2#hi
+ vmull.p8 $r, $a, $b @ D = A*B
+ vext.8 $t3, $t3, $t3, #12
+ vext.8 $t2, $t2, $t2, #13
+ veor $t0, $t0, $t1
+ veor $t2, $t2, $t3
+ veor $r, $r, $t0
+ veor $r, $r, $t2
+
+ vst1.32 {$r}, [r0]
+ ret @ bx lr
+.align 4
+.Lialu:
+#endif
+___
+}
+$ret="r10"; # reassigned 1st argument
+$code.=<<___;
+ stmdb sp!,{r4-r10,lr}
+ mov $ret,r0 @ reassign 1st argument
+ mov $b,r3 @ $b=b1
+ ldr r3,[sp,#32] @ load b0
+ mov $mask,#7<<2
+ sub sp,sp,#32 @ allocate tab[8]
+
+ bl mul_1x1_ialu @ a1·b1
+ str $lo,[$ret,#8]
+ str $hi,[$ret,#12]
+
+ eor $b,$b,r3 @ flip b0 and b1
+ eor $a,$a,r2 @ flip a0 and a1
+ eor r3,r3,$b
+ eor r2,r2,$a
+ eor $b,$b,r3
+ eor $a,$a,r2
+ bl mul_1x1_ialu @ a0·b0
+ str $lo,[$ret]
+ str $hi,[$ret,#4]
+
+ eor $a,$a,r2
+ eor $b,$b,r3
+ bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
+___
+@r=map("r$_",(6..9));
+$code.=<<___;
+ ldmia $ret,{@r[0]-@r[3]}
+ eor $lo,$lo,$hi
+ eor $hi,$hi,@r[1]
+ eor $lo,$lo,@r[0]
+ eor $hi,$hi,@r[2]
+ eor $lo,$lo,@r[3]
+ eor $hi,$hi,@r[3]
+ str $hi,[$ret,#8]
+ eor $lo,$lo,$hi
+ add sp,sp,#32 @ destroy tab[8]
+ str $lo,[$ret,#4]
+
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r10,pc}
+#else
+ ldmia sp!,{r4-r10,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+#if __ARM_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-(.Lpic+8)
+#endif
+.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 5
+
+.comm OPENSSL_armcap_P,4,4
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
+ s/\bret\b/bx lr/go or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+close STDOUT; # enforce flush
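
At the 2x2 level the integer path is a Karatsuba-style multiplication over
GF(2)[x]: three 1x1 carry-less products (a1*b1, a0*b0 and (a1^a0)*(b1^b0))
combined with XORs, which is what the eor/str sequence after the three
"bl mul_1x1_ialu" calls implements. A self-contained, hedged C sketch of that
combine (names are hypothetical; the naive clmul32 helper stands in for the
table-based routine sketched after armv4-gf2m.S):

    #include <stdint.h>

    /* Naive carry-less 32x32 -> 64 multiply (stand-in for mul_1x1_ialu). */
    static uint64_t clmul32(uint32_t a, uint32_t b)
    {
        uint64_t r = 0;
        for (int i = 0; i < 32; i++)
            if ((b >> i) & 1)
                r ^= (uint64_t)a << i;
        return r;
    }

    /* r[3..0] = (a1:a0) * (b1:b0) over GF(2)[x]: three multiplies + XORs. */
    static void gf2m_mul_2x2(uint32_t r[4], uint32_t a1, uint32_t a0,
                             uint32_t b1, uint32_t b0)
    {
        uint64_t hi  = clmul32(a1, b1);
        uint64_t lo  = clmul32(a0, b0);
        uint64_t mid = clmul32(a1 ^ a0, b1 ^ b0) ^ hi ^ lo;  /* middle term */

        r[0] = (uint32_t)lo;
        r[1] = (uint32_t)(lo >> 32) ^ (uint32_t)mid;
        r[2] = (uint32_t)hi ^ (uint32_t)(mid >> 32);
        r[3] = (uint32_t)(hi >> 32);
    }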
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.S
new file mode 100644
index 00000000..fecae15e
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.S
@@ -0,0 +1,579 @@
+#include "arm_arch.h"
+
+.text
+.code 32
+
+#if __ARM_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-bn_mul_mont
+#endif
+
+.global bn_mul_mont
+.type bn_mul_mont,%function
+
+.align 5
+bn_mul_mont:
+ ldr ip,[sp,#4] @ load num
+ stmdb sp!,{r0,r2} @ sp points at argument block
+#if __ARM_ARCH__>=7
+ tst ip,#7
+ bne .Lialu
+ adr r0,bn_mul_mont
+ ldr r2,.LOPENSSL_armcap
+ ldr r0,[r0,r2]
+ tst r0,#1 @ NEON available?
+ ldmia sp, {r0,r2}
+ beq .Lialu
+ add sp,sp,#8
+ b bn_mul8x_mont_neon
+.align 4
+.Lialu:
+#endif
+ cmp ip,#2
+ mov r0,ip @ load num
+ movlt r0,#0
+ addlt sp,sp,#2*4
+ blt .Labrt
+
+ stmdb sp!,{r4-r12,lr} @ save 10 registers
+
+ mov r0,r0,lsl#2 @ rescale r0 for byte count
+ sub sp,sp,r0 @ alloca(4*num)
+ sub sp,sp,#4 @ +extra dword
+ sub r0,r0,#4 @ "num=num-1"
+ add r4,r2,r0 @ &bp[num-1]
+
+ add r0,sp,r0 @ r0 to point at &tp[num-1]
+ ldr r8,[r0,#14*4] @ &n0
+ ldr r2,[r2] @ bp[0]
+ ldr r5,[r1],#4 @ ap[0],ap++
+ ldr r6,[r3],#4 @ np[0],np++
+ ldr r8,[r8] @ *n0
+ str r4,[r0,#15*4] @ save &bp[num]
+
+ umull r10,r11,r5,r2 @ ap[0]*bp[0]
+ str r8,[r0,#14*4] @ save n0 value
+ mul r8,r10,r8 @ "tp[0]"*n0
+ mov r12,#0
+ umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
+ mov r4,sp
+
+.L1st:
+ ldr r5,[r1],#4 @ ap[j],ap++
+ mov r10,r11
+ ldr r6,[r3],#4 @ np[j],np++
+ mov r11,#0
+ umlal r10,r11,r5,r2 @ ap[j]*bp[0]
+ mov r14,#0
+ umlal r12,r14,r6,r8 @ np[j]*n0
+ adds r12,r12,r10
+ str r12,[r4],#4 @ tp[j-1]=,tp++
+ adc r12,r14,#0
+ cmp r4,r0
+ bne .L1st
+
+ adds r12,r12,r11
+ ldr r4,[r0,#13*4] @ restore bp
+ mov r14,#0
+ ldr r8,[r0,#14*4] @ restore n0
+ adc r14,r14,#0
+ str r12,[r0] @ tp[num-1]=
+ str r14,[r0,#4] @ tp[num]=
+
+.Louter:
+ sub r7,r0,sp @ "original" r0-1 value
+ sub r1,r1,r7 @ "rewind" ap to &ap[1]
+ ldr r2,[r4,#4]! @ *(++bp)
+ sub r3,r3,r7 @ "rewind" np to &np[1]
+ ldr r5,[r1,#-4] @ ap[0]
+ ldr r10,[sp] @ tp[0]
+ ldr r6,[r3,#-4] @ np[0]
+ ldr r7,[sp,#4] @ tp[1]
+
+ mov r11,#0
+ umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
+ str r4,[r0,#13*4] @ save bp
+ mul r8,r10,r8
+ mov r12,#0
+ umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
+ mov r4,sp
+
+.Linner:
+ ldr r5,[r1],#4 @ ap[j],ap++
+ adds r10,r11,r7 @ +=tp[j]
+ ldr r6,[r3],#4 @ np[j],np++
+ mov r11,#0
+ umlal r10,r11,r5,r2 @ ap[j]*bp[i]
+ mov r14,#0
+ umlal r12,r14,r6,r8 @ np[j]*n0
+ adc r11,r11,#0
+ ldr r7,[r4,#8] @ tp[j+1]
+ adds r12,r12,r10
+ str r12,[r4],#4 @ tp[j-1]=,tp++
+ adc r12,r14,#0
+ cmp r4,r0
+ bne .Linner
+
+ adds r12,r12,r11
+ mov r14,#0
+ ldr r4,[r0,#13*4] @ restore bp
+ adc r14,r14,#0
+ ldr r8,[r0,#14*4] @ restore n0
+ adds r12,r12,r7
+ ldr r7,[r0,#15*4] @ restore &bp[num]
+ adc r14,r14,#0
+ str r12,[r0] @ tp[num-1]=
+ str r14,[r0,#4] @ tp[num]=
+
+ cmp r4,r7
+ bne .Louter
+
+ ldr r2,[r0,#12*4] @ pull rp
+ add r0,r0,#4 @ r0 to point at &tp[num]
+ sub r5,r0,sp @ "original" num value
+ mov r4,sp @ "rewind" r4
+ mov r1,r4 @ "borrow" r1
+ sub r3,r3,r5 @ "rewind" r3 to &np[0]
+
+ subs r7,r7,r7 @ "clear" carry flag
+.Lsub: ldr r7,[r4],#4
+ ldr r6,[r3],#4
+ sbcs r7,r7,r6 @ tp[j]-np[j]
+ str r7,[r2],#4 @ rp[j]=
+ teq r4,r0 @ preserve carry
+ bne .Lsub
+ sbcs r14,r14,#0 @ upmost carry
+ mov r4,sp @ "rewind" r4
+ sub r2,r2,r5 @ "rewind" r2
+
+ and r1,r4,r14
+ bic r3,r2,r14
+ orr r1,r1,r3 @ ap=borrow?tp:rp
+
+.Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh
+ str sp,[r4],#4 @ zap tp
+ str r7,[r2],#4
+ cmp r4,r0
+ bne .Lcopy
+
+ add sp,r0,#4 @ skip over tp[num+1]
+ ldmia sp!,{r4-r12,lr} @ restore registers
+ add sp,sp,#2*4 @ skip over {r0,r2}
+ mov r0,#1
+.Labrt:
+#if __ARM_ARCH__>=5
+ bx lr @ .word 0xe12fff1e
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size bn_mul_mont,.-bn_mul_mont
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type bn_mul8x_mont_neon,%function
+.align 5
+bn_mul8x_mont_neon:
+ mov ip,sp
+ stmdb sp!,{r4-r11}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+ ldmia ip,{r4-r5} @ load rest of parameter block
+
+ sub r7,sp,#16
+ vld1.32 {d28[0]}, [r2,:32]!
+ sub r7,r7,r5,lsl#4
+ vld1.32 {d0-d3}, [r1]! @ can't specify :32 :-(
+ and r7,r7,#-64
+ vld1.32 {d30[0]}, [r4,:32]
+ mov sp,r7 @ alloca
+ veor d8,d8,d8
+ subs r8,r5,#8
+ vzip.16 d28,d8
+
+ vmull.u32 q6,d28,d0[0]
+ vmull.u32 q7,d28,d0[1]
+ vmull.u32 q8,d28,d1[0]
+ vshl.i64 d10,d13,#16
+ vmull.u32 q9,d28,d1[1]
+
+ vadd.u64 d10,d10,d12
+ veor d8,d8,d8
+ vmul.u32 d29,d10,d30
+
+ vmull.u32 q10,d28,d2[0]
+ vld1.32 {d4-d7}, [r3]!
+ vmull.u32 q11,d28,d2[1]
+ vmull.u32 q12,d28,d3[0]
+ vzip.16 d29,d8
+ vmull.u32 q13,d28,d3[1]
+
+ bne .LNEON_1st
+
+ @ special case for num=8, everything is in register bank...
+
+ vmlal.u32 q6,d29,d4[0]
+ sub r9,r5,#1
+ vmlal.u32 q7,d29,d4[1]
+ vmlal.u32 q8,d29,d5[0]
+ vmlal.u32 q9,d29,d5[1]
+
+ vmlal.u32 q10,d29,d6[0]
+ vmov q5,q6
+ vmlal.u32 q11,d29,d6[1]
+ vmov q6,q7
+ vmlal.u32 q12,d29,d7[0]
+ vmov q7,q8
+ vmlal.u32 q13,d29,d7[1]
+ vmov q8,q9
+ vmov q9,q10
+ vshr.u64 d10,d10,#16
+ vmov q10,q11
+ vmov q11,q12
+ vadd.u64 d10,d10,d11
+ vmov q12,q13
+ veor q13,q13
+ vshr.u64 d10,d10,#16
+
+ b .LNEON_outer8
+
+.align 4
+.LNEON_outer8:
+ vld1.32 {d28[0]}, [r2,:32]!
+ veor d8,d8,d8
+ vzip.16 d28,d8
+ vadd.u64 d12,d12,d10
+
+ vmlal.u32 q6,d28,d0[0]
+ vmlal.u32 q7,d28,d0[1]
+ vmlal.u32 q8,d28,d1[0]
+ vshl.i64 d10,d13,#16
+ vmlal.u32 q9,d28,d1[1]
+
+ vadd.u64 d10,d10,d12
+ veor d8,d8,d8
+ subs r9,r9,#1
+ vmul.u32 d29,d10,d30
+
+ vmlal.u32 q10,d28,d2[0]
+ vmlal.u32 q11,d28,d2[1]
+ vmlal.u32 q12,d28,d3[0]
+ vzip.16 d29,d8
+ vmlal.u32 q13,d28,d3[1]
+
+ vmlal.u32 q6,d29,d4[0]
+ vmlal.u32 q7,d29,d4[1]
+ vmlal.u32 q8,d29,d5[0]
+ vmlal.u32 q9,d29,d5[1]
+
+ vmlal.u32 q10,d29,d6[0]
+ vmov q5,q6
+ vmlal.u32 q11,d29,d6[1]
+ vmov q6,q7
+ vmlal.u32 q12,d29,d7[0]
+ vmov q7,q8
+ vmlal.u32 q13,d29,d7[1]
+ vmov q8,q9
+ vmov q9,q10
+ vshr.u64 d10,d10,#16
+ vmov q10,q11
+ vmov q11,q12
+ vadd.u64 d10,d10,d11
+ vmov q12,q13
+ veor q13,q13
+ vshr.u64 d10,d10,#16
+
+ bne .LNEON_outer8
+
+ vadd.u64 d12,d12,d10
+ mov r7,sp
+ vshr.u64 d10,d12,#16
+ mov r8,r5
+ vadd.u64 d13,d13,d10
+ add r6,sp,#16
+ vshr.u64 d10,d13,#16
+ vzip.16 d12,d13
+
+ b .LNEON_tail2
+
+.align 4
+.LNEON_1st:
+ vmlal.u32 q6,d29,d4[0]
+ vld1.32 {d0-d3}, [r1]!
+ vmlal.u32 q7,d29,d4[1]
+ subs r8,r8,#8
+ vmlal.u32 q8,d29,d5[0]
+ vmlal.u32 q9,d29,d5[1]
+
+ vmlal.u32 q10,d29,d6[0]
+ vld1.32 {d4-d5}, [r3]!
+ vmlal.u32 q11,d29,d6[1]
+ vst1.64 {q6-q7}, [r7,:256]!
+ vmlal.u32 q12,d29,d7[0]
+ vmlal.u32 q13,d29,d7[1]
+ vst1.64 {q8-q9}, [r7,:256]!
+
+ vmull.u32 q6,d28,d0[0]
+ vld1.32 {d6-d7}, [r3]!
+ vmull.u32 q7,d28,d0[1]
+ vst1.64 {q10-q11}, [r7,:256]!
+ vmull.u32 q8,d28,d1[0]
+ vmull.u32 q9,d28,d1[1]
+ vst1.64 {q12-q13}, [r7,:256]!
+
+ vmull.u32 q10,d28,d2[0]
+ vmull.u32 q11,d28,d2[1]
+ vmull.u32 q12,d28,d3[0]
+ vmull.u32 q13,d28,d3[1]
+
+ bne .LNEON_1st
+
+ vmlal.u32 q6,d29,d4[0]
+ add r6,sp,#16
+ vmlal.u32 q7,d29,d4[1]
+ sub r1,r1,r5,lsl#2 @ rewind r1
+ vmlal.u32 q8,d29,d5[0]
+ vld1.64 {q5}, [sp,:128]
+ vmlal.u32 q9,d29,d5[1]
+ sub r9,r5,#1
+
+ vmlal.u32 q10,d29,d6[0]
+ vst1.64 {q6-q7}, [r7,:256]!
+ vmlal.u32 q11,d29,d6[1]
+ vshr.u64 d10,d10,#16
+ vld1.64 {q6}, [r6, :128]!
+ vmlal.u32 q12,d29,d7[0]
+ vst1.64 {q8-q9}, [r7,:256]!
+ vmlal.u32 q13,d29,d7[1]
+
+ vst1.64 {q10-q11}, [r7,:256]!
+ vadd.u64 d10,d10,d11
+ veor q4,q4,q4
+ vst1.64 {q12-q13}, [r7,:256]!
+ vld1.64 {q7-q8}, [r6, :256]!
+ vst1.64 {q4}, [r7,:128]
+ vshr.u64 d10,d10,#16
+
+ b .LNEON_outer
+
+.align 4
+.LNEON_outer:
+ vld1.32 {d28[0]}, [r2,:32]!
+ sub r3,r3,r5,lsl#2 @ rewind r3
+ vld1.32 {d0-d3}, [r1]!
+ veor d8,d8,d8
+ mov r7,sp
+ vzip.16 d28,d8
+ sub r8,r5,#8
+ vadd.u64 d12,d12,d10
+
+ vmlal.u32 q6,d28,d0[0]
+ vld1.64 {q9-q10},[r6,:256]!
+ vmlal.u32 q7,d28,d0[1]
+ vmlal.u32 q8,d28,d1[0]
+ vld1.64 {q11-q12},[r6,:256]!
+ vmlal.u32 q9,d28,d1[1]
+
+ vshl.i64 d10,d13,#16
+ veor d8,d8,d8
+ vadd.u64 d10,d10,d12
+ vld1.64 {q13},[r6,:128]!
+ vmul.u32 d29,d10,d30
+
+ vmlal.u32 q10,d28,d2[0]
+ vld1.32 {d4-d7}, [r3]!
+ vmlal.u32 q11,d28,d2[1]
+ vmlal.u32 q12,d28,d3[0]
+ vzip.16 d29,d8
+ vmlal.u32 q13,d28,d3[1]
+
+.LNEON_inner:
+ vmlal.u32 q6,d29,d4[0]
+ vld1.32 {d0-d3}, [r1]!
+ vmlal.u32 q7,d29,d4[1]
+ subs r8,r8,#8
+ vmlal.u32 q8,d29,d5[0]
+ vmlal.u32 q9,d29,d5[1]
+ vst1.64 {q6-q7}, [r7,:256]!
+
+ vmlal.u32 q10,d29,d6[0]
+ vld1.64 {q6}, [r6, :128]!
+ vmlal.u32 q11,d29,d6[1]
+ vst1.64 {q8-q9}, [r7,:256]!
+ vmlal.u32 q12,d29,d7[0]
+ vld1.64 {q7-q8}, [r6, :256]!
+ vmlal.u32 q13,d29,d7[1]
+ vst1.64 {q10-q11}, [r7,:256]!
+
+ vmlal.u32 q6,d28,d0[0]
+ vld1.64 {q9-q10}, [r6, :256]!
+ vmlal.u32 q7,d28,d0[1]
+ vst1.64 {q12-q13}, [r7,:256]!
+ vmlal.u32 q8,d28,d1[0]
+ vld1.64 {q11-q12}, [r6, :256]!
+ vmlal.u32 q9,d28,d1[1]
+ vld1.32 {d4-d7}, [r3]!
+
+ vmlal.u32 q10,d28,d2[0]
+ vld1.64 {q13}, [r6, :128]!
+ vmlal.u32 q11,d28,d2[1]
+ vmlal.u32 q12,d28,d3[0]
+ vmlal.u32 q13,d28,d3[1]
+
+ bne .LNEON_inner
+
+ vmlal.u32 q6,d29,d4[0]
+ add r6,sp,#16
+ vmlal.u32 q7,d29,d4[1]
+ sub r1,r1,r5,lsl#2 @ rewind r1
+ vmlal.u32 q8,d29,d5[0]
+ vld1.64 {q5}, [sp,:128]
+ vmlal.u32 q9,d29,d5[1]
+ subs r9,r9,#1
+
+ vmlal.u32 q10,d29,d6[0]
+ vst1.64 {q6-q7}, [r7,:256]!
+ vmlal.u32 q11,d29,d6[1]
+ vld1.64 {q6}, [r6, :128]!
+ vshr.u64 d10,d10,#16
+ vst1.64 {q8-q9}, [r7,:256]!
+ vmlal.u32 q12,d29,d7[0]
+ vld1.64 {q7-q8}, [r6, :256]!
+ vmlal.u32 q13,d29,d7[1]
+
+ vst1.64 {q10-q11}, [r7,:256]!
+ vadd.u64 d10,d10,d11
+ vst1.64 {q12-q13}, [r7,:256]!
+ vshr.u64 d10,d10,#16
+
+ bne .LNEON_outer
+
+ mov r7,sp
+ mov r8,r5
+
+.LNEON_tail:
+ vadd.u64 d12,d12,d10
+ vld1.64 {q9-q10}, [r6, :256]!
+ vshr.u64 d10,d12,#16
+ vadd.u64 d13,d13,d10
+ vld1.64 {q11-q12}, [r6, :256]!
+ vshr.u64 d10,d13,#16
+ vld1.64 {q13}, [r6, :128]!
+ vzip.16 d12,d13
+
+.LNEON_tail2:
+ vadd.u64 d14,d14,d10
+ vst1.32 {d12[0]}, [r7, :32]!
+ vshr.u64 d10,d14,#16
+ vadd.u64 d15,d15,d10
+ vshr.u64 d10,d15,#16
+ vzip.16 d14,d15
+
+ vadd.u64 d16,d16,d10
+ vst1.32 {d14[0]}, [r7, :32]!
+ vshr.u64 d10,d16,#16
+ vadd.u64 d17,d17,d10
+ vshr.u64 d10,d17,#16
+ vzip.16 d16,d17
+
+ vadd.u64 d18,d18,d10
+ vst1.32 {d16[0]}, [r7, :32]!
+ vshr.u64 d10,d18,#16
+ vadd.u64 d19,d19,d10
+ vshr.u64 d10,d19,#16
+ vzip.16 d18,d19
+
+ vadd.u64 d20,d20,d10
+ vst1.32 {d18[0]}, [r7, :32]!
+ vshr.u64 d10,d20,#16
+ vadd.u64 d21,d21,d10
+ vshr.u64 d10,d21,#16
+ vzip.16 d20,d21
+
+ vadd.u64 d22,d22,d10
+ vst1.32 {d20[0]}, [r7, :32]!
+ vshr.u64 d10,d22,#16
+ vadd.u64 d23,d23,d10
+ vshr.u64 d10,d23,#16
+ vzip.16 d22,d23
+
+ vadd.u64 d24,d24,d10
+ vst1.32 {d22[0]}, [r7, :32]!
+ vshr.u64 d10,d24,#16
+ vadd.u64 d25,d25,d10
+ vld1.64 {q6}, [r6, :128]!
+ vshr.u64 d10,d25,#16
+ vzip.16 d24,d25
+
+ vadd.u64 d26,d26,d10
+ vst1.32 {d24[0]}, [r7, :32]!
+ vshr.u64 d10,d26,#16
+ vadd.u64 d27,d27,d10
+ vld1.64 {q7-q8}, [r6, :256]!
+ vshr.u64 d10,d27,#16
+ vzip.16 d26,d27
+ subs r8,r8,#8
+ vst1.32 {d26[0]}, [r7, :32]!
+
+ bne .LNEON_tail
+
+ vst1.32 {d10[0]}, [r7, :32] @ top-most bit
+ sub r3,r3,r5,lsl#2 @ rewind r3
+ subs r1,sp,#0 @ clear carry flag
+ add r2,sp,r5,lsl#2
+
+.LNEON_sub:
+ ldmia r1!, {r4-r7}
+ ldmia r3!, {r8-r11}
+ sbcs r8, r4,r8
+ sbcs r9, r5,r9
+ sbcs r10,r6,r10
+ sbcs r11,r7,r11
+ teq r1,r2 @ preserves carry
+ stmia r0!, {r8-r11}
+ bne .LNEON_sub
+
+ ldr r10, [r1] @ load top-most bit
+ veor q0,q0,q0
+ sub r11,r2,sp @ this is num*4
+ veor q1,q1,q1
+ mov r1,sp
+ sub r0,r0,r11 @ rewind r0
+ mov r3,r2 @ second 3/4th of frame
+ sbcs r10,r10,#0 @ result is carry flag
+
+.LNEON_copy_n_zap:
+ ldmia r1!, {r4-r7}
+ ldmia r0, {r8-r11}
+ movcc r8, r4
+ vst1.64 {q0-q1}, [r3,:256]! @ wipe
+ movcc r9, r5
+ movcc r10,r6
+ vst1.64 {q0-q1}, [r3,:256]! @ wipe
+ movcc r11,r7
+ ldmia r1, {r4-r7}
+ stmia r0!, {r8-r11}
+ sub r1,r1,#16
+ ldmia r0, {r8-r11}
+ movcc r8, r4
+ vst1.64 {q0-q1}, [r1,:256]! @ wipe
+ movcc r9, r5
+ movcc r10,r6
+ vst1.64 {q0-q1}, [r3,:256]! @ wipe
+ movcc r11,r7
+ teq r1,r2 @ preserves carry
+ stmia r0!, {r8-r11}
+ bne .LNEON_copy_n_zap
+
+ sub sp,ip,#96
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r11}
+ bx lr @ .word 0xe12fff1e
+.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
+#endif
+.asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#if __ARM_ARCH__>=7
+.comm OPENSSL_armcap_P,4,4
+#endif
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.pl
new file mode 100644
index 00000000..72bad8e3
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/armv4-mont.pl
@@ -0,0 +1,675 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2007.
+
+# Montgomery multiplication for ARMv4.
+#
+# Performance improvement naturally varies among CPU implementations
+# and compilers. The code was observed to provide +65-35% improvement
+# [depending on key length, less for longer keys] on ARM920T, and
+# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
+# base and compiler generated code with in-lined umull and even umlal
+# instructions. The latter means that this code didn't really have an
+# "advantage" of utilizing some "secret" instruction.
+#
+# The code is interoperable with Thumb ISA and is rather compact, less
+# than 1/2KB. Windows CE port would be trivial, as it's exclusively
+# about decorations, ABI and instruction syntax are identical.
+
+# November 2013
+#
+# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
+# performance improvement on Cortex-A8 is ~45-100% depending on key
+# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
+# On Snapdragon S4 improvement was measured to vary from ~70% to
+# an incredible ~380%, i.e. 4.8x faster, for RSA4096 sign. But this is
+# mostly because the original integer-only code seems to perform
+# suboptimally on S4. The situation on Cortex-A9 is unfortunately
+# different. It's being looked into, but the trouble is that
+# performance for vectors longer than 256 bits is actually a couple
+# of percent worse than for the integer-only code. The NEON code is chosen
+# for execution on all NEON-capable processors, because the gain on
+# the others outweighs the marginal loss on Cortex-A9.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$num="r0"; # starts as num argument, but holds &tp[num-1]
+$ap="r1";
+$bp="r2"; $bi="r2"; $rp="r2";
+$np="r3";
+$tp="r4";
+$aj="r5";
+$nj="r6";
+$tj="r7";
+$n0="r8";
+########### # r9 is reserved by ELF as platform specific, e.g. TLS pointer
+$alo="r10"; # sl, gcc uses it to keep @GOT
+$ahi="r11"; # fp
+$nlo="r12"; # ip
+########### # r13 is stack pointer
+$nhi="r14"; # lr
+########### # r15 is program counter
+
+#### argument block layout relative to &tp[num-1], a.k.a. $num
+$_rp="$num,#12*4";
+# ap permanently resides in r1
+$_bp="$num,#13*4";
+# np permanently resides in r3
+$_n0="$num,#14*4";
+$_num="$num,#15*4"; $_bpend=$_num;
+
+$code=<<___;
+#include "arm_arch.h"
+
+.text
+.code 32
+
+#if __ARM_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-bn_mul_mont
+#endif
+
+.global bn_mul_mont
+.type bn_mul_mont,%function
+
+.align 5
+bn_mul_mont:
+ ldr ip,[sp,#4] @ load num
+ stmdb sp!,{r0,r2} @ sp points at argument block
+#if __ARM_ARCH__>=7
+ tst ip,#7
+ bne .Lialu
+ adr r0,bn_mul_mont
+ ldr r2,.LOPENSSL_armcap
+ ldr r0,[r0,r2]
+ tst r0,#1 @ NEON available?
+ ldmia sp, {r0,r2}
+ beq .Lialu
+ add sp,sp,#8
+ b bn_mul8x_mont_neon
+.align 4
+.Lialu:
+#endif
+ cmp ip,#2
+ mov $num,ip @ load num
+ movlt r0,#0
+ addlt sp,sp,#2*4
+ blt .Labrt
+
+ stmdb sp!,{r4-r12,lr} @ save 10 registers
+
+ mov $num,$num,lsl#2 @ rescale $num for byte count
+ sub sp,sp,$num @ alloca(4*num)
+ sub sp,sp,#4 @ +extra dword
+ sub $num,$num,#4 @ "num=num-1"
+ add $tp,$bp,$num @ &bp[num-1]
+
+ add $num,sp,$num @ $num to point at &tp[num-1]
+ ldr $n0,[$_n0] @ &n0
+ ldr $bi,[$bp] @ bp[0]
+ ldr $aj,[$ap],#4 @ ap[0],ap++
+ ldr $nj,[$np],#4 @ np[0],np++
+ ldr $n0,[$n0] @ *n0
+ str $tp,[$_bpend] @ save &bp[num]
+
+ umull $alo,$ahi,$aj,$bi @ ap[0]*bp[0]
+ str $n0,[$_n0] @ save n0 value
+ mul $n0,$alo,$n0 @ "tp[0]"*n0
+ mov $nlo,#0
+ umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]"
+ mov $tp,sp
+
+.L1st:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ mov $alo,$ahi
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0]
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .L1st
+
+ adds $nlo,$nlo,$ahi
+ ldr $tp,[$_bp] @ restore bp
+ mov $nhi,#0
+ ldr $n0,[$_n0] @ restore n0
+ adc $nhi,$nhi,#0
+ str $nlo,[$num] @ tp[num-1]=
+ str $nhi,[$num,#4] @ tp[num]=
+
+.Louter:
+ sub $tj,$num,sp @ "original" $num-1 value
+ sub $ap,$ap,$tj @ "rewind" ap to &ap[1]
+ ldr $bi,[$tp,#4]! @ *(++bp)
+ sub $np,$np,$tj @ "rewind" np to &np[1]
+ ldr $aj,[$ap,#-4] @ ap[0]
+ ldr $alo,[sp] @ tp[0]
+ ldr $nj,[$np,#-4] @ np[0]
+ ldr $tj,[sp,#4] @ tp[1]
+
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[0]*bp[i]+tp[0]
+ str $tp,[$_bp] @ save bp
+ mul $n0,$alo,$n0
+ mov $nlo,#0
+ umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]"
+ mov $tp,sp
+
+.Linner:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ adds $alo,$ahi,$tj @ +=tp[j]
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i]
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ adc $ahi,$ahi,#0
+ ldr $tj,[$tp,#8] @ tp[j+1]
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .Linner
+
+ adds $nlo,$nlo,$ahi
+ mov $nhi,#0
+ ldr $tp,[$_bp] @ restore bp
+ adc $nhi,$nhi,#0
+ ldr $n0,[$_n0] @ restore n0
+ adds $nlo,$nlo,$tj
+ ldr $tj,[$_bpend] @ restore &bp[num]
+ adc $nhi,$nhi,#0
+ str $nlo,[$num] @ tp[num-1]=
+ str $nhi,[$num,#4] @ tp[num]=
+
+ cmp $tp,$tj
+ bne .Louter
+
+ ldr $rp,[$_rp] @ pull rp
+ add $num,$num,#4 @ $num to point at &tp[num]
+ sub $aj,$num,sp @ "original" num value
+ mov $tp,sp @ "rewind" $tp
+ mov $ap,$tp @ "borrow" $ap
+ sub $np,$np,$aj @ "rewind" $np to &np[0]
+
+ subs $tj,$tj,$tj @ "clear" carry flag
+.Lsub: ldr $tj,[$tp],#4
+ ldr $nj,[$np],#4
+ sbcs $tj,$tj,$nj @ tp[j]-np[j]
+ str $tj,[$rp],#4 @ rp[j]=
+ teq $tp,$num @ preserve carry
+ bne .Lsub
+ sbcs $nhi,$nhi,#0 @ upmost carry
+ mov $tp,sp @ "rewind" $tp
+ sub $rp,$rp,$aj @ "rewind" $rp
+
+ and $ap,$tp,$nhi
+ bic $np,$rp,$nhi
+ orr $ap,$ap,$np @ ap=borrow?tp:rp
+
+.Lcopy: ldr $tj,[$ap],#4 @ copy or in-place refresh
+ str sp,[$tp],#4 @ zap tp
+ str $tj,[$rp],#4
+ cmp $tp,$num
+ bne .Lcopy
+
+ add sp,$num,#4 @ skip over tp[num+1]
+ ldmia sp!,{r4-r12,lr} @ restore registers
+ add sp,sp,#2*4 @ skip over {r0,r2}
+ mov r0,#1
+.Labrt:
+#if __ARM_ARCH__>=5
+ ret @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size bn_mul_mont,.-bn_mul_mont
+___
+{
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+
+my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
+my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
+my ($Z,$Temp)=("q4","q5");
+my ($A0xB,$A1xB,$A2xB,$A3xB,$A4xB,$A5xB,$A6xB,$A7xB)=map("q$_",(6..13));
+my ($Bi,$Ni,$M0)=map("d$_",(28..31));
+my $zero=&Dlo($Z);
+my $temp=&Dlo($Temp);
+
+my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
+my ($tinptr,$toutptr,$inner,$outer)=map("r$_",(6..9));
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.fpu neon
+
+.type bn_mul8x_mont_neon,%function
+.align 5
+bn_mul8x_mont_neon:
+ mov ip,sp
+ stmdb sp!,{r4-r11}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+ ldmia ip,{r4-r5} @ load rest of parameter block
+
+ sub $toutptr,sp,#16
+ vld1.32 {${Bi}[0]}, [$bptr,:32]!
+ sub $toutptr,$toutptr,$num,lsl#4
+ vld1.32 {$A0-$A3}, [$aptr]! @ can't specify :32 :-(
+ and $toutptr,$toutptr,#-64
+ vld1.32 {${M0}[0]}, [$n0,:32]
+ mov sp,$toutptr @ alloca
+ veor $zero,$zero,$zero
+ subs $inner,$num,#8
+ vzip.16 $Bi,$zero
+
+ vmull.u32 $A0xB,$Bi,${A0}[0]
+ vmull.u32 $A1xB,$Bi,${A0}[1]
+ vmull.u32 $A2xB,$Bi,${A1}[0]
+ vshl.i64 $temp,`&Dhi("$A0xB")`,#16
+ vmull.u32 $A3xB,$Bi,${A1}[1]
+
+ vadd.u64 $temp,$temp,`&Dlo("$A0xB")`
+ veor $zero,$zero,$zero
+ vmul.u32 $Ni,$temp,$M0
+
+ vmull.u32 $A4xB,$Bi,${A2}[0]
+ vld1.32 {$N0-$N3}, [$nptr]!
+ vmull.u32 $A5xB,$Bi,${A2}[1]
+ vmull.u32 $A6xB,$Bi,${A3}[0]
+ vzip.16 $Ni,$zero
+ vmull.u32 $A7xB,$Bi,${A3}[1]
+
+ bne .LNEON_1st
+
+ @ special case for num=8, everything is in register bank...
+
+ vmlal.u32 $A0xB,$Ni,${N0}[0]
+ sub $outer,$num,#1
+ vmlal.u32 $A1xB,$Ni,${N0}[1]
+ vmlal.u32 $A2xB,$Ni,${N1}[0]
+ vmlal.u32 $A3xB,$Ni,${N1}[1]
+
+ vmlal.u32 $A4xB,$Ni,${N2}[0]
+ vmov $Temp,$A0xB
+ vmlal.u32 $A5xB,$Ni,${N2}[1]
+ vmov $A0xB,$A1xB
+ vmlal.u32 $A6xB,$Ni,${N3}[0]
+ vmov $A1xB,$A2xB
+ vmlal.u32 $A7xB,$Ni,${N3}[1]
+ vmov $A2xB,$A3xB
+ vmov $A3xB,$A4xB
+ vshr.u64 $temp,$temp,#16
+ vmov $A4xB,$A5xB
+ vmov $A5xB,$A6xB
+ vadd.u64 $temp,$temp,`&Dhi("$Temp")`
+ vmov $A6xB,$A7xB
+ veor $A7xB,$A7xB
+ vshr.u64 $temp,$temp,#16
+
+ b .LNEON_outer8
+
+.align 4
+.LNEON_outer8:
+ vld1.32 {${Bi}[0]}, [$bptr,:32]!
+ veor $zero,$zero,$zero
+ vzip.16 $Bi,$zero
+ vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
+
+ vmlal.u32 $A0xB,$Bi,${A0}[0]
+ vmlal.u32 $A1xB,$Bi,${A0}[1]
+ vmlal.u32 $A2xB,$Bi,${A1}[0]
+ vshl.i64 $temp,`&Dhi("$A0xB")`,#16
+ vmlal.u32 $A3xB,$Bi,${A1}[1]
+
+ vadd.u64 $temp,$temp,`&Dlo("$A0xB")`
+ veor $zero,$zero,$zero
+ subs $outer,$outer,#1
+ vmul.u32 $Ni,$temp,$M0
+
+ vmlal.u32 $A4xB,$Bi,${A2}[0]
+ vmlal.u32 $A5xB,$Bi,${A2}[1]
+ vmlal.u32 $A6xB,$Bi,${A3}[0]
+ vzip.16 $Ni,$zero
+ vmlal.u32 $A7xB,$Bi,${A3}[1]
+
+ vmlal.u32 $A0xB,$Ni,${N0}[0]
+ vmlal.u32 $A1xB,$Ni,${N0}[1]
+ vmlal.u32 $A2xB,$Ni,${N1}[0]
+ vmlal.u32 $A3xB,$Ni,${N1}[1]
+
+ vmlal.u32 $A4xB,$Ni,${N2}[0]
+ vmov $Temp,$A0xB
+ vmlal.u32 $A5xB,$Ni,${N2}[1]
+ vmov $A0xB,$A1xB
+ vmlal.u32 $A6xB,$Ni,${N3}[0]
+ vmov $A1xB,$A2xB
+ vmlal.u32 $A7xB,$Ni,${N3}[1]
+ vmov $A2xB,$A3xB
+ vmov $A3xB,$A4xB
+ vshr.u64 $temp,$temp,#16
+ vmov $A4xB,$A5xB
+ vmov $A5xB,$A6xB
+ vadd.u64 $temp,$temp,`&Dhi("$Temp")`
+ vmov $A6xB,$A7xB
+ veor $A7xB,$A7xB
+ vshr.u64 $temp,$temp,#16
+
+ bne .LNEON_outer8
+
+ vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
+ mov $toutptr,sp
+ vshr.u64 $temp,`&Dlo("$A0xB")`,#16
+ mov $inner,$num
+ vadd.u64 `&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
+ add $tinptr,sp,#16
+ vshr.u64 $temp,`&Dhi("$A0xB")`,#16
+ vzip.16 `&Dlo("$A0xB")`,`&Dhi("$A0xB")`
+
+ b .LNEON_tail2
+
+.align 4
+.LNEON_1st:
+ vmlal.u32 $A0xB,$Ni,${N0}[0]
+ vld1.32 {$A0-$A3}, [$aptr]!
+ vmlal.u32 $A1xB,$Ni,${N0}[1]
+ subs $inner,$inner,#8
+ vmlal.u32 $A2xB,$Ni,${N1}[0]
+ vmlal.u32 $A3xB,$Ni,${N1}[1]
+
+ vmlal.u32 $A4xB,$Ni,${N2}[0]
+ vld1.32 {$N0-$N1}, [$nptr]!
+ vmlal.u32 $A5xB,$Ni,${N2}[1]
+ vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]!
+ vmlal.u32 $A6xB,$Ni,${N3}[0]
+ vmlal.u32 $A7xB,$Ni,${N3}[1]
+ vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]!
+
+ vmull.u32 $A0xB,$Bi,${A0}[0]
+ vld1.32 {$N2-$N3}, [$nptr]!
+ vmull.u32 $A1xB,$Bi,${A0}[1]
+ vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]!
+ vmull.u32 $A2xB,$Bi,${A1}[0]
+ vmull.u32 $A3xB,$Bi,${A1}[1]
+ vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]!
+
+ vmull.u32 $A4xB,$Bi,${A2}[0]
+ vmull.u32 $A5xB,$Bi,${A2}[1]
+ vmull.u32 $A6xB,$Bi,${A3}[0]
+ vmull.u32 $A7xB,$Bi,${A3}[1]
+
+ bne .LNEON_1st
+
+ vmlal.u32 $A0xB,$Ni,${N0}[0]
+ add $tinptr,sp,#16
+ vmlal.u32 $A1xB,$Ni,${N0}[1]
+ sub $aptr,$aptr,$num,lsl#2 @ rewind $aptr
+ vmlal.u32 $A2xB,$Ni,${N1}[0]
+ vld1.64 {$Temp}, [sp,:128]
+ vmlal.u32 $A3xB,$Ni,${N1}[1]
+ sub $outer,$num,#1
+
+ vmlal.u32 $A4xB,$Ni,${N2}[0]
+ vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]!
+ vmlal.u32 $A5xB,$Ni,${N2}[1]
+ vshr.u64 $temp,$temp,#16
+ vld1.64 {$A0xB}, [$tinptr, :128]!
+ vmlal.u32 $A6xB,$Ni,${N3}[0]
+ vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]!
+ vmlal.u32 $A7xB,$Ni,${N3}[1]
+
+ vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]!
+ vadd.u64 $temp,$temp,`&Dhi("$Temp")`
+ veor $Z,$Z,$Z
+ vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]!
+ vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]!
+ vst1.64 {$Z}, [$toutptr,:128]
+ vshr.u64 $temp,$temp,#16
+
+ b .LNEON_outer
+
+.align 4
+.LNEON_outer:
+ vld1.32 {${Bi}[0]}, [$bptr,:32]!
+ sub $nptr,$nptr,$num,lsl#2 @ rewind $nptr
+ vld1.32 {$A0-$A3}, [$aptr]!
+ veor $zero,$zero,$zero
+ mov $toutptr,sp
+ vzip.16 $Bi,$zero
+ sub $inner,$num,#8
+ vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
+
+ vmlal.u32 $A0xB,$Bi,${A0}[0]
+ vld1.64 {$A3xB-$A4xB},[$tinptr,:256]!
+ vmlal.u32 $A1xB,$Bi,${A0}[1]
+ vmlal.u32 $A2xB,$Bi,${A1}[0]
+ vld1.64 {$A5xB-$A6xB},[$tinptr,:256]!
+ vmlal.u32 $A3xB,$Bi,${A1}[1]
+
+ vshl.i64 $temp,`&Dhi("$A0xB")`,#16
+ veor $zero,$zero,$zero
+ vadd.u64 $temp,$temp,`&Dlo("$A0xB")`
+ vld1.64 {$A7xB},[$tinptr,:128]!
+ vmul.u32 $Ni,$temp,$M0
+
+ vmlal.u32 $A4xB,$Bi,${A2}[0]
+ vld1.32 {$N0-$N3}, [$nptr]!
+ vmlal.u32 $A5xB,$Bi,${A2}[1]
+ vmlal.u32 $A6xB,$Bi,${A3}[0]
+ vzip.16 $Ni,$zero
+ vmlal.u32 $A7xB,$Bi,${A3}[1]
+
+.LNEON_inner:
+ vmlal.u32 $A0xB,$Ni,${N0}[0]
+ vld1.32 {$A0-$A3}, [$aptr]!
+ vmlal.u32 $A1xB,$Ni,${N0}[1]
+ subs $inner,$inner,#8
+ vmlal.u32 $A2xB,$Ni,${N1}[0]
+ vmlal.u32 $A3xB,$Ni,${N1}[1]
+ vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]!
+
+ vmlal.u32 $A4xB,$Ni,${N2}[0]
+ vld1.64 {$A0xB}, [$tinptr, :128]!
+ vmlal.u32 $A5xB,$Ni,${N2}[1]
+ vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]!
+ vmlal.u32 $A6xB,$Ni,${N3}[0]
+ vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]!
+ vmlal.u32 $A7xB,$Ni,${N3}[1]
+ vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]!
+
+ vmlal.u32 $A0xB,$Bi,${A0}[0]
+ vld1.64 {$A3xB-$A4xB}, [$tinptr, :256]!
+ vmlal.u32 $A1xB,$Bi,${A0}[1]
+ vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]!
+ vmlal.u32 $A2xB,$Bi,${A1}[0]
+ vld1.64 {$A5xB-$A6xB}, [$tinptr, :256]!
+ vmlal.u32 $A3xB,$Bi,${A1}[1]
+ vld1.32 {$N0-$N3}, [$nptr]!
+
+ vmlal.u32 $A4xB,$Bi,${A2}[0]
+ vld1.64 {$A7xB}, [$tinptr, :128]!
+ vmlal.u32 $A5xB,$Bi,${A2}[1]
+ vmlal.u32 $A6xB,$Bi,${A3}[0]
+ vmlal.u32 $A7xB,$Bi,${A3}[1]
+
+ bne .LNEON_inner
+
+ vmlal.u32 $A0xB,$Ni,${N0}[0]
+ add $tinptr,sp,#16
+ vmlal.u32 $A1xB,$Ni,${N0}[1]
+ sub $aptr,$aptr,$num,lsl#2 @ rewind $aptr
+ vmlal.u32 $A2xB,$Ni,${N1}[0]
+ vld1.64 {$Temp}, [sp,:128]
+ vmlal.u32 $A3xB,$Ni,${N1}[1]
+ subs $outer,$outer,#1
+
+ vmlal.u32 $A4xB,$Ni,${N2}[0]
+ vst1.64 {$A0xB-$A1xB}, [$toutptr,:256]!
+ vmlal.u32 $A5xB,$Ni,${N2}[1]
+ vld1.64 {$A0xB}, [$tinptr, :128]!
+ vshr.u64 $temp,$temp,#16
+ vst1.64 {$A2xB-$A3xB}, [$toutptr,:256]!
+ vmlal.u32 $A6xB,$Ni,${N3}[0]
+ vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]!
+ vmlal.u32 $A7xB,$Ni,${N3}[1]
+
+ vst1.64 {$A4xB-$A5xB}, [$toutptr,:256]!
+ vadd.u64 $temp,$temp,`&Dhi("$Temp")`
+ vst1.64 {$A6xB-$A7xB}, [$toutptr,:256]!
+ vshr.u64 $temp,$temp,#16
+
+ bne .LNEON_outer
+
+ mov $toutptr,sp
+ mov $inner,$num
+
+.LNEON_tail:
+ vadd.u64 `&Dlo("$A0xB")`,`&Dlo("$A0xB")`,$temp
+ vld1.64 {$A3xB-$A4xB}, [$tinptr, :256]!
+ vshr.u64 $temp,`&Dlo("$A0xB")`,#16
+ vadd.u64 `&Dhi("$A0xB")`,`&Dhi("$A0xB")`,$temp
+ vld1.64 {$A5xB-$A6xB}, [$tinptr, :256]!
+ vshr.u64 $temp,`&Dhi("$A0xB")`,#16
+ vld1.64 {$A7xB}, [$tinptr, :128]!
+ vzip.16 `&Dlo("$A0xB")`,`&Dhi("$A0xB")`
+
+.LNEON_tail2:
+ vadd.u64 `&Dlo("$A1xB")`,`&Dlo("$A1xB")`,$temp
+ vst1.32 {`&Dlo("$A0xB")`[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,`&Dlo("$A1xB")`,#16
+ vadd.u64 `&Dhi("$A1xB")`,`&Dhi("$A1xB")`,$temp
+ vshr.u64 $temp,`&Dhi("$A1xB")`,#16
+ vzip.16 `&Dlo("$A1xB")`,`&Dhi("$A1xB")`
+
+ vadd.u64 `&Dlo("$A2xB")`,`&Dlo("$A2xB")`,$temp
+ vst1.32 {`&Dlo("$A1xB")`[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,`&Dlo("$A2xB")`,#16
+ vadd.u64 `&Dhi("$A2xB")`,`&Dhi("$A2xB")`,$temp
+ vshr.u64 $temp,`&Dhi("$A2xB")`,#16
+ vzip.16 `&Dlo("$A2xB")`,`&Dhi("$A2xB")`
+
+ vadd.u64 `&Dlo("$A3xB")`,`&Dlo("$A3xB")`,$temp
+ vst1.32 {`&Dlo("$A2xB")`[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,`&Dlo("$A3xB")`,#16
+ vadd.u64 `&Dhi("$A3xB")`,`&Dhi("$A3xB")`,$temp
+ vshr.u64 $temp,`&Dhi("$A3xB")`,#16
+ vzip.16 `&Dlo("$A3xB")`,`&Dhi("$A3xB")`
+
+ vadd.u64 `&Dlo("$A4xB")`,`&Dlo("$A4xB")`,$temp
+ vst1.32 {`&Dlo("$A3xB")`[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,`&Dlo("$A4xB")`,#16
+ vadd.u64 `&Dhi("$A4xB")`,`&Dhi("$A4xB")`,$temp
+ vshr.u64 $temp,`&Dhi("$A4xB")`,#16
+ vzip.16 `&Dlo("$A4xB")`,`&Dhi("$A4xB")`
+
+ vadd.u64 `&Dlo("$A5xB")`,`&Dlo("$A5xB")`,$temp
+ vst1.32 {`&Dlo("$A4xB")`[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,`&Dlo("$A5xB")`,#16
+ vadd.u64 `&Dhi("$A5xB")`,`&Dhi("$A5xB")`,$temp
+ vshr.u64 $temp,`&Dhi("$A5xB")`,#16
+ vzip.16 `&Dlo("$A5xB")`,`&Dhi("$A5xB")`
+
+ vadd.u64 `&Dlo("$A6xB")`,`&Dlo("$A6xB")`,$temp
+ vst1.32 {`&Dlo("$A5xB")`[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,`&Dlo("$A6xB")`,#16
+ vadd.u64 `&Dhi("$A6xB")`,`&Dhi("$A6xB")`,$temp
+ vld1.64 {$A0xB}, [$tinptr, :128]!
+ vshr.u64 $temp,`&Dhi("$A6xB")`,#16
+ vzip.16 `&Dlo("$A6xB")`,`&Dhi("$A6xB")`
+
+ vadd.u64 `&Dlo("$A7xB")`,`&Dlo("$A7xB")`,$temp
+ vst1.32 {`&Dlo("$A6xB")`[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,`&Dlo("$A7xB")`,#16
+ vadd.u64 `&Dhi("$A7xB")`,`&Dhi("$A7xB")`,$temp
+ vld1.64 {$A1xB-$A2xB}, [$tinptr, :256]!
+ vshr.u64 $temp,`&Dhi("$A7xB")`,#16
+ vzip.16 `&Dlo("$A7xB")`,`&Dhi("$A7xB")`
+ subs $inner,$inner,#8
+ vst1.32 {`&Dlo("$A7xB")`[0]}, [$toutptr, :32]!
+
+ bne .LNEON_tail
+
+ vst1.32 {${temp}[0]}, [$toutptr, :32] @ top-most bit
+ sub $nptr,$nptr,$num,lsl#2 @ rewind $nptr
+ subs $aptr,sp,#0 @ clear carry flag
+ add $bptr,sp,$num,lsl#2
+
+.LNEON_sub:
+ ldmia $aptr!, {r4-r7}
+ ldmia $nptr!, {r8-r11}
+ sbcs r8, r4,r8
+ sbcs r9, r5,r9
+ sbcs r10,r6,r10
+ sbcs r11,r7,r11
+ teq $aptr,$bptr @ preserves carry
+ stmia $rptr!, {r8-r11}
+ bne .LNEON_sub
+
+ ldr r10, [$aptr] @ load top-most bit
+ veor q0,q0,q0
+ sub r11,$bptr,sp @ this is num*4
+ veor q1,q1,q1
+ mov $aptr,sp
+ sub $rptr,$rptr,r11 @ rewind $rptr
+ mov $nptr,$bptr @ second 3/4th of frame
+ sbcs r10,r10,#0 @ result is carry flag
+
+.LNEON_copy_n_zap:
+ ldmia $aptr!, {r4-r7}
+ ldmia $rptr, {r8-r11}
+ movcc r8, r4
+ vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
+ movcc r9, r5
+ movcc r10,r6
+ vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
+ movcc r11,r7
+ ldmia $aptr, {r4-r7}
+ stmia $rptr!, {r8-r11}
+ sub $aptr,$aptr,#16
+ ldmia $rptr, {r8-r11}
+ movcc r8, r4
+ vst1.64 {q0-q1}, [$aptr,:256]! @ wipe
+ movcc r9, r5
+ movcc r10,r6
+ vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
+ movcc r11,r7
+ teq $aptr,$bptr @ preserves carry
+ stmia $rptr!, {r8-r11}
+ bne .LNEON_copy_n_zap
+
+ sub sp,ip,#96
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r11}
+ ret @ bx lr
+.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
+#endif
+___
+}
+$code.=<<___;
+.asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+#if __ARM_ARCH__>=7
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx lr/gm;
+print $code;
+close STDOUT;
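
The .LNEON_sub and .LNEON_copy_n_zap loops above are the standard final step of a Montgomery multiplication: the tentative result sitting in the scratch frame is reduced by one conditional subtraction of the modulus, selected without a data-dependent branch (sbcs computes t - N, movcc then picks t or t - N from the borrow) while the frame is wiped. A minimal C sketch of that step, assuming the 32-bit words of this build and distinct tp/rp buffers as in the assembly; mont_final_sub is an illustrative name, not an OpenSSL symbol:

    #include <stdint.h>
    #include <stddef.h>

    /* Final reduction of a Montgomery multiply: tp[] holds the tentative
     * result, top is the carry word above it, np[] is the modulus.
     * Store tp - np into rp if top:tp >= np, otherwise tp, selecting by
     * mask rather than by branch (mirrors the sbcs/movcc pairing above). */
    static void mont_final_sub(uint32_t *rp, const uint32_t *tp, uint32_t top,
                               const uint32_t *np, size_t num)
    {
        uint64_t borrow = 0;
        size_t i;

        for (i = 0; i < num; i++) {          /* rp = tp - np, like .LNEON_sub */
            uint64_t d = (uint64_t)tp[i] - np[i] - borrow;
            rp[i] = (uint32_t)d;
            borrow = (d >> 32) & 1;          /* 1 if this word borrowed */
        }
        /* keep the subtracted value unless the subtraction over-borrowed */
        uint32_t mask = (uint32_t)0 - (uint32_t)(top >= borrow);
        for (i = 0; i < num; i++)            /* like .LNEON_copy_n_zap */
            rp[i] = (rp[i] & mask) | (tp[i] & ~mask);
    }
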
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.S
new file mode 100644
index 00000000..66695e26
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.S
@@ -0,0 +1,1533 @@
+.file "crypto/bn/asm/bn-586.s"
+.text
+.globl bn_mul_add_words
+.type bn_mul_add_words,@function
+.align 16
+bn_mul_add_words:
+.L_bn_mul_add_words_begin:
+ call .L000PIC_me_up
+.L000PIC_me_up:
+ popl %eax
+ leal _GLOBAL_OFFSET_TABLE_+[.-.L000PIC_me_up](%eax),%eax
+ movl OPENSSL_ia32cap_P@GOT(%eax),%eax
+ btl $26,(%eax)
+ jnc .L001maw_non_sse2
+ movl 4(%esp),%eax
+ movl 8(%esp),%edx
+ movl 12(%esp),%ecx
+ movd 16(%esp),%mm0
+ pxor %mm1,%mm1
+ jmp .L002maw_sse2_entry
+.align 16
+.L003maw_sse2_unrolled:
+ movd (%eax),%mm3
+ paddq %mm3,%mm1
+ movd (%edx),%mm2
+ pmuludq %mm0,%mm2
+ movd 4(%edx),%mm4
+ pmuludq %mm0,%mm4
+ movd 8(%edx),%mm6
+ pmuludq %mm0,%mm6
+ movd 12(%edx),%mm7
+ pmuludq %mm0,%mm7
+ paddq %mm2,%mm1
+ movd 4(%eax),%mm3
+ paddq %mm4,%mm3
+ movd 8(%eax),%mm5
+ paddq %mm6,%mm5
+ movd 12(%eax),%mm4
+ paddq %mm4,%mm7
+ movd %mm1,(%eax)
+ movd 16(%edx),%mm2
+ pmuludq %mm0,%mm2
+ psrlq $32,%mm1
+ movd 20(%edx),%mm4
+ pmuludq %mm0,%mm4
+ paddq %mm3,%mm1
+ movd 24(%edx),%mm6
+ pmuludq %mm0,%mm6
+ movd %mm1,4(%eax)
+ psrlq $32,%mm1
+ movd 28(%edx),%mm3
+ addl $32,%edx
+ pmuludq %mm0,%mm3
+ paddq %mm5,%mm1
+ movd 16(%eax),%mm5
+ paddq %mm5,%mm2
+ movd %mm1,8(%eax)
+ psrlq $32,%mm1
+ paddq %mm7,%mm1
+ movd 20(%eax),%mm5
+ paddq %mm5,%mm4
+ movd %mm1,12(%eax)
+ psrlq $32,%mm1
+ paddq %mm2,%mm1
+ movd 24(%eax),%mm5
+ paddq %mm5,%mm6
+ movd %mm1,16(%eax)
+ psrlq $32,%mm1
+ paddq %mm4,%mm1
+ movd 28(%eax),%mm5
+ paddq %mm5,%mm3
+ movd %mm1,20(%eax)
+ psrlq $32,%mm1
+ paddq %mm6,%mm1
+ movd %mm1,24(%eax)
+ psrlq $32,%mm1
+ paddq %mm3,%mm1
+ movd %mm1,28(%eax)
+ leal 32(%eax),%eax
+ psrlq $32,%mm1
+ subl $8,%ecx
+ jz .L004maw_sse2_exit
+.L002maw_sse2_entry:
+ testl $4294967288,%ecx
+ jnz .L003maw_sse2_unrolled
+.align 4
+.L005maw_sse2_loop:
+ movd (%edx),%mm2
+ movd (%eax),%mm3
+ pmuludq %mm0,%mm2
+ leal 4(%edx),%edx
+ paddq %mm3,%mm1
+ paddq %mm2,%mm1
+ movd %mm1,(%eax)
+ subl $1,%ecx
+ psrlq $32,%mm1
+ leal 4(%eax),%eax
+ jnz .L005maw_sse2_loop
+.L004maw_sse2_exit:
+ movd %mm1,%eax
+ emms
+ ret
+.align 16
+.L001maw_non_sse2:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ xorl %esi,%esi
+ movl 20(%esp),%edi
+ movl 28(%esp),%ecx
+ movl 24(%esp),%ebx
+ andl $4294967288,%ecx
+ movl 32(%esp),%ebp
+ pushl %ecx
+ jz .L006maw_finish
+.align 16
+.L007maw_loop:
+
+ movl (%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl (%edi),%eax
+ adcl $0,%edx
+ movl %eax,(%edi)
+ movl %edx,%esi
+
+ movl 4(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 4(%edi),%eax
+ adcl $0,%edx
+ movl %eax,4(%edi)
+ movl %edx,%esi
+
+ movl 8(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 8(%edi),%eax
+ adcl $0,%edx
+ movl %eax,8(%edi)
+ movl %edx,%esi
+
+ movl 12(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 12(%edi),%eax
+ adcl $0,%edx
+ movl %eax,12(%edi)
+ movl %edx,%esi
+
+ movl 16(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 16(%edi),%eax
+ adcl $0,%edx
+ movl %eax,16(%edi)
+ movl %edx,%esi
+
+ movl 20(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 20(%edi),%eax
+ adcl $0,%edx
+ movl %eax,20(%edi)
+ movl %edx,%esi
+
+ movl 24(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 24(%edi),%eax
+ adcl $0,%edx
+ movl %eax,24(%edi)
+ movl %edx,%esi
+
+ movl 28(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 28(%edi),%eax
+ adcl $0,%edx
+ movl %eax,28(%edi)
+ movl %edx,%esi
+
+ subl $8,%ecx
+ leal 32(%ebx),%ebx
+ leal 32(%edi),%edi
+ jnz .L007maw_loop
+.L006maw_finish:
+ movl 32(%esp),%ecx
+ andl $7,%ecx
+ jnz .L008maw_finish2
+ jmp .L009maw_end
+.L008maw_finish2:
+
+ movl (%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl (%edi),%eax
+ adcl $0,%edx
+ decl %ecx
+ movl %eax,(%edi)
+ movl %edx,%esi
+ jz .L009maw_end
+
+ movl 4(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 4(%edi),%eax
+ adcl $0,%edx
+ decl %ecx
+ movl %eax,4(%edi)
+ movl %edx,%esi
+ jz .L009maw_end
+
+ movl 8(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 8(%edi),%eax
+ adcl $0,%edx
+ decl %ecx
+ movl %eax,8(%edi)
+ movl %edx,%esi
+ jz .L009maw_end
+
+ movl 12(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 12(%edi),%eax
+ adcl $0,%edx
+ decl %ecx
+ movl %eax,12(%edi)
+ movl %edx,%esi
+ jz .L009maw_end
+
+ movl 16(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 16(%edi),%eax
+ adcl $0,%edx
+ decl %ecx
+ movl %eax,16(%edi)
+ movl %edx,%esi
+ jz .L009maw_end
+
+ movl 20(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 20(%edi),%eax
+ adcl $0,%edx
+ decl %ecx
+ movl %eax,20(%edi)
+ movl %edx,%esi
+ jz .L009maw_end
+
+ movl 24(%ebx),%eax
+ mull %ebp
+ addl %esi,%eax
+ adcl $0,%edx
+ addl 24(%edi),%eax
+ adcl $0,%edx
+ movl %eax,24(%edi)
+ movl %edx,%esi
+.L009maw_end:
+ movl %esi,%eax
+ popl %ecx
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size bn_mul_add_words,.-.L_bn_mul_add_words_begin
+.globl bn_mul_words
+.type bn_mul_words,@function
+.align 16
+bn_mul_words:
+.L_bn_mul_words_begin:
+ call .L010PIC_me_up
+.L010PIC_me_up:
+ popl %eax
+ leal _GLOBAL_OFFSET_TABLE_+[.-.L010PIC_me_up](%eax),%eax
+ movl OPENSSL_ia32cap_P@GOT(%eax),%eax
+ btl $26,(%eax)
+ jnc .L011mw_non_sse2
+ movl 4(%esp),%eax
+ movl 8(%esp),%edx
+ movl 12(%esp),%ecx
+ movd 16(%esp),%mm0
+ pxor %mm1,%mm1
+.align 16
+.L012mw_sse2_loop:
+ movd (%edx),%mm2
+ pmuludq %mm0,%mm2
+ leal 4(%edx),%edx
+ paddq %mm2,%mm1
+ movd %mm1,(%eax)
+ subl $1,%ecx
+ psrlq $32,%mm1
+ leal 4(%eax),%eax
+ jnz .L012mw_sse2_loop
+ movd %mm1,%eax
+ emms
+ ret
+.align 16
+.L011mw_non_sse2:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ xorl %esi,%esi
+ movl 20(%esp),%edi
+ movl 24(%esp),%ebx
+ movl 28(%esp),%ebp
+ movl 32(%esp),%ecx
+ andl $4294967288,%ebp
+ jz .L013mw_finish
+.L014mw_loop:
+
+ movl (%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,(%edi)
+ movl %edx,%esi
+
+ movl 4(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,4(%edi)
+ movl %edx,%esi
+
+ movl 8(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,8(%edi)
+ movl %edx,%esi
+
+ movl 12(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,12(%edi)
+ movl %edx,%esi
+
+ movl 16(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,16(%edi)
+ movl %edx,%esi
+
+ movl 20(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,20(%edi)
+ movl %edx,%esi
+
+ movl 24(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,24(%edi)
+ movl %edx,%esi
+
+ movl 28(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,28(%edi)
+ movl %edx,%esi
+
+ addl $32,%ebx
+ addl $32,%edi
+ subl $8,%ebp
+ jz .L013mw_finish
+ jmp .L014mw_loop
+.L013mw_finish:
+ movl 28(%esp),%ebp
+ andl $7,%ebp
+ jnz .L015mw_finish2
+ jmp .L016mw_end
+.L015mw_finish2:
+
+ movl (%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,(%edi)
+ movl %edx,%esi
+ decl %ebp
+ jz .L016mw_end
+
+ movl 4(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,4(%edi)
+ movl %edx,%esi
+ decl %ebp
+ jz .L016mw_end
+
+ movl 8(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,8(%edi)
+ movl %edx,%esi
+ decl %ebp
+ jz .L016mw_end
+
+ movl 12(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,12(%edi)
+ movl %edx,%esi
+ decl %ebp
+ jz .L016mw_end
+
+ movl 16(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,16(%edi)
+ movl %edx,%esi
+ decl %ebp
+ jz .L016mw_end
+
+ movl 20(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,20(%edi)
+ movl %edx,%esi
+ decl %ebp
+ jz .L016mw_end
+
+ movl 24(%ebx),%eax
+ mull %ecx
+ addl %esi,%eax
+ adcl $0,%edx
+ movl %eax,24(%edi)
+ movl %edx,%esi
+.L016mw_end:
+ movl %esi,%eax
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size bn_mul_words,.-.L_bn_mul_words_begin
+.globl bn_sqr_words
+.type bn_sqr_words,@function
+.align 16
+bn_sqr_words:
+.L_bn_sqr_words_begin:
+ call .L017PIC_me_up
+.L017PIC_me_up:
+ popl %eax
+ leal _GLOBAL_OFFSET_TABLE_+[.-.L017PIC_me_up](%eax),%eax
+ movl OPENSSL_ia32cap_P@GOT(%eax),%eax
+ btl $26,(%eax)
+ jnc .L018sqr_non_sse2
+ movl 4(%esp),%eax
+ movl 8(%esp),%edx
+ movl 12(%esp),%ecx
+.align 16
+.L019sqr_sse2_loop:
+ movd (%edx),%mm0
+ pmuludq %mm0,%mm0
+ leal 4(%edx),%edx
+ movq %mm0,(%eax)
+ subl $1,%ecx
+ leal 8(%eax),%eax
+ jnz .L019sqr_sse2_loop
+ emms
+ ret
+.align 16
+.L018sqr_non_sse2:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ movl 20(%esp),%esi
+ movl 24(%esp),%edi
+ movl 28(%esp),%ebx
+ andl $4294967288,%ebx
+ jz .L020sw_finish
+.L021sw_loop:
+
+ movl (%edi),%eax
+ mull %eax
+ movl %eax,(%esi)
+ movl %edx,4(%esi)
+
+ movl 4(%edi),%eax
+ mull %eax
+ movl %eax,8(%esi)
+ movl %edx,12(%esi)
+
+ movl 8(%edi),%eax
+ mull %eax
+ movl %eax,16(%esi)
+ movl %edx,20(%esi)
+
+ movl 12(%edi),%eax
+ mull %eax
+ movl %eax,24(%esi)
+ movl %edx,28(%esi)
+
+ movl 16(%edi),%eax
+ mull %eax
+ movl %eax,32(%esi)
+ movl %edx,36(%esi)
+
+ movl 20(%edi),%eax
+ mull %eax
+ movl %eax,40(%esi)
+ movl %edx,44(%esi)
+
+ movl 24(%edi),%eax
+ mull %eax
+ movl %eax,48(%esi)
+ movl %edx,52(%esi)
+
+ movl 28(%edi),%eax
+ mull %eax
+ movl %eax,56(%esi)
+ movl %edx,60(%esi)
+
+ addl $32,%edi
+ addl $64,%esi
+ subl $8,%ebx
+ jnz .L021sw_loop
+.L020sw_finish:
+ movl 28(%esp),%ebx
+ andl $7,%ebx
+ jz .L022sw_end
+
+ movl (%edi),%eax
+ mull %eax
+ movl %eax,(%esi)
+ decl %ebx
+ movl %edx,4(%esi)
+ jz .L022sw_end
+
+ movl 4(%edi),%eax
+ mull %eax
+ movl %eax,8(%esi)
+ decl %ebx
+ movl %edx,12(%esi)
+ jz .L022sw_end
+
+ movl 8(%edi),%eax
+ mull %eax
+ movl %eax,16(%esi)
+ decl %ebx
+ movl %edx,20(%esi)
+ jz .L022sw_end
+
+ movl 12(%edi),%eax
+ mull %eax
+ movl %eax,24(%esi)
+ decl %ebx
+ movl %edx,28(%esi)
+ jz .L022sw_end
+
+ movl 16(%edi),%eax
+ mull %eax
+ movl %eax,32(%esi)
+ decl %ebx
+ movl %edx,36(%esi)
+ jz .L022sw_end
+
+ movl 20(%edi),%eax
+ mull %eax
+ movl %eax,40(%esi)
+ decl %ebx
+ movl %edx,44(%esi)
+ jz .L022sw_end
+
+ movl 24(%edi),%eax
+ mull %eax
+ movl %eax,48(%esi)
+ movl %edx,52(%esi)
+.L022sw_end:
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size bn_sqr_words,.-.L_bn_sqr_words_begin
+.globl bn_div_words
+.type bn_div_words,@function
+.align 16
+bn_div_words:
+.L_bn_div_words_begin:
+ movl 4(%esp),%edx
+ movl 8(%esp),%eax
+ movl 12(%esp),%ecx
+ divl %ecx
+ ret
+.size bn_div_words,.-.L_bn_div_words_begin
+.globl bn_add_words
+.type bn_add_words,@function
+.align 16
+bn_add_words:
+.L_bn_add_words_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ movl 20(%esp),%ebx
+ movl 24(%esp),%esi
+ movl 28(%esp),%edi
+ movl 32(%esp),%ebp
+ xorl %eax,%eax
+ andl $4294967288,%ebp
+ jz .L023aw_finish
+.L024aw_loop:
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+
+ movl 4(%esi),%ecx
+ movl 4(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,4(%ebx)
+
+ movl 8(%esi),%ecx
+ movl 8(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,8(%ebx)
+
+ movl 12(%esi),%ecx
+ movl 12(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,12(%ebx)
+
+ movl 16(%esi),%ecx
+ movl 16(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,16(%ebx)
+
+ movl 20(%esi),%ecx
+ movl 20(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,20(%ebx)
+
+ movl 24(%esi),%ecx
+ movl 24(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,24(%ebx)
+
+ movl 28(%esi),%ecx
+ movl 28(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,28(%ebx)
+
+ addl $32,%esi
+ addl $32,%edi
+ addl $32,%ebx
+ subl $8,%ebp
+ jnz .L024aw_loop
+.L023aw_finish:
+ movl 32(%esp),%ebp
+ andl $7,%ebp
+ jz .L025aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,(%ebx)
+ jz .L025aw_end
+
+ movl 4(%esi),%ecx
+ movl 4(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,4(%ebx)
+ jz .L025aw_end
+
+ movl 8(%esi),%ecx
+ movl 8(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,8(%ebx)
+ jz .L025aw_end
+
+ movl 12(%esi),%ecx
+ movl 12(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,12(%ebx)
+ jz .L025aw_end
+
+ movl 16(%esi),%ecx
+ movl 16(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,16(%ebx)
+ jz .L025aw_end
+
+ movl 20(%esi),%ecx
+ movl 20(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,20(%ebx)
+ jz .L025aw_end
+
+ movl 24(%esi),%ecx
+ movl 24(%edi),%edx
+ addl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ addl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,24(%ebx)
+.L025aw_end:
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size bn_add_words,.-.L_bn_add_words_begin
+.globl bn_sub_words
+.type bn_sub_words,@function
+.align 16
+bn_sub_words:
+.L_bn_sub_words_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ movl 20(%esp),%ebx
+ movl 24(%esp),%esi
+ movl 28(%esp),%edi
+ movl 32(%esp),%ebp
+ xorl %eax,%eax
+ andl $4294967288,%ebp
+ jz .L026aw_finish
+.L027aw_loop:
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+
+ movl 4(%esi),%ecx
+ movl 4(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,4(%ebx)
+
+ movl 8(%esi),%ecx
+ movl 8(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,8(%ebx)
+
+ movl 12(%esi),%ecx
+ movl 12(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,12(%ebx)
+
+ movl 16(%esi),%ecx
+ movl 16(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,16(%ebx)
+
+ movl 20(%esi),%ecx
+ movl 20(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,20(%ebx)
+
+ movl 24(%esi),%ecx
+ movl 24(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,24(%ebx)
+
+ movl 28(%esi),%ecx
+ movl 28(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,28(%ebx)
+
+ addl $32,%esi
+ addl $32,%edi
+ addl $32,%ebx
+ subl $8,%ebp
+ jnz .L027aw_loop
+.L026aw_finish:
+ movl 32(%esp),%ebp
+ andl $7,%ebp
+ jz .L028aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,(%ebx)
+ jz .L028aw_end
+
+ movl 4(%esi),%ecx
+ movl 4(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,4(%ebx)
+ jz .L028aw_end
+
+ movl 8(%esi),%ecx
+ movl 8(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,8(%ebx)
+ jz .L028aw_end
+
+ movl 12(%esi),%ecx
+ movl 12(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,12(%ebx)
+ jz .L028aw_end
+
+ movl 16(%esi),%ecx
+ movl 16(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,16(%ebx)
+ jz .L028aw_end
+
+ movl 20(%esi),%ecx
+ movl 20(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,20(%ebx)
+ jz .L028aw_end
+
+ movl 24(%esi),%ecx
+ movl 24(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,24(%ebx)
+.L028aw_end:
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size bn_sub_words,.-.L_bn_sub_words_begin
+.globl bn_sub_part_words
+.type bn_sub_part_words,@function
+.align 16
+bn_sub_part_words:
+.L_bn_sub_part_words_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+
+ movl 20(%esp),%ebx
+ movl 24(%esp),%esi
+ movl 28(%esp),%edi
+ movl 32(%esp),%ebp
+ xorl %eax,%eax
+ andl $4294967288,%ebp
+ jz .L029aw_finish
+.L030aw_loop:
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+
+ movl 4(%esi),%ecx
+ movl 4(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,4(%ebx)
+
+ movl 8(%esi),%ecx
+ movl 8(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,8(%ebx)
+
+ movl 12(%esi),%ecx
+ movl 12(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,12(%ebx)
+
+ movl 16(%esi),%ecx
+ movl 16(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,16(%ebx)
+
+ movl 20(%esi),%ecx
+ movl 20(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,20(%ebx)
+
+ movl 24(%esi),%ecx
+ movl 24(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,24(%ebx)
+
+ movl 28(%esi),%ecx
+ movl 28(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,28(%ebx)
+
+ addl $32,%esi
+ addl $32,%edi
+ addl $32,%ebx
+ subl $8,%ebp
+ jnz .L030aw_loop
+.L029aw_finish:
+ movl 32(%esp),%ebp
+ andl $7,%ebp
+ jz .L031aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+ addl $4,%esi
+ addl $4,%edi
+ addl $4,%ebx
+ decl %ebp
+ jz .L031aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+ addl $4,%esi
+ addl $4,%edi
+ addl $4,%ebx
+ decl %ebp
+ jz .L031aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+ addl $4,%esi
+ addl $4,%edi
+ addl $4,%ebx
+ decl %ebp
+ jz .L031aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+ addl $4,%esi
+ addl $4,%edi
+ addl $4,%ebx
+ decl %ebp
+ jz .L031aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+ addl $4,%esi
+ addl $4,%edi
+ addl $4,%ebx
+ decl %ebp
+ jz .L031aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+ addl $4,%esi
+ addl $4,%edi
+ addl $4,%ebx
+ decl %ebp
+ jz .L031aw_end
+
+ movl (%esi),%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+ addl $4,%esi
+ addl $4,%edi
+ addl $4,%ebx
+.L031aw_end:
+ cmpl $0,36(%esp)
+ je .L032pw_end
+ movl 36(%esp),%ebp
+ cmpl $0,%ebp
+ je .L032pw_end
+ jge .L033pw_pos
+
+ movl $0,%edx
+ subl %ebp,%edx
+ movl %edx,%ebp
+ andl $4294967288,%ebp
+ jz .L034pw_neg_finish
+.L035pw_neg_loop:
+
+ movl $0,%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,(%ebx)
+
+ movl $0,%ecx
+ movl 4(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,4(%ebx)
+
+ movl $0,%ecx
+ movl 8(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,8(%ebx)
+
+ movl $0,%ecx
+ movl 12(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,12(%ebx)
+
+ movl $0,%ecx
+ movl 16(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,16(%ebx)
+
+ movl $0,%ecx
+ movl 20(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,20(%ebx)
+
+ movl $0,%ecx
+ movl 24(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,24(%ebx)
+
+ movl $0,%ecx
+ movl 28(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,28(%ebx)
+
+ addl $32,%edi
+ addl $32,%ebx
+ subl $8,%ebp
+ jnz .L035pw_neg_loop
+.L034pw_neg_finish:
+ movl 36(%esp),%edx
+ movl $0,%ebp
+ subl %edx,%ebp
+ andl $7,%ebp
+ jz .L032pw_end
+
+ movl $0,%ecx
+ movl (%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,(%ebx)
+ jz .L032pw_end
+
+ movl $0,%ecx
+ movl 4(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,4(%ebx)
+ jz .L032pw_end
+
+ movl $0,%ecx
+ movl 8(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,8(%ebx)
+ jz .L032pw_end
+
+ movl $0,%ecx
+ movl 12(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,12(%ebx)
+ jz .L032pw_end
+
+ movl $0,%ecx
+ movl 16(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,16(%ebx)
+ jz .L032pw_end
+
+ movl $0,%ecx
+ movl 20(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ decl %ebp
+ movl %ecx,20(%ebx)
+ jz .L032pw_end
+
+ movl $0,%ecx
+ movl 24(%edi),%edx
+ subl %eax,%ecx
+ movl $0,%eax
+ adcl %eax,%eax
+ subl %edx,%ecx
+ adcl $0,%eax
+ movl %ecx,24(%ebx)
+ jmp .L032pw_end
+.L033pw_pos:
+ andl $4294967288,%ebp
+ jz .L036pw_pos_finish
+.L037pw_pos_loop:
+
+ movl (%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,(%ebx)
+ jnc .L038pw_nc0
+
+ movl 4(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,4(%ebx)
+ jnc .L039pw_nc1
+
+ movl 8(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,8(%ebx)
+ jnc .L040pw_nc2
+
+ movl 12(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,12(%ebx)
+ jnc .L041pw_nc3
+
+ movl 16(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,16(%ebx)
+ jnc .L042pw_nc4
+
+ movl 20(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,20(%ebx)
+ jnc .L043pw_nc5
+
+ movl 24(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,24(%ebx)
+ jnc .L044pw_nc6
+
+ movl 28(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,28(%ebx)
+ jnc .L045pw_nc7
+
+ addl $32,%esi
+ addl $32,%ebx
+ subl $8,%ebp
+ jnz .L037pw_pos_loop
+.L036pw_pos_finish:
+ movl 36(%esp),%ebp
+ andl $7,%ebp
+ jz .L032pw_end
+
+ movl (%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,(%ebx)
+ jnc .L046pw_tail_nc0
+ decl %ebp
+ jz .L032pw_end
+
+ movl 4(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,4(%ebx)
+ jnc .L047pw_tail_nc1
+ decl %ebp
+ jz .L032pw_end
+
+ movl 8(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,8(%ebx)
+ jnc .L048pw_tail_nc2
+ decl %ebp
+ jz .L032pw_end
+
+ movl 12(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,12(%ebx)
+ jnc .L049pw_tail_nc3
+ decl %ebp
+ jz .L032pw_end
+
+ movl 16(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,16(%ebx)
+ jnc .L050pw_tail_nc4
+ decl %ebp
+ jz .L032pw_end
+
+ movl 20(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,20(%ebx)
+ jnc .L051pw_tail_nc5
+ decl %ebp
+ jz .L032pw_end
+
+ movl 24(%esi),%ecx
+ subl %eax,%ecx
+ movl %ecx,24(%ebx)
+ jnc .L052pw_tail_nc6
+ movl $1,%eax
+ jmp .L032pw_end
+.L053pw_nc_loop:
+ movl (%esi),%ecx
+ movl %ecx,(%ebx)
+.L038pw_nc0:
+ movl 4(%esi),%ecx
+ movl %ecx,4(%ebx)
+.L039pw_nc1:
+ movl 8(%esi),%ecx
+ movl %ecx,8(%ebx)
+.L040pw_nc2:
+ movl 12(%esi),%ecx
+ movl %ecx,12(%ebx)
+.L041pw_nc3:
+ movl 16(%esi),%ecx
+ movl %ecx,16(%ebx)
+.L042pw_nc4:
+ movl 20(%esi),%ecx
+ movl %ecx,20(%ebx)
+.L043pw_nc5:
+ movl 24(%esi),%ecx
+ movl %ecx,24(%ebx)
+.L044pw_nc6:
+ movl 28(%esi),%ecx
+ movl %ecx,28(%ebx)
+.L045pw_nc7:
+
+ addl $32,%esi
+ addl $32,%ebx
+ subl $8,%ebp
+ jnz .L053pw_nc_loop
+ movl 36(%esp),%ebp
+ andl $7,%ebp
+ jz .L054pw_nc_end
+ movl (%esi),%ecx
+ movl %ecx,(%ebx)
+.L046pw_tail_nc0:
+ decl %ebp
+ jz .L054pw_nc_end
+ movl 4(%esi),%ecx
+ movl %ecx,4(%ebx)
+.L047pw_tail_nc1:
+ decl %ebp
+ jz .L054pw_nc_end
+ movl 8(%esi),%ecx
+ movl %ecx,8(%ebx)
+.L048pw_tail_nc2:
+ decl %ebp
+ jz .L054pw_nc_end
+ movl 12(%esi),%ecx
+ movl %ecx,12(%ebx)
+.L049pw_tail_nc3:
+ decl %ebp
+ jz .L054pw_nc_end
+ movl 16(%esi),%ecx
+ movl %ecx,16(%ebx)
+.L050pw_tail_nc4:
+ decl %ebp
+ jz .L054pw_nc_end
+ movl 20(%esi),%ecx
+ movl %ecx,20(%ebx)
+.L051pw_tail_nc5:
+ decl %ebp
+ jz .L054pw_nc_end
+ movl 24(%esi),%ecx
+ movl %ecx,24(%ebx)
+.L052pw_tail_nc6:
+.L054pw_nc_end:
+ movl $0,%eax
+.L032pw_end:
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size bn_sub_part_words,.-.L_bn_sub_part_words_begin
+.comm OPENSSL_ia32cap_P,8,4
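
Both code paths of bn_mul_add_words above (the pmuludq-based SSE2 path and the mull-based fallback) implement the same word-serial multiply-accumulate that the rest of this file builds on. A portable C sketch of that contract, assuming the 32-bit BN_ULONG of this i386 build; ref_mul_add_words is an illustrative name, not a symbol from the patch:

    #include <stdint.h>
    #include <stddef.h>

    /* For each i: rp[i] = low32(rp[i] + ap[i]*w + carry), carry = high32(...).
     * Returns the final carry word, as both assembly paths do in %eax. */
    static uint32_t ref_mul_add_words(uint32_t *rp, const uint32_t *ap,
                                      size_t num, uint32_t w)
    {
        uint64_t carry = 0;                   /* plays the role of mm1 / %esi */

        while (num--) {
            uint64_t t = (uint64_t)*ap++ * w  /* mull / pmuludq          */
                       + *rp                  /* addl (%edi),%eax / paddq */
                       + carry;               /* addl %esi,%eax   / paddq */
            *rp++ = (uint32_t)t;              /* store the low word       */
            carry = t >> 32;                  /* psrlq $32 / adcl,%edx    */
        }
        return (uint32_t)carry;
    }
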
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.pl
new file mode 100644
index 00000000..332ef3e9
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-586.pl
@@ -0,0 +1,774 @@
+#!/usr/local/bin/perl
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+&bn_mul_add_words("bn_mul_add_words");
+&bn_mul_words("bn_mul_words");
+&bn_sqr_words("bn_sqr_words");
+&bn_div_words("bn_div_words");
+&bn_add_words("bn_add_words");
+&bn_sub_words("bn_sub_words");
+&bn_sub_part_words("bn_sub_part_words");
+
+&asm_finish();
+
+sub bn_mul_add_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("maw_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+ &movd("mm0",&wparam(3)); # mm0 = w
+ &pxor("mm1","mm1"); # mm1 = carry_in
+ &jmp(&label("maw_sse2_entry"));
+
+ &set_label("maw_sse2_unrolled",16);
+ &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0]
+ &paddq("mm1","mm3"); # mm1 = carry_in + r[0]
+ &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0]
+ &pmuludq("mm2","mm0"); # mm2 = w*a[0]
+ &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1]
+ &pmuludq("mm4","mm0"); # mm4 = w*a[1]
+ &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2]
+ &pmuludq("mm6","mm0"); # mm6 = w*a[2]
+ &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3]
+ &pmuludq("mm7","mm0"); # mm7 = w*a[3]
+ &paddq("mm1","mm2"); # mm1 = carry_in + r[0] + w*a[0]
+ &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1]
+ &paddq("mm3","mm4"); # mm3 = r[1] + w*a[1]
+ &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2]
+ &paddq("mm5","mm6"); # mm5 = r[2] + w*a[2]
+ &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3]
+ &paddq("mm7","mm4"); # mm7 = r[3] + w*a[3]
+ &movd(&DWP(0,$r,"",0),"mm1");
+ &movd("mm2",&DWP(16,$a,"",0)); # mm2 = a[4]
+ &pmuludq("mm2","mm0"); # mm2 = w*a[4]
+ &psrlq("mm1",32); # mm1 = carry0
+ &movd("mm4",&DWP(20,$a,"",0)); # mm4 = a[5]
+ &pmuludq("mm4","mm0"); # mm4 = w*a[5]
+ &paddq("mm1","mm3"); # mm1 = carry0 + r[1] + w*a[1]
+ &movd("mm6",&DWP(24,$a,"",0)); # mm6 = a[6]
+ &pmuludq("mm6","mm0"); # mm6 = w*a[6]
+ &movd(&DWP(4,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry1
+ &movd("mm3",&DWP(28,$a,"",0)); # mm3 = a[7]
+ &add($a,32);
+ &pmuludq("mm3","mm0"); # mm3 = w*a[7]
+ &paddq("mm1","mm5"); # mm1 = carry1 + r[2] + w*a[2]
+ &movd("mm5",&DWP(16,$r,"",0)); # mm5 = r[4]
+ &paddq("mm2","mm5"); # mm2 = r[4] + w*a[4]
+ &movd(&DWP(8,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry2
+ &paddq("mm1","mm7"); # mm1 = carry2 + r[3] + w*a[3]
+ &movd("mm5",&DWP(20,$r,"",0)); # mm5 = r[5]
+ &paddq("mm4","mm5"); # mm4 = r[5] + w*a[5]
+ &movd(&DWP(12,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry3
+ &paddq("mm1","mm2"); # mm1 = carry3 + r[4] + w*a[4]
+ &movd("mm5",&DWP(24,$r,"",0)); # mm5 = r[6]
+ &paddq("mm6","mm5"); # mm6 = r[6] + w*a[6]
+ &movd(&DWP(16,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry4
+ &paddq("mm1","mm4"); # mm1 = carry4 + r[5] + w*a[5]
+ &movd("mm5",&DWP(28,$r,"",0)); # mm5 = r[7]
+ &paddq("mm3","mm5"); # mm3 = r[7] + w*a[7]
+ &movd(&DWP(20,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry5
+ &paddq("mm1","mm6"); # mm1 = carry5 + r[6] + w*a[6]
+ &movd(&DWP(24,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry6
+ &paddq("mm1","mm3"); # mm1 = carry6 + r[7] + w*a[7]
+ &movd(&DWP(28,$r,"",0),"mm1");
+ &lea($r,&DWP(32,$r));
+ &psrlq("mm1",32); # mm1 = carry_out
+
+ &sub($c,8);
+ &jz(&label("maw_sse2_exit"));
+ &set_label("maw_sse2_entry");
+ &test($c,0xfffffff8);
+ &jnz(&label("maw_sse2_unrolled"));
+
+ &set_label("maw_sse2_loop",4);
+ &movd("mm2",&DWP(0,$a)); # mm2 = a[i]
+ &movd("mm3",&DWP(0,$r)); # mm3 = r[i]
+ &pmuludq("mm2","mm0"); # a[i] *= w
+ &lea($a,&DWP(4,$a));
+ &paddq("mm1","mm3"); # carry += r[i]
+ &paddq("mm1","mm2"); # carry += a[i]*w
+ &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low
+ &sub($c,1);
+ &psrlq("mm1",32); # carry = carry_high
+ &lea($r,&DWP(4,$r));
+ &jnz(&label("maw_sse2_loop"));
+ &set_label("maw_sse2_exit");
+ &movd("eax","mm1"); # c = carry_out
+ &emms();
+ &ret();
+
+ &set_label("maw_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ebp";
+ $r="edi";
+ $c="esi";
+
+ &xor($c,$c); # clear carry
+ &mov($r,&wparam(0)); #
+
+ &mov("ecx",&wparam(2)); #
+ &mov($a,&wparam(1)); #
+
+ &and("ecx",0xfffffff8); # num / 8
+ &mov($w,&wparam(3)); #
+
+ &push("ecx"); # Up the stack for a tmp variable
+
+ &jz(&label("maw_finish"));
+
+ &set_label("maw_loop",16);
+
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+= c
+ &adc("edx",0); # H(t)+=carry
+ &add("eax",&DWP($i,$r)); # L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &sub("ecx",8);
+ &lea($a,&DWP(32,$a));
+ &lea($r,&DWP(32,$r));
+ &jnz(&label("maw_loop"));
+
+ &set_label("maw_finish",0);
+ &mov("ecx",&wparam(2)); # get num
+ &and("ecx",7);
+ &jnz(&label("maw_finish2")); # helps branch prediction
+ &jmp(&label("maw_end"));
+
+ &set_label("maw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ &adc("edx",0); # H(t)+=carry
+ &add("eax",&DWP($i*4,$r)); # L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &dec("ecx") if ($i != 7-1);
+ &mov(&DWP($i*4,$r),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &jz(&label("maw_end")) if ($i != 7-1);
+ }
+ &set_label("maw_end",0);
+ &mov("eax",$c);
+
+ &pop("ecx"); # clear variable from
+
+ &function_end($name);
+ }
+
+sub bn_mul_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("mw_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+ &movd("mm0",&wparam(3)); # mm0 = w
+ &pxor("mm1","mm1"); # mm1 = carry = 0
+
+ &set_label("mw_sse2_loop",16);
+ &movd("mm2",&DWP(0,$a)); # mm2 = a[i]
+ &pmuludq("mm2","mm0"); # a[i] *= w
+ &lea($a,&DWP(4,$a));
+ &paddq("mm1","mm2"); # carry += a[i]*w
+ &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low
+ &sub($c,1);
+ &psrlq("mm1",32); # carry = carry_high
+ &lea($r,&DWP(4,$r));
+ &jnz(&label("mw_sse2_loop"));
+
+ &movd("eax","mm1"); # return carry
+ &emms();
+ &ret();
+ &set_label("mw_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ecx";
+ $r="edi";
+ $c="esi";
+ $num="ebp";
+
+ &xor($c,$c); # clear carry
+ &mov($r,&wparam(0)); #
+ &mov($a,&wparam(1)); #
+ &mov($num,&wparam(2)); #
+ &mov($w,&wparam(3)); #
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("mw_finish"));
+
+ &set_label("mw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jz(&label("mw_finish"));
+ &jmp(&label("mw_loop"));
+
+ &set_label("mw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jnz(&label("mw_finish2"));
+ &jmp(&label("mw_end"));
+
+ &set_label("mw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &dec($num) if ($i != 7-1);
+ &jz(&label("mw_end")) if ($i != 7-1);
+ }
+ &set_label("mw_end",0);
+ &mov("eax",$c);
+
+ &function_end($name);
+ }
+
+sub bn_sqr_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("sqr_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+
+ &set_label("sqr_sse2_loop",16);
+ &movd("mm0",&DWP(0,$a)); # mm0 = a[i]
+ &pmuludq("mm0","mm0"); # a[i] *= a[i]
+ &lea($a,&DWP(4,$a)); # a++
+ &movq(&QWP(0,$r),"mm0"); # r[i] = a[i]*a[i]
+ &sub($c,1);
+ &lea($r,&DWP(8,$r)); # r += 2
+ &jnz(&label("sqr_sse2_loop"));
+
+ &emms();
+ &ret();
+ &set_label("sqr_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
+
+ &comment("");
+ $r="esi";
+ $a="edi";
+ $num="ebx";
+
+ &mov($r,&wparam(0)); #
+ &mov($a,&wparam(1)); #
+ &mov($num,&wparam(2)); #
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("sw_finish"));
+
+ &set_label("sw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*2,$r,"",0),"eax"); #
+ &mov(&DWP($i*2+4,$r,"",0),"edx");#
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,64);
+ &sub($num,8);
+ &jnz(&label("sw_loop"));
+
+ &set_label("sw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jz(&label("sw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*8,$r,"",0),"eax"); #
+ &dec($num) if ($i != 7-1);
+ &mov(&DWP($i*8+4,$r,"",0),"edx");
+ &jz(&label("sw_end")) if ($i != 7-1);
+ }
+ &set_label("sw_end",0);
+
+ &function_end($name);
+ }
+
+sub bn_div_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,"");
+ &mov("edx",&wparam(0)); #
+ &mov("eax",&wparam(1)); #
+ &mov("ecx",&wparam(2)); #
+ &div("ecx");
+ &ret();
+ &function_end_B($name);
+ }
+
+sub bn_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+sub bn_sub_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+sub bn_sub_part_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP(0,$a,"",0)); # *a
+ &mov($tmp2,&DWP(0,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP(0,$r,"",0),$tmp1); # *r
+ &add($a, 4);
+ &add($b, 4);
+ &add($r, 4);
+ &dec($num) if ($i != 6);
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+ &cmp(&wparam(4),0);
+ &je(&label("pw_end"));
+
+ &mov($num,&wparam(4)); # get dl
+ &cmp($num,0);
+ &je(&label("pw_end"));
+ &jge(&label("pw_pos"));
+
+ &comment("pw_neg");
+ &mov($tmp2,0);
+ &sub($tmp2,$num);
+ &mov($num,$tmp2);
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("pw_neg_finish"));
+
+ &set_label("pw_neg_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("dl<0 Round $i");
+
+ &mov($tmp1,0);
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_neg_loop"));
+
+ &set_label("pw_neg_finish",0);
+ &mov($tmp2,&wparam(4)); # get dl
+ &mov($num,0);
+ &sub($num,$tmp2);
+ &and($num,7);
+ &jz(&label("pw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("dl<0 Tail Round $i");
+ &mov($tmp1,0);
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("pw_end")) if ($i != 6);
+ }
+
+ &jmp(&label("pw_end"));
+
+ &set_label("pw_pos",0);
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("pw_pos_finish"));
+
+ &set_label("pw_pos_loop",0);
+
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("dl>0 Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &sub($tmp1,$c);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jnc(&label("pw_nc".$i));
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_pos_loop"));
+
+ &set_label("pw_pos_finish",0);
+ &mov($num,&wparam(4)); # get dl
+ &and($num,7);
+ &jz(&label("pw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("dl>0 Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &sub($tmp1,$c);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jnc(&label("pw_tail_nc".$i));
+ &dec($num) if ($i != 6);
+ &jz(&label("pw_end")) if ($i != 6);
+ }
+ &mov($c,1);
+ &jmp(&label("pw_end"));
+
+ &set_label("pw_nc_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &set_label("pw_nc".$i,0);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_nc_loop"));
+
+ &mov($num,&wparam(4)); # get dl
+ &and($num,7);
+ &jz(&label("pw_nc_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &set_label("pw_tail_nc".$i,0);
+ &dec($num) if ($i != 6);
+ &jz(&label("pw_nc_end")) if ($i != 6);
+ }
+
+ &set_label("pw_nc_end",0);
+ &mov($c,0);
+
+ &set_label("pw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
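The pw_neg/pw_pos halves of bn_sub_part_words above cover the words where only one of the two inputs still has data: for dl < 0 the longer b is subtracted from zero, for dl > 0 the remaining borrow is propagated through a, with the pw_nc labels short-circuiting to a plain copy once the borrow dies out. A rough C sketch of that behaviour as read from the generator, simplified (no no-borrow shortcut, 32-bit words); ref_sub_part_words is an illustrative name only:

    #include <stdint.h>

    /* r = a - b over words, where a has cl + max(dl,0) words and b has
     * cl + max(-dl,0) words (dl = len(a) - len(b)); returns the borrow. */
    static uint32_t ref_sub_part_words(uint32_t *r, const uint32_t *a,
                                       const uint32_t *b, int cl, int dl)
    {
        uint32_t borrow = 0;
        int i;

        for (i = 0; i < cl; i++) {                 /* common-length part */
            uint64_t t = (uint64_t)a[i] - b[i] - borrow;
            r[i] = (uint32_t)t;
            borrow = (uint32_t)(t >> 32) & 1;
        }
        if (dl < 0)                                /* b is longer: a[i] is 0 */
            for (; dl < 0; dl++, i++) {
                uint64_t t = (uint64_t)0 - b[i] - borrow;
                r[i] = (uint32_t)t;
                borrow = (uint32_t)(t >> 32) & 1;
            }
        else                                       /* a is longer: b[i] is 0 */
            for (; dl > 0; dl--, i++) {
                uint64_t t = (uint64_t)a[i] - borrow;
                r[i] = (uint32_t)t;
                borrow = (uint32_t)(t >> 32) & 1;
            }
        return borrow;
    }
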
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-mips.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-mips.S
new file mode 100644
index 00000000..2e7cccb7
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/bn-mips.S
@@ -0,0 +1,2175 @@
+.set mips2
+.rdata
+.asciiz "mips3.s, Version 1.2"
+.asciiz "MIPS II/III/IV ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+.text
+.set noat
+
+.align 5
+.globl bn_mul_add_words
+.ent bn_mul_add_words
+bn_mul_add_words:
+ .set noreorder
+ bgtz $6,bn_mul_add_words_internal
+ move $2,$0
+ jr $31
+ move $4,$2
+.end bn_mul_add_words
+
+.align 5
+.ent bn_mul_add_words_internal
+bn_mul_add_words_internal:
+ .set reorder
+ li $3,-4
+ and $8,$6,$3
+ beqz $8,.L_bn_mul_add_words_tail
+
+.L_bn_mul_add_words_loop:
+ lw $12,0($5)
+ multu $12,$7
+ lw $13,0($4)
+ lw $14,4($5)
+ lw $15,4($4)
+ lw $8,2*4($5)
+ lw $9,2*4($4)
+ addu $13,$2
+ sltu $2,$13,$2 # All manuals say it "compares 32-bit
+ # values", but it seems to work fine
+ # even on 64-bit registers.
+ mflo $1
+ mfhi $12
+ addu $13,$1
+ addu $2,$12
+ multu $14,$7
+ sltu $1,$13,$1
+ sw $13,0($4)
+ addu $2,$1
+
+ lw $10,3*4($5)
+ lw $11,3*4($4)
+ addu $15,$2
+ sltu $2,$15,$2
+ mflo $1
+ mfhi $14
+ addu $15,$1
+ addu $2,$14
+ multu $8,$7
+ sltu $1,$15,$1
+ sw $15,4($4)
+ addu $2,$1
+
+ subu $6,4
+ addu $4,4*4
+ addu $5,4*4
+ addu $9,$2
+ sltu $2,$9,$2
+ mflo $1
+ mfhi $8
+ addu $9,$1
+ addu $2,$8
+ multu $10,$7
+ sltu $1,$9,$1
+ sw $9,-2*4($4)
+ addu $2,$1
+
+
+ and $8,$6,$3
+ addu $11,$2
+ sltu $2,$11,$2
+ mflo $1
+ mfhi $10
+ addu $11,$1
+ addu $2,$10
+ sltu $1,$11,$1
+ sw $11,-4($4)
+ .set noreorder
+ bgtz $8,.L_bn_mul_add_words_loop
+ addu $2,$1
+
+ beqz $6,.L_bn_mul_add_words_return
+ nop
+
+.L_bn_mul_add_words_tail:
+ .set reorder
+ lw $12,0($5)
+ multu $12,$7
+ lw $13,0($4)
+ subu $6,1
+ addu $13,$2
+ sltu $2,$13,$2
+ mflo $1
+ mfhi $12
+ addu $13,$1
+ addu $2,$12
+ sltu $1,$13,$1
+ sw $13,0($4)
+ addu $2,$1
+ beqz $6,.L_bn_mul_add_words_return
+
+ lw $12,4($5)
+ multu $12,$7
+ lw $13,4($4)
+ subu $6,1
+ addu $13,$2
+ sltu $2,$13,$2
+ mflo $1
+ mfhi $12
+ addu $13,$1
+ addu $2,$12
+ sltu $1,$13,$1
+ sw $13,4($4)
+ addu $2,$1
+ beqz $6,.L_bn_mul_add_words_return
+
+ lw $12,2*4($5)
+ multu $12,$7
+ lw $13,2*4($4)
+ addu $13,$2
+ sltu $2,$13,$2
+ mflo $1
+ mfhi $12
+ addu $13,$1
+ addu $2,$12
+ sltu $1,$13,$1
+ sw $13,2*4($4)
+ addu $2,$1
+
+.L_bn_mul_add_words_return:
+ .set noreorder
+ jr $31
+ move $4,$2
+.end bn_mul_add_words_internal
+
+.align 5
+.globl bn_mul_words
+.ent bn_mul_words
+bn_mul_words:
+ .set noreorder
+ bgtz $6,bn_mul_words_internal
+ move $2,$0
+ jr $31
+ move $4,$2
+.end bn_mul_words
+
+.align 5
+.ent bn_mul_words_internal
+bn_mul_words_internal:
+ .set reorder
+ li $3,-4
+ and $8,$6,$3
+ beqz $8,.L_bn_mul_words_tail
+
+.L_bn_mul_words_loop:
+ lw $12,0($5)
+ multu $12,$7
+ lw $14,4($5)
+ lw $8,2*4($5)
+ lw $10,3*4($5)
+ mflo $1
+ mfhi $12
+ addu $2,$1
+ sltu $13,$2,$1
+ multu $14,$7
+ sw $2,0($4)
+ addu $2,$13,$12
+
+ subu $6,4
+ addu $4,4*4
+ addu $5,4*4
+ mflo $1
+ mfhi $14
+ addu $2,$1
+ sltu $15,$2,$1
+ multu $8,$7
+ sw $2,-3*4($4)
+ addu $2,$15,$14
+
+ mflo $1
+ mfhi $8
+ addu $2,$1
+ sltu $9,$2,$1
+ multu $10,$7
+ sw $2,-2*4($4)
+ addu $2,$9,$8
+
+ and $8,$6,$3
+ mflo $1
+ mfhi $10
+ addu $2,$1
+ sltu $11,$2,$1
+ sw $2,-4($4)
+ .set noreorder
+ bgtz $8,.L_bn_mul_words_loop
+ addu $2,$11,$10
+
+ beqz $6,.L_bn_mul_words_return
+ nop
+
+.L_bn_mul_words_tail:
+ .set reorder
+ lw $12,0($5)
+ multu $12,$7
+ subu $6,1
+ mflo $1
+ mfhi $12
+ addu $2,$1
+ sltu $13,$2,$1
+ sw $2,0($4)
+ addu $2,$13,$12
+ beqz $6,.L_bn_mul_words_return
+
+ lw $12,4($5)
+ multu $12,$7
+ subu $6,1
+ mflo $1
+ mfhi $12
+ addu $2,$1
+ sltu $13,$2,$1
+ sw $2,4($4)
+ addu $2,$13,$12
+ beqz $6,.L_bn_mul_words_return
+
+ lw $12,2*4($5)
+ multu $12,$7
+ mflo $1
+ mfhi $12
+ addu $2,$1
+ sltu $13,$2,$1
+ sw $2,2*4($4)
+ addu $2,$13,$12
+
+.L_bn_mul_words_return:
+ .set noreorder
+ jr $31
+ move $4,$2
+.end bn_mul_words_internal
+
+.align 5
+.globl bn_sqr_words
+.ent bn_sqr_words
+bn_sqr_words:
+ .set noreorder
+ bgtz $6,bn_sqr_words_internal
+ move $2,$0
+ jr $31
+ move $4,$2
+.end bn_sqr_words
+
+.align 5
+.ent bn_sqr_words_internal
+bn_sqr_words_internal:
+ .set reorder
+ li $3,-4
+ and $8,$6,$3
+ beqz $8,.L_bn_sqr_words_tail
+
+.L_bn_sqr_words_loop:
+ lw $12,0($5)
+ multu $12,$12
+ lw $14,4($5)
+ lw $8,2*4($5)
+ lw $10,3*4($5)
+ mflo $13
+ mfhi $12
+ sw $13,0($4)
+ sw $12,4($4)
+
+ multu $14,$14
+ subu $6,4
+ addu $4,8*4
+ addu $5,4*4
+ mflo $15
+ mfhi $14
+ sw $15,-6*4($4)
+ sw $14,-5*4($4)
+
+ multu $8,$8
+ mflo $9
+ mfhi $8
+ sw $9,-4*4($4)
+ sw $8,-3*4($4)
+
+
+ multu $10,$10
+ and $8,$6,$3
+ mflo $11
+ mfhi $10
+ sw $11,-2*4($4)
+
+ .set noreorder
+ bgtz $8,.L_bn_sqr_words_loop
+ sw $10,-4($4)
+
+ beqz $6,.L_bn_sqr_words_return
+ nop
+
+.L_bn_sqr_words_tail:
+ .set reorder
+ lw $12,0($5)
+ multu $12,$12
+ subu $6,1
+ mflo $13
+ mfhi $12
+ sw $13,0($4)
+ sw $12,4($4)
+ beqz $6,.L_bn_sqr_words_return
+
+ lw $12,4($5)
+ multu $12,$12
+ subu $6,1
+ mflo $13
+ mfhi $12
+ sw $13,2*4($4)
+ sw $12,3*4($4)
+ beqz $6,.L_bn_sqr_words_return
+
+ lw $12,2*4($5)
+ multu $12,$12
+ mflo $13
+ mfhi $12
+ sw $13,4*4($4)
+ sw $12,5*4($4)
+
+.L_bn_sqr_words_return:
+ .set noreorder
+ jr $31
+ move $4,$2
+
+.end bn_sqr_words_internal
+
+.align 5
+.globl bn_add_words
+.ent bn_add_words
+bn_add_words:
+ .set noreorder
+ bgtz $7,bn_add_words_internal
+ move $2,$0
+ jr $31
+ move $4,$2
+.end bn_add_words
+
+.align 5
+.ent bn_add_words_internal
+bn_add_words_internal:
+ .set reorder
+ li $3,-4
+ and $1,$7,$3
+ beqz $1,.L_bn_add_words_tail
+
+.L_bn_add_words_loop:
+ lw $12,0($5)
+ lw $8,0($6)
+ subu $7,4
+ lw $13,4($5)
+ and $1,$7,$3
+ lw $14,2*4($5)
+ addu $6,4*4
+ lw $15,3*4($5)
+ addu $4,4*4
+ lw $9,-3*4($6)
+ addu $5,4*4
+ lw $10,-2*4($6)
+ lw $11,-4($6)
+ addu $8,$12
+ sltu $24,$8,$12
+ addu $12,$8,$2
+ sltu $2,$12,$8
+ sw $12,-4*4($4)
+ addu $2,$24
+
+ addu $9,$13
+ sltu $25,$9,$13
+ addu $13,$9,$2
+ sltu $2,$13,$9
+ sw $13,-3*4($4)
+ addu $2,$25
+
+ addu $10,$14
+ sltu $24,$10,$14
+ addu $14,$10,$2
+ sltu $2,$14,$10
+ sw $14,-2*4($4)
+ addu $2,$24
+
+ addu $11,$15
+ sltu $25,$11,$15
+ addu $15,$11,$2
+ sltu $2,$15,$11
+ sw $15,-4($4)
+
+ .set noreorder
+ bgtz $1,.L_bn_add_words_loop
+ addu $2,$25
+
+ beqz $7,.L_bn_add_words_return
+ nop
+
+.L_bn_add_words_tail:
+ .set reorder
+ lw $12,0($5)
+ lw $8,0($6)
+ addu $8,$12
+ subu $7,1
+ sltu $24,$8,$12
+ addu $12,$8,$2
+ sltu $2,$12,$8
+ sw $12,0($4)
+ addu $2,$24
+ beqz $7,.L_bn_add_words_return
+
+ lw $13,4($5)
+ lw $9,4($6)
+ addu $9,$13
+ subu $7,1
+ sltu $25,$9,$13
+ addu $13,$9,$2
+ sltu $2,$13,$9
+ sw $13,4($4)
+ addu $2,$25
+ beqz $7,.L_bn_add_words_return
+
+ lw $14,2*4($5)
+ lw $10,2*4($6)
+ addu $10,$14
+ sltu $24,$10,$14
+ addu $14,$10,$2
+ sltu $2,$14,$10
+ sw $14,2*4($4)
+ addu $2,$24
+
+.L_bn_add_words_return:
+ .set noreorder
+ jr $31
+ move $4,$2
+
+.end bn_add_words_internal
+
+.align 5
+.globl bn_sub_words
+.ent bn_sub_words
+bn_sub_words:
+ .set noreorder
+ bgtz $7,bn_sub_words_internal
+ move $2,$0
+ jr $31
+ move $4,$0
+.end bn_sub_words
+
+.align 5
+.ent bn_sub_words_internal
+bn_sub_words_internal:
+ .set reorder
+ li $3,-4
+ and $1,$7,$3
+ beqz $1,.L_bn_sub_words_tail
+
+.L_bn_sub_words_loop:
+ lw $12,0($5)
+ lw $8,0($6)
+ subu $7,4
+ lw $13,4($5)
+ and $1,$7,$3
+ lw $14,2*4($5)
+ addu $6,4*4
+ lw $15,3*4($5)
+ addu $4,4*4
+ lw $9,-3*4($6)
+ addu $5,4*4
+ lw $10,-2*4($6)
+ lw $11,-4($6)
+ sltu $24,$12,$8
+ subu $8,$12,$8
+ subu $12,$8,$2
+ sgtu $2,$12,$8
+ sw $12,-4*4($4)
+ addu $2,$24
+
+ sltu $25,$13,$9
+ subu $9,$13,$9
+ subu $13,$9,$2
+ sgtu $2,$13,$9
+ sw $13,-3*4($4)
+ addu $2,$25
+
+
+ sltu $24,$14,$10
+ subu $10,$14,$10
+ subu $14,$10,$2
+ sgtu $2,$14,$10
+ sw $14,-2*4($4)
+ addu $2,$24
+
+ sltu $25,$15,$11
+ subu $11,$15,$11
+ subu $15,$11,$2
+ sgtu $2,$15,$11
+ sw $15,-4($4)
+
+ .set noreorder
+ bgtz $1,.L_bn_sub_words_loop
+ addu $2,$25
+
+ beqz $7,.L_bn_sub_words_return
+ nop
+
+.L_bn_sub_words_tail:
+ .set reorder
+ lw $12,0($5)
+ lw $8,0($6)
+ subu $7,1
+ sltu $24,$12,$8
+ subu $8,$12,$8
+ subu $12,$8,$2
+ sgtu $2,$12,$8
+ sw $12,0($4)
+ addu $2,$24
+ beqz $7,.L_bn_sub_words_return
+
+ lw $13,4($5)
+ subu $7,1
+ lw $9,4($6)
+ sltu $25,$13,$9
+ subu $9,$13,$9
+ subu $13,$9,$2
+ sgtu $2,$13,$9
+ sw $13,4($4)
+ addu $2,$25
+ beqz $7,.L_bn_sub_words_return
+
+ lw $14,2*4($5)
+ lw $10,2*4($6)
+ sltu $24,$14,$10
+ subu $10,$14,$10
+ subu $14,$10,$2
+ sgtu $2,$14,$10
+ sw $14,2*4($4)
+ addu $2,$24
+
+.L_bn_sub_words_return:
+ .set noreorder
+ jr $31
+ move $4,$2
+.end bn_sub_words_internal
+
+.align 5
+.globl bn_div_3_words
+.ent bn_div_3_words
+bn_div_3_words:
+ .set noreorder
+ move $7,$4 # we know that bn_div_words does not
+ # touch $7, $10, $11 and preserves $6
+ # so that we can save two arguments
+ # and return address in registers
+ # instead of stack:-)
+
+ lw $4,($7)
+ move $10,$5
+ bne $4,$6,bn_div_3_words_internal
+ lw $5,-4($7)
+ li $2,-1
+ jr $31
+ move $4,$2
+.end bn_div_3_words
+
+.align 5
+.ent bn_div_3_words_internal
+bn_div_3_words_internal:
+ .set reorder
+ move $11,$31
+ bal bn_div_words_internal
+ move $31,$11
+ multu $10,$2
+ lw $14,-2*4($7)
+ move $8,$0
+ mfhi $13
+ mflo $12
+ sltu $24,$13,$5
+.L_bn_div_3_words_inner_loop:
+ bnez $24,.L_bn_div_3_words_inner_loop_done
+ sgeu $1,$14,$12
+ seq $25,$13,$5
+ and $1,$25
+ sltu $15,$12,$10
+ addu $5,$6
+ subu $13,$15
+ subu $12,$10
+ sltu $24,$13,$5
+ sltu $8,$5,$6
+ or $24,$8
+ .set noreorder
+ beqz $1,.L_bn_div_3_words_inner_loop
+ subu $2,1
+ addu $2,1
+ .set reorder
+.L_bn_div_3_words_inner_loop_done:
+ .set noreorder
+ jr $31
+ move $4,$2
+.end bn_div_3_words_internal
+
+.align 5
+.globl bn_div_words
+.ent bn_div_words
+bn_div_words:
+ .set noreorder
+ bnez $6,bn_div_words_internal
+ li $2,-1 # I would rather signal div-by-zero
+ # which can be done with 'break 7'
+ jr $31
+ move $4,$2
+.end bn_div_words
+
+.align 5
+.ent bn_div_words_internal
+bn_div_words_internal:
+ move $3,$0
+ bltz $6,.L_bn_div_words_body
+ move $25,$3
+ sll $6,1
+ bgtz $6,.-4
+ addu $25,1
+
+ .set reorder
+ negu $13,$25
+ li $14,-1
+ sll $14,$13
+ and $14,$4
+ srl $1,$5,$13
+ .set noreorder
+ beqz $14,.+12
+ nop
+ break 6 # signal overflow
+ .set reorder
+ sll $4,$25
+ sll $5,$25
+ or $4,$1
+.L_bn_div_words_body:
+ srl $3,$6,4*4 # bits
+ sgeu $1,$4,$6
+ .set noreorder
+ beqz $1,.+12
+ nop
+ subu $4,$6
+ .set reorder
+
+ li $8,-1
+ srl $9,$4,4*4 # bits
+ srl $8,4*4 # q=0xffffffff
+ beq $3,$9,.L_bn_div_words_skip_div1
+ divu $0,$4,$3
+ mflo $8
+.L_bn_div_words_skip_div1:
+ multu $6,$8
+ sll $15,$4,4*4 # bits
+ srl $1,$5,4*4 # bits
+ or $15,$1
+ mflo $12
+ mfhi $13
+.L_bn_div_words_inner_loop1:
+ sltu $14,$15,$12
+ seq $24,$9,$13
+ sltu $1,$9,$13
+ and $14,$24
+ sltu $2,$12,$6
+ or $1,$14
+ .set noreorder
+ beqz $1,.L_bn_div_words_inner_loop1_done
+ subu $13,$2
+ subu $12,$6
+ b .L_bn_div_words_inner_loop1
+ subu $8,1
+ .set reorder
+.L_bn_div_words_inner_loop1_done:
+
+ sll $5,4*4 # bits
+ subu $4,$15,$12
+ sll $2,$8,4*4 # bits
+
+ li $8,-1
+ srl $9,$4,4*4 # bits
+ srl $8,4*4 # q=0xffffffff
+ beq $3,$9,.L_bn_div_words_skip_div2
+ divu $0,$4,$3
+ mflo $8
+.L_bn_div_words_skip_div2:
+ multu $6,$8
+ sll $15,$4,4*4 # bits
+ srl $1,$5,4*4 # bits
+ or $15,$1
+ mflo $12
+ mfhi $13
+.L_bn_div_words_inner_loop2:
+ sltu $14,$15,$12
+ seq $24,$9,$13
+ sltu $1,$9,$13
+ and $14,$24
+ sltu $3,$12,$6
+ or $1,$14
+ .set noreorder
+ beqz $1,.L_bn_div_words_inner_loop2_done
+ subu $13,$3
+ subu $12,$6
+ b .L_bn_div_words_inner_loop2
+ subu $8,1
+ .set reorder
+.L_bn_div_words_inner_loop2_done:
+
+ subu $4,$15,$12
+ or $2,$8
+ srl $3,$4,$25 # $3 contains remainder if anybody wants it
+ srl $6,$25 # restore $6
+
+ .set noreorder
+ move $5,$3
+ jr $31
+ move $4,$2
+.end bn_div_words_internal
+
+.align 5
+.globl bn_mul_comba8
+.ent bn_mul_comba8
+bn_mul_comba8:
+ .set noreorder
+ .frame $29,6*4,$31
+ .mask 0x003f0000,-4
+ subu $29,6*4
+ sw $21,5*4($29)
+ sw $20,4*4($29)
+ sw $19,3*4($29)
+ sw $18,2*4($29)
+ sw $17,1*4($29)
+ sw $16,0*4($29)
+
+ .set reorder
+ lw $12,0($5) # If compiled with -mips3 option on
+ # R5000 box assembler barks on this
+				# line with "should not have mult/div
+ # as last instruction in bb (R10K
+ # bug)" warning. If anybody out there
+ # has a clue about how to circumvent
+ # this do send me a note.
+ # <appro@fy.chalmers.se>
+
+ lw $8,0($6)
+ lw $13,4($5)
+ lw $14,2*4($5)
+ multu $12,$8 # mul_add_c(a[0],b[0],c1,c2,c3);
+ lw $15,3*4($5)
+ lw $9,4($6)
+ lw $10,2*4($6)
+ lw $11,3*4($6)
+ mflo $2
+ mfhi $3
+
+ lw $16,4*4($5)
+ lw $18,5*4($5)
+ multu $12,$9 # mul_add_c(a[0],b[1],c2,c3,c1);
+ lw $20,6*4($5)
+ lw $5,7*4($5)
+ lw $17,4*4($6)
+ lw $19,5*4($6)
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $13,$8 # mul_add_c(a[1],b[0],c2,c3,c1);
+ addu $7,$25,$1
+ lw $21,6*4($6)
+ lw $6,7*4($6)
+ sw $2,0($4) # r[0]=c1;
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $14,$8 # mul_add_c(a[2],b[0],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $2,$7,$25
+ sw $3,4($4) # r[1]=c2;
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $13,$9 # mul_add_c(a[1],b[1],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $12,$10 # mul_add_c(a[0],b[2],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $3,$2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $12,$11 # mul_add_c(a[0],b[3],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,2*4($4) # r[2]=c3;
+
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $13,$10 # mul_add_c(a[1],b[2],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $7,$3,$25
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $14,$9 # mul_add_c(a[2],b[1],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $15,$8 # mul_add_c(a[3],b[0],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $16,$8 # mul_add_c(a[4],b[0],c2,c3,c1);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,3*4($4) # r[3]=c1;
+
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $15,$9 # mul_add_c(a[3],b[1],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $2,$7,$25
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $14,$10 # mul_add_c(a[2],b[2],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $13,$11 # mul_add_c(a[1],b[3],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $12,$17 # mul_add_c(a[0],b[4],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $12,$19 # mul_add_c(a[0],b[5],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,4*4($4) # r[4]=c2;
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $13,$17 # mul_add_c(a[1],b[4],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $3,$2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $14,$11 # mul_add_c(a[2],b[3],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $15,$10 # mul_add_c(a[3],b[2],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $16,$9 # mul_add_c(a[4],b[1],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $18,$8 # mul_add_c(a[5],b[0],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $20,$8 # mul_add_c(a[6],b[0],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,5*4($4) # r[5]=c3;
+
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $18,$9 # mul_add_c(a[5],b[1],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $7,$3,$25
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $16,$10 # mul_add_c(a[4],b[2],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $15,$11 # mul_add_c(a[3],b[3],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $14,$17 # mul_add_c(a[2],b[4],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $13,$19 # mul_add_c(a[1],b[5],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $12,$21 # mul_add_c(a[0],b[6],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $12,$6 # mul_add_c(a[0],b[7],c2,c3,c1);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,6*4($4) # r[6]=c1;
+
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $13,$21 # mul_add_c(a[1],b[6],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $2,$7,$25
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $14,$19 # mul_add_c(a[2],b[5],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $15,$17 # mul_add_c(a[3],b[4],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $16,$11 # mul_add_c(a[4],b[3],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $18,$10 # mul_add_c(a[5],b[2],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $20,$9 # mul_add_c(a[6],b[1],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $5,$8 # mul_add_c(a[7],b[0],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $5,$9 # mul_add_c(a[7],b[1],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,7*4($4) # r[7]=c2;
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $20,$10 # mul_add_c(a[6],b[2],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $3,$2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $18,$11 # mul_add_c(a[5],b[3],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $16,$17 # mul_add_c(a[4],b[4],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $15,$19 # mul_add_c(a[3],b[5],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $14,$21 # mul_add_c(a[2],b[6],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $13,$6 # mul_add_c(a[1],b[7],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $14,$6 # mul_add_c(a[2],b[7],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,8*4($4) # r[8]=c3;
+
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $15,$21 # mul_add_c(a[3],b[6],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $7,$3,$25
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $16,$19 # mul_add_c(a[4],b[5],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $18,$17 # mul_add_c(a[5],b[4],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $20,$11 # mul_add_c(a[6],b[3],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $5,$10 # mul_add_c(a[7],b[2],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $5,$11 # mul_add_c(a[7],b[3],c2,c3,c1);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,9*4($4) # r[9]=c1;
+
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $20,$17 # mul_add_c(a[6],b[4],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $2,$7,$25
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $18,$19 # mul_add_c(a[5],b[5],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $16,$21 # mul_add_c(a[4],b[6],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $15,$6 # mul_add_c(a[3],b[7],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $16,$6 # mul_add_c(a[4],b[7],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,10*4($4) # r[10]=c2;
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $18,$21 # mul_add_c(a[5],b[6],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $3,$2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $20,$19 # mul_add_c(a[6],b[5],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $5,$17 # mul_add_c(a[7],b[4],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $5,$19 # mul_add_c(a[7],b[5],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,11*4($4) # r[11]=c3;
+
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $20,$21 # mul_add_c(a[6],b[6],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $7,$3,$25
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $18,$6 # mul_add_c(a[5],b[7],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $20,$6 # mul_add_c(a[6],b[7],c2,c3,c1);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,12*4($4) # r[12]=c1;
+
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $5,$21 # mul_add_c(a[7],b[6],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $2,$7,$25
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $5,$6 # mul_add_c(a[7],b[7],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,13*4($4) # r[13]=c2;
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sw $7,14*4($4) # r[14]=c3;
+ sw $2,15*4($4) # r[15]=c1;
+
+ .set noreorder
+ lw $21,5*4($29)
+ lw $20,4*4($29)
+ lw $19,3*4($29)
+ lw $18,2*4($29)
+ lw $17,1*4($29)
+ lw $16,0*4($29)
+ jr $31
+ addu $29,6*4
+.end bn_mul_comba8
+
+.align 5
+.globl bn_mul_comba4
+.ent bn_mul_comba4
+bn_mul_comba4:
+ .set reorder
+ lw $12,0($5)
+ lw $8,0($6)
+ lw $13,4($5)
+ lw $14,2*4($5)
+ multu $12,$8 # mul_add_c(a[0],b[0],c1,c2,c3);
+ lw $15,3*4($5)
+ lw $9,4($6)
+ lw $10,2*4($6)
+ lw $11,3*4($6)
+ mflo $2
+ mfhi $3
+ sw $2,0($4)
+
+ multu $12,$9 # mul_add_c(a[0],b[1],c2,c3,c1);
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $13,$8 # mul_add_c(a[1],b[0],c2,c3,c1);
+ addu $7,$25,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $14,$8 # mul_add_c(a[2],b[0],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $2,$7,$25
+ sw $3,4($4)
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $13,$9 # mul_add_c(a[1],b[1],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $12,$10 # mul_add_c(a[0],b[2],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $3,$2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $12,$11 # mul_add_c(a[0],b[3],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,2*4($4)
+
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $13,$10 # mul_add_c(a[1],b[2],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $7,$3,$25
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $14,$9 # mul_add_c(a[2],b[1],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $15,$8 # mul_add_c(a[3],b[0],c1,c2,c3);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $15,$9 # mul_add_c(a[3],b[1],c2,c3,c1);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,3*4($4)
+
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $14,$10 # mul_add_c(a[2],b[2],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $2,$7,$25
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $13,$11 # mul_add_c(a[1],b[3],c2,c3,c1);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $14,$11 # mul_add_c(a[2],b[3],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,4*4($4)
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $15,$10 # mul_add_c(a[3],b[2],c3,c1,c2);
+ addu $25,$1
+ addu $2,$25
+ sltu $3,$2,$25
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $15,$11 # mul_add_c(a[3],b[3],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,5*4($4)
+
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sw $2,6*4($4)
+ sw $3,7*4($4)
+
+ .set noreorder
+ jr $31
+ nop
+.end bn_mul_comba4
+
+.align 5
+.globl bn_sqr_comba8
+.ent bn_sqr_comba8
+bn_sqr_comba8:
+ .set reorder
+ lw $12,0($5)
+ lw $13,4($5)
+ lw $14,2*4($5)
+ lw $15,3*4($5)
+
+ multu $12,$12 # mul_add_c(a[0],b[0],c1,c2,c3);
+ lw $8,4*4($5)
+ lw $9,5*4($5)
+ lw $10,6*4($5)
+ lw $11,7*4($5)
+ mflo $2
+ mfhi $3
+ sw $2,0($4)
+
+ multu $12,$13 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $24
+ mfhi $25
+ slt $2,$25,$0
+ sll $25,1
+ multu $14,$12 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $7,$25,$1
+ sw $3,4($4)
+
+ mflo $24
+ mfhi $25
+ slt $3,$25,$0
+ sll $25,1
+ multu $13,$13 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $12,$15 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,2*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $7,$25,$0
+ sll $25,1
+ multu $13,$14 # mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $7,$1
+ multu $8,$12 # mul_add_c2(a[4],b[0],c2,c3,c1);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,3*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $2,$25,$0
+ sll $25,1
+ multu $15,$13 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $2,$1
+ multu $14,$14 # mul_add_c(a[2],b[2],c2,c3,c1);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $12,$9 # mul_add_c2(a[0],b[5],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,4*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $3,$25,$0
+ sll $25,1
+ multu $13,$8 # mul_add_c2(a[1],b[4],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $3,$1
+ multu $14,$15 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ multu $10,$12 # mul_add_c2(a[6],b[0],c1,c2,c3);
+ addu $3,$1
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,5*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $7,$25,$0
+ sll $25,1
+ multu $9,$13 # mul_add_c2(a[5],b[1],c1,c2,c3);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $7,$1
+ multu $8,$14 # mul_add_c2(a[4],b[2],c1,c2,c3);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $7,$1
+ multu $15,$15 # mul_add_c(a[3],b[3],c1,c2,c3);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $12,$11 # mul_add_c2(a[0],b[7],c2,c3,c1);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,6*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $2,$25,$0
+ sll $25,1
+ multu $13,$10 # mul_add_c2(a[1],b[6],c2,c3,c1);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $2,$1
+ multu $14,$9 # mul_add_c2(a[2],b[5],c2,c3,c1);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $2,$1
+ multu $15,$8 # mul_add_c2(a[3],b[4],c2,c3,c1);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $2,$1
+ multu $11,$13 # mul_add_c2(a[7],b[1],c3,c1,c2);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,7*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $3,$25,$0
+ sll $25,1
+ multu $10,$14 # mul_add_c2(a[6],b[2],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $3,$1
+ multu $9,$15 # mul_add_c2(a[5],b[3],c3,c1,c2);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $3,$1
+ multu $8,$8 # mul_add_c(a[4],b[4],c3,c1,c2);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $14,$11 # mul_add_c2(a[2],b[7],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,8*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $7,$25,$0
+ sll $25,1
+ multu $15,$10 # mul_add_c2(a[3],b[6],c1,c2,c3);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $7,$1
+ multu $8,$9 # mul_add_c2(a[4],b[5],c1,c2,c3);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $7,$1
+ multu $11,$15 # mul_add_c2(a[7],b[3],c2,c3,c1);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,9*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $2,$25,$0
+ sll $25,1
+ multu $10,$8 # mul_add_c2(a[6],b[4],c2,c3,c1);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $2,$1
+ multu $9,$9 # mul_add_c(a[5],b[5],c2,c3,c1);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $8,$11 # mul_add_c2(a[4],b[7],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,10*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $3,$25,$0
+ sll $25,1
+ multu $9,$10 # mul_add_c2(a[5],b[6],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $3,$1
+ multu $11,$9 # mul_add_c2(a[7],b[5],c1,c2,c3);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,11*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $7,$25,$0
+ sll $25,1
+ multu $10,$10 # mul_add_c(a[6],b[6],c1,c2,c3);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ multu $10,$11 # mul_add_c2(a[6],b[7],c2,c3,c1);
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,12*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $2,$25,$0
+ sll $25,1
+ multu $11,$11 # mul_add_c(a[7],b[7],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,13*4($4)
+
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sw $7,14*4($4)
+ sw $2,15*4($4)
+
+ .set noreorder
+ jr $31
+ nop
+.end bn_sqr_comba8
+
+.align 5
+.globl bn_sqr_comba4
+.ent bn_sqr_comba4
+bn_sqr_comba4:
+ .set reorder
+ lw $12,0($5)
+ lw $13,4($5)
+ multu $12,$12 # mul_add_c(a[0],b[0],c1,c2,c3);
+ lw $14,2*4($5)
+ lw $15,3*4($5)
+ mflo $2
+ mfhi $3
+ sw $2,0($4)
+
+ multu $12,$13 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $24
+ mfhi $25
+ slt $2,$25,$0
+ sll $25,1
+ multu $14,$12 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $7,$25,$1
+ sw $3,4($4)
+
+ mflo $24
+ mfhi $25
+ slt $3,$25,$0
+ sll $25,1
+ multu $13,$13 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ mflo $24
+ mfhi $25
+ addu $7,$24
+ sltu $1,$7,$24
+ multu $12,$15 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,2*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $7,$25,$0
+ sll $25,1
+ multu $13,$14 # mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ mflo $24
+ mfhi $25
+ slt $1,$25,$0
+ addu $7,$1
+ multu $15,$13 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ sll $25,1
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sltu $1,$3,$25
+ addu $7,$1
+ sw $2,3*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $2,$25,$0
+ sll $25,1
+ multu $14,$14 # mul_add_c(a[2],b[2],c2,c3,c1);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $3,$24
+ sltu $1,$3,$24
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ mflo $24
+ mfhi $25
+ addu $3,$24
+ sltu $1,$3,$24
+ multu $14,$15 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ addu $25,$1
+ addu $7,$25
+ sltu $1,$7,$25
+ addu $2,$1
+ sw $3,4*4($4)
+
+ mflo $24
+ mfhi $25
+ slt $3,$25,$0
+ sll $25,1
+ multu $15,$15 # mul_add_c(a[3],b[3],c1,c2,c3);
+ slt $6,$24,$0
+ addu $25,$6
+ sll $24,1
+ addu $7,$24
+ sltu $1,$7,$24
+ addu $25,$1
+ addu $2,$25
+ sltu $1,$2,$25
+ addu $3,$1
+ sw $7,5*4($4)
+
+ mflo $24
+ mfhi $25
+ addu $2,$24
+ sltu $1,$2,$24
+ addu $25,$1
+ addu $3,$25
+ sw $2,6*4($4)
+ sw $3,7*4($4)
+
+ .set noreorder
+ jr $31
+ nop
+.end bn_sqr_comba4
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.S
new file mode 100644
index 00000000..3cb80735
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.S
@@ -0,0 +1,1254 @@
+.file "crypto/bn/asm/co-586.s"
+.text
+.globl bn_mul_comba8
+.type bn_mul_comba8,@function
+.align 16
+bn_mul_comba8:
+.L_bn_mul_comba8_begin:
+ pushl %esi
+ movl 12(%esp),%esi
+ pushl %edi
+ movl 20(%esp),%edi
+ pushl %ebp
+ pushl %ebx
+ xorl %ebx,%ebx
+ movl (%esi),%eax
+ xorl %ecx,%ecx
+ movl (%edi),%edx
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ movl (%edi),%edx
+ adcl $0,%ebp
+ movl %ebx,(%eax)
+ movl 4(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl (%esi),%eax
+ adcl %edx,%ebp
+ movl 4(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esp),%eax
+ adcl %edx,%ebp
+ movl (%edi),%edx
+ adcl $0,%ebx
+ movl %ecx,4(%eax)
+ movl 8(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 4(%esi),%eax
+ adcl %edx,%ebx
+ movl 4(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl (%esi),%eax
+ adcl %edx,%ebx
+ movl 8(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esp),%eax
+ adcl %edx,%ebx
+ movl (%edi),%edx
+ adcl $0,%ecx
+ movl %ebp,8(%eax)
+ movl 12(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 8(%esi),%eax
+ adcl %edx,%ecx
+ movl 4(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 4(%esi),%eax
+ adcl %edx,%ecx
+ movl 8(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl (%esi),%eax
+ adcl %edx,%ecx
+ movl 12(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ movl (%edi),%edx
+ adcl $0,%ebp
+ movl %ebx,12(%eax)
+ movl 16(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 12(%esi),%eax
+ adcl %edx,%ebp
+ movl 4(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 8(%esi),%eax
+ adcl %edx,%ebp
+ movl 8(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 4(%esi),%eax
+ adcl %edx,%ebp
+ movl 12(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl (%esi),%eax
+ adcl %edx,%ebp
+ movl 16(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esp),%eax
+ adcl %edx,%ebp
+ movl (%edi),%edx
+ adcl $0,%ebx
+ movl %ecx,16(%eax)
+ movl 20(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 16(%esi),%eax
+ adcl %edx,%ebx
+ movl 4(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 12(%esi),%eax
+ adcl %edx,%ebx
+ movl 8(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 8(%esi),%eax
+ adcl %edx,%ebx
+ movl 12(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 4(%esi),%eax
+ adcl %edx,%ebx
+ movl 16(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl (%esi),%eax
+ adcl %edx,%ebx
+ movl 20(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esp),%eax
+ adcl %edx,%ebx
+ movl (%edi),%edx
+ adcl $0,%ecx
+ movl %ebp,20(%eax)
+ movl 24(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esi),%eax
+ adcl %edx,%ecx
+ movl 4(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 16(%esi),%eax
+ adcl %edx,%ecx
+ movl 8(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 12(%esi),%eax
+ adcl %edx,%ecx
+ movl 12(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 8(%esi),%eax
+ adcl %edx,%ecx
+ movl 16(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 4(%esi),%eax
+ adcl %edx,%ecx
+ movl 20(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl (%esi),%eax
+ adcl %edx,%ecx
+ movl 24(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ movl (%edi),%edx
+ adcl $0,%ebp
+ movl %ebx,24(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 24(%esi),%eax
+ adcl %edx,%ebp
+ movl 4(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esi),%eax
+ adcl %edx,%ebp
+ movl 8(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 16(%esi),%eax
+ adcl %edx,%ebp
+ movl 12(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 12(%esi),%eax
+ adcl %edx,%ebp
+ movl 16(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 8(%esi),%eax
+ adcl %edx,%ebp
+ movl 20(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 4(%esi),%eax
+ adcl %edx,%ebp
+ movl 24(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl (%esi),%eax
+ adcl %edx,%ebp
+ movl 28(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esp),%eax
+ adcl %edx,%ebp
+ movl 4(%edi),%edx
+ adcl $0,%ebx
+ movl %ecx,28(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 24(%esi),%eax
+ adcl %edx,%ebx
+ movl 8(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esi),%eax
+ adcl %edx,%ebx
+ movl 12(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 16(%esi),%eax
+ adcl %edx,%ebx
+ movl 16(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 12(%esi),%eax
+ adcl %edx,%ebx
+ movl 20(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 8(%esi),%eax
+ adcl %edx,%ebx
+ movl 24(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 4(%esi),%eax
+ adcl %edx,%ebx
+ movl 28(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esp),%eax
+ adcl %edx,%ebx
+ movl 8(%edi),%edx
+ adcl $0,%ecx
+ movl %ebp,32(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 24(%esi),%eax
+ adcl %edx,%ecx
+ movl 12(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esi),%eax
+ adcl %edx,%ecx
+ movl 16(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 16(%esi),%eax
+ adcl %edx,%ecx
+ movl 20(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 12(%esi),%eax
+ adcl %edx,%ecx
+ movl 24(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 8(%esi),%eax
+ adcl %edx,%ecx
+ movl 28(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ movl 12(%edi),%edx
+ adcl $0,%ebp
+ movl %ebx,36(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 24(%esi),%eax
+ adcl %edx,%ebp
+ movl 16(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esi),%eax
+ adcl %edx,%ebp
+ movl 20(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 16(%esi),%eax
+ adcl %edx,%ebp
+ movl 24(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 12(%esi),%eax
+ adcl %edx,%ebp
+ movl 28(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esp),%eax
+ adcl %edx,%ebp
+ movl 16(%edi),%edx
+ adcl $0,%ebx
+ movl %ecx,40(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 24(%esi),%eax
+ adcl %edx,%ebx
+ movl 20(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esi),%eax
+ adcl %edx,%ebx
+ movl 24(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 16(%esi),%eax
+ adcl %edx,%ebx
+ movl 28(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esp),%eax
+ adcl %edx,%ebx
+ movl 20(%edi),%edx
+ adcl $0,%ecx
+ movl %ebp,44(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 24(%esi),%eax
+ adcl %edx,%ecx
+ movl 24(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esi),%eax
+ adcl %edx,%ecx
+ movl 28(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ movl 24(%edi),%edx
+ adcl $0,%ebp
+ movl %ebx,48(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 24(%esi),%eax
+ adcl %edx,%ebp
+ movl 28(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esp),%eax
+ adcl %edx,%ebp
+ movl 28(%edi),%edx
+ adcl $0,%ebx
+ movl %ecx,52(%eax)
+ movl 28(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esp),%eax
+ adcl %edx,%ebx
+ adcl $0,%ecx
+ movl %ebp,56(%eax)
+
+
+ movl %ebx,60(%eax)
+ popl %ebx
+ popl %ebp
+ popl %edi
+ popl %esi
+ ret
+.size bn_mul_comba8,.-.L_bn_mul_comba8_begin
+.globl bn_mul_comba4
+.type bn_mul_comba4,@function
+.align 16
+bn_mul_comba4:
+.L_bn_mul_comba4_begin:
+ pushl %esi
+ movl 12(%esp),%esi
+ pushl %edi
+ movl 20(%esp),%edi
+ pushl %ebp
+ pushl %ebx
+ xorl %ebx,%ebx
+ movl (%esi),%eax
+ xorl %ecx,%ecx
+ movl (%edi),%edx
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ movl (%edi),%edx
+ adcl $0,%ebp
+ movl %ebx,(%eax)
+ movl 4(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl (%esi),%eax
+ adcl %edx,%ebp
+ movl 4(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esp),%eax
+ adcl %edx,%ebp
+ movl (%edi),%edx
+ adcl $0,%ebx
+ movl %ecx,4(%eax)
+ movl 8(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 4(%esi),%eax
+ adcl %edx,%ebx
+ movl 4(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl (%esi),%eax
+ adcl %edx,%ebx
+ movl 8(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esp),%eax
+ adcl %edx,%ebx
+ movl (%edi),%edx
+ adcl $0,%ecx
+ movl %ebp,8(%eax)
+ movl 12(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 8(%esi),%eax
+ adcl %edx,%ecx
+ movl 4(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 4(%esi),%eax
+ adcl %edx,%ecx
+ movl 8(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl (%esi),%eax
+ adcl %edx,%ecx
+ movl 12(%edi),%edx
+ adcl $0,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ movl 4(%edi),%edx
+ adcl $0,%ebp
+ movl %ebx,12(%eax)
+ movl 12(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 8(%esi),%eax
+ adcl %edx,%ebp
+ movl 8(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 4(%esi),%eax
+ adcl %edx,%ebp
+ movl 12(%edi),%edx
+ adcl $0,%ebx
+
+ mull %edx
+ addl %eax,%ecx
+ movl 20(%esp),%eax
+ adcl %edx,%ebp
+ movl 8(%edi),%edx
+ adcl $0,%ebx
+ movl %ecx,16(%eax)
+ movl 12(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 8(%esi),%eax
+ adcl %edx,%ebx
+ movl 12(%edi),%edx
+ adcl $0,%ecx
+
+ mull %edx
+ addl %eax,%ebp
+ movl 20(%esp),%eax
+ adcl %edx,%ebx
+ movl 12(%edi),%edx
+ adcl $0,%ecx
+ movl %ebp,20(%eax)
+ movl 12(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%ebx
+ movl 20(%esp),%eax
+ adcl %edx,%ecx
+ adcl $0,%ebp
+ movl %ebx,24(%eax)
+
+
+ movl %ecx,28(%eax)
+ popl %ebx
+ popl %ebp
+ popl %edi
+ popl %esi
+ ret
+.size bn_mul_comba4,.-.L_bn_mul_comba4_begin
+.globl bn_sqr_comba8
+.type bn_sqr_comba8,@function
+.align 16
+bn_sqr_comba8:
+.L_bn_sqr_comba8_begin:
+ pushl %esi
+ pushl %edi
+ pushl %ebp
+ pushl %ebx
+ movl 20(%esp),%edi
+ movl 24(%esp),%esi
+ xorl %ebx,%ebx
+ xorl %ecx,%ecx
+ movl (%esi),%eax
+
+ xorl %ebp,%ebp
+
+ mull %eax
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl (%esi),%edx
+ adcl $0,%ebp
+ movl %ebx,(%edi)
+ movl 4(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 8(%esi),%eax
+ adcl $0,%ebx
+ movl %ecx,4(%edi)
+ movl (%esi),%edx
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 4(%esi),%eax
+ adcl $0,%ecx
+
+ mull %eax
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl (%esi),%edx
+ adcl $0,%ecx
+ movl %ebp,8(%edi)
+ movl 12(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 8(%esi),%eax
+ adcl $0,%ebp
+ movl 4(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 16(%esi),%eax
+ adcl $0,%ebp
+ movl %ebx,12(%edi)
+ movl (%esi),%edx
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 12(%esi),%eax
+ adcl $0,%ebx
+ movl 4(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 8(%esi),%eax
+ adcl $0,%ebx
+
+ mull %eax
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl (%esi),%edx
+ adcl $0,%ebx
+ movl %ecx,16(%edi)
+ movl 20(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 16(%esi),%eax
+ adcl $0,%ecx
+ movl 4(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 12(%esi),%eax
+ adcl $0,%ecx
+ movl 8(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 24(%esi),%eax
+ adcl $0,%ecx
+ movl %ebp,20(%edi)
+ movl (%esi),%edx
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 20(%esi),%eax
+ adcl $0,%ebp
+ movl 4(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 16(%esi),%eax
+ adcl $0,%ebp
+ movl 8(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 12(%esi),%eax
+ adcl $0,%ebp
+
+ mull %eax
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl (%esi),%edx
+ adcl $0,%ebp
+ movl %ebx,24(%edi)
+ movl 28(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 24(%esi),%eax
+ adcl $0,%ebx
+ movl 4(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 20(%esi),%eax
+ adcl $0,%ebx
+ movl 8(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 16(%esi),%eax
+ adcl $0,%ebx
+ movl 12(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 28(%esi),%eax
+ adcl $0,%ebx
+ movl %ecx,28(%edi)
+ movl 4(%esi),%edx
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 24(%esi),%eax
+ adcl $0,%ecx
+ movl 8(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 20(%esi),%eax
+ adcl $0,%ecx
+ movl 12(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 16(%esi),%eax
+ adcl $0,%ecx
+
+ mull %eax
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 8(%esi),%edx
+ adcl $0,%ecx
+ movl %ebp,32(%edi)
+ movl 28(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 24(%esi),%eax
+ adcl $0,%ebp
+ movl 12(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 20(%esi),%eax
+ adcl $0,%ebp
+ movl 16(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 28(%esi),%eax
+ adcl $0,%ebp
+ movl %ebx,36(%edi)
+ movl 12(%esi),%edx
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 24(%esi),%eax
+ adcl $0,%ebx
+ movl 16(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 20(%esi),%eax
+ adcl $0,%ebx
+
+ mull %eax
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 16(%esi),%edx
+ adcl $0,%ebx
+ movl %ecx,40(%edi)
+ movl 28(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 24(%esi),%eax
+ adcl $0,%ecx
+ movl 20(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 28(%esi),%eax
+ adcl $0,%ecx
+ movl %ebp,44(%edi)
+ movl 20(%esi),%edx
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 24(%esi),%eax
+ adcl $0,%ebp
+
+ mull %eax
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 24(%esi),%edx
+ adcl $0,%ebp
+ movl %ebx,48(%edi)
+ movl 28(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 28(%esi),%eax
+ adcl $0,%ebx
+ movl %ecx,52(%edi)
+
+
+ xorl %ecx,%ecx
+
+ mull %eax
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ adcl $0,%ecx
+ movl %ebp,56(%edi)
+
+ movl %ebx,60(%edi)
+ popl %ebx
+ popl %ebp
+ popl %edi
+ popl %esi
+ ret
+.size bn_sqr_comba8,.-.L_bn_sqr_comba8_begin
+.globl bn_sqr_comba4
+.type bn_sqr_comba4,@function
+.align 16
+bn_sqr_comba4:
+.L_bn_sqr_comba4_begin:
+ pushl %esi
+ pushl %edi
+ pushl %ebp
+ pushl %ebx
+ movl 20(%esp),%edi
+ movl 24(%esp),%esi
+ xorl %ebx,%ebx
+ xorl %ecx,%ecx
+ movl (%esi),%eax
+
+ xorl %ebp,%ebp
+
+ mull %eax
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl (%esi),%edx
+ adcl $0,%ebp
+ movl %ebx,(%edi)
+ movl 4(%esi),%eax
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 8(%esi),%eax
+ adcl $0,%ebx
+ movl %ecx,4(%edi)
+ movl (%esi),%edx
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 4(%esi),%eax
+ adcl $0,%ecx
+
+ mull %eax
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl (%esi),%edx
+ adcl $0,%ecx
+ movl %ebp,8(%edi)
+ movl 12(%esi),%eax
+
+
+ xorl %ebp,%ebp
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 8(%esi),%eax
+ adcl $0,%ebp
+ movl 4(%esi),%edx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebp
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ movl 12(%esi),%eax
+ adcl $0,%ebp
+ movl %ebx,12(%edi)
+ movl 4(%esi),%edx
+
+
+ xorl %ebx,%ebx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ebx
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 8(%esi),%eax
+ adcl $0,%ebx
+
+ mull %eax
+ addl %eax,%ecx
+ adcl %edx,%ebp
+ movl 8(%esi),%edx
+ adcl $0,%ebx
+ movl %ecx,16(%edi)
+ movl 12(%esi),%eax
+
+
+ xorl %ecx,%ecx
+
+ mull %edx
+ addl %eax,%eax
+ adcl %edx,%edx
+ adcl $0,%ecx
+ addl %eax,%ebp
+ adcl %edx,%ebx
+ movl 12(%esi),%eax
+ adcl $0,%ecx
+ movl %ebp,20(%edi)
+
+
+ xorl %ebp,%ebp
+
+ mull %eax
+ addl %eax,%ebx
+ adcl %edx,%ecx
+ adcl $0,%ebp
+ movl %ebx,24(%edi)
+
+ movl %ecx,28(%edi)
+ popl %ebx
+ popl %ebp
+ popl %edi
+ popl %esi
+ ret
+.size bn_sqr_comba4,.-.L_bn_sqr_comba4_begin
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.pl
new file mode 100644
index 00000000..57101a6b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/co-586.pl
@@ -0,0 +1,287 @@
+#!/usr/local/bin/perl
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+&bn_mul_comba("bn_mul_comba8",8);
+&bn_mul_comba("bn_mul_comba4",4);
+&bn_sqr_comba("bn_sqr_comba8",8);
+&bn_sqr_comba("bn_sqr_comba4",4);
+
+&asm_finish();
+
+sub mul_add_c
+ {
+ local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+ # pos == -1 if eax and edx are pre-loaded, 0 to load from next
+ # words, and 1 to load the return value
+
+ &comment("mul a[$ai]*b[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ &mul("edx");
+ &add($c0,"eax");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # laod next a
+ &mov("eax",&wparam(0)) if $pos > 0; # load r[]
+ ###
+ &adc($c1,"edx");
+ &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0; # laod next b
+ &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1; # laod next b
+ ###
+ &adc($c2,0);
+ # if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[];
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # laod next a
+ }
+
+sub sqr_add_c
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+ # pos == -1 if eax and edx are pre-loaded, 0 to load from next
+ # words, and 1 to load the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add($c0,"eax");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ ###
+ &adc($c1,"edx");
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
+ ###
+ &adc($c2,0);
+ # if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ }
+
+sub sqr_add_c2
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+ # pos == -1 if eax and edx are pre-loaded, 0 to load from next
+ # words, and 1 to load the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$a,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add("eax","eax");
+ ###
+ &adc("edx","edx");
+ ###
+ &adc($c2,0);
+ &add($c0,"eax");
+ &adc($c1,"edx");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ &adc($c2,0);
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
+ ###
+ }
+
+sub bn_mul_comba
+ {
+ local($name,$num)=@_;
+ local($a,$b,$c0,$c1,$c2);
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($tot,$end);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $b="edi";
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ &push("esi");
+ &mov($a,&wparam(1));
+ &push("edi");
+ &mov($b,&wparam(2));
+ &push("ebp");
+ &push("ebx");
+
+ &xor($c0,$c0);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+ &xor($c1,$c1);
+ &mov("edx",&DWP(0,$b,"",0)); # load the first second
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("################## Calculate word $i");
+
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($j+1) == $end)
+ {
+ $v=1;
+ $v=2 if (($i+1) == $tot);
+ }
+ else
+ { $v=0; }
+ if (($j+1) != $end)
+ {
+ $na=($ai-1);
+ $nb=($bi+1);
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
+ &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ # &mov("eax",&wparam(0));
+ # &mov(&DWP($i*4,"eax","",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &comment("save r[$i]");
+ # &mov("eax",&wparam(0));
+ &mov(&DWP($i*4,"eax","",0),$c0);
+
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
+sub bn_sqr_comba
+ {
+ local($name,$num)=@_;
+ local($r,$a,$c0,$c1,$c2)=@_;
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($b,$tot,$end,$half);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $r="edi";
+
+ &push("esi");
+ &push("edi");
+ &push("ebp");
+ &push("ebx");
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &xor($c0,$c0);
+ &xor($c1,$c1);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("############### Calculate word $i");
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($ai-1) < ($bi+1))
+ {
+ $v=1;
+ $v=2 if ($i+1) == $tot;
+ }
+ else
+ { $v=0; }
+ if (!$v)
+ {
+ $na=$ai-1;
+ $nb=$bi+1;
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+ if ($ai == $bi)
+ {
+ &sqr_add_c($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ else
+ {
+ &sqr_add_c2($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ #&mov(&DWP($i*4,$r,"",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ last;
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &mov(&DWP($i*4,$r,"",0),$c0);
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64-mont.pl
new file mode 100644
index 00000000..e2586584
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64-mont.pl
@@ -0,0 +1,851 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2010
+#
+# "Teaser" Montgomery multiplication module for IA-64. There are
+# several possibilities for improvement:
+#
+# - modulo-scheduling outer loop would eliminate quite a number of
+# stalls after ldf8, xma and getf.sig outside inner loop and
+# improve shorter key performance;
+# - shorter vector support [with input vectors being fetched only
+# once] should be added;
+# - 2x unroll with help of n0[1] would make the code scalable on
+# "wider" IA-64, "wider" than Itanium 2 that is, which is not of
+# acute interest, because upcoming Tukwila's individual cores are
+# reportedly based on Itanium 2 design;
+# - dedicated squaring procedure(?);
+#
+# January 2010
+#
+# Shorter vector support is implemented by zero-padding ap and np
+# vectors up to 8 elements, or 512 bits. This means that 256-bit
+# inputs will be processed only 2 times faster than 512-bit inputs,
+# not 4 [as one would expect, because algorithm complexity is n^2].
+# The reason for padding is that inputs shorter than 512 bits won't
+# be processed faster anyway, because minimal critical path of the
+# core loop happens to match 512-bit timing. Either way, it resulted
+# in >100% improvement of 512-bit RSA sign benchmark and 50% - of
+# 1024-bit one [in comparison to original version of *this* module].
+#
+# So far 'openssl speed rsa dsa' output on 900MHz Itanium 2 *with*
+# this module is:
+# sign verify sign/s verify/s
+# rsa 512 bits 0.000290s 0.000024s 3452.8 42031.4
+# rsa 1024 bits 0.000793s 0.000058s 1261.7 17172.0
+# rsa 2048 bits 0.005908s 0.000148s 169.3 6754.0
+# rsa 4096 bits 0.033456s 0.000469s 29.9 2133.6
+# dsa 512 bits 0.000253s 0.000198s 3949.9 5057.0
+# dsa 1024 bits 0.000585s 0.000607s 1708.4 1647.4
+# dsa 2048 bits 0.001453s 0.001703s 688.1 587.4
+#
+# ... and *without* (but still with ia64.S):
+#
+# rsa 512 bits 0.000670s 0.000041s 1491.8 24145.5
+# rsa 1024 bits 0.001988s 0.000080s 502.9 12499.3
+# rsa 2048 bits 0.008702s 0.000189s 114.9 5293.9
+# rsa 4096 bits 0.043860s 0.000533s 22.8 1875.9
+# dsa 512 bits 0.000441s 0.000427s 2265.3 2340.6
+# dsa 1024 bits 0.000823s 0.000867s 1215.6 1153.2
+# dsa 2048 bits 0.001894s 0.002179s 528.1 458.9
+#
+# As can be seen, RSA sign performance improves by 130-30%, less so for
+# longer keys, while verify improves by 74-13%.
+# DSA performance improves by 115-30%.
+
+if ($^O eq "hpux") {
+ $ADDP="addp4";
+ for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
+} else { $ADDP="add"; }
+
+$code=<<___;
+.explicit
+.text
+
+// int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap,
+// const BN_ULONG *bp,const BN_ULONG *np,
+// const BN_ULONG *n0p,int num);
+.align 64
+.global bn_mul_mont#
+.proc bn_mul_mont#
+bn_mul_mont:
+ .prologue
+ .body
+{ .mmi; cmp4.le p6,p7=2,r37;;
+(p6) cmp4.lt.unc p8,p9=8,r37
+ mov ret0=r0 };;
+{ .bbb;
+(p9) br.cond.dptk.many bn_mul_mont_8
+(p8) br.cond.dpnt.many bn_mul_mont_general
+(p7) br.ret.spnt.many b0 };;
+.endp bn_mul_mont#
+
+prevfs=r2; prevpr=r3; prevlc=r10; prevsp=r11;
+
+rptr=r8; aptr=r9; bptr=r14; nptr=r15;
+tptr=r16; // &tp[0]
+tp_1=r17; // &tp[-1]
+num=r18; len=r19; lc=r20;
+topbit=r21; // carry bit from tmp[num]
+
+n0=f6;
+m0=f7;
+bi=f8;
+
+.align 64
+.local bn_mul_mont_general#
+.proc bn_mul_mont_general#
+bn_mul_mont_general:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ $ADDP aptr=0,in1
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; .vframe prevsp
+ mov prevsp=sp
+ $ADDP bptr=0,in2
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+ .body
+ .rotf alo[6],nlo[4],ahi[8],nhi[6]
+ .rotr a[3],n[3],t[2]
+
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 alo[4]=[aptr],16 // ap[0]
+ $ADDP r30=8,in1 };;
+{ .mmi; ldf8 alo[3]=[r30],16 // ap[1]
+ ldf8 alo[2]=[aptr],16 // ap[2]
+ $ADDP in4=0,in4 };;
+{ .mmi; ldf8 alo[1]=[r30] // ap[3]
+ ldf8 n0=[in4] // n0
+ $ADDP rptr=0,in0 }
+{ .mmi; $ADDP nptr=0,in3
+ mov r31=16
+ zxt4 num=in5 };;
+{ .mmi; ldf8 nlo[2]=[nptr],8 // np[0]
+ shladd len=num,3,r0
+ shladd r31=num,3,r31 };;
+{ .mmi; ldf8 nlo[1]=[nptr],8 // np[1]
+ add lc=-5,num
+ sub r31=sp,r31 };;
+{ .mfb; and sp=-16,r31 // alloca
+ xmpy.hu ahi[2]=alo[4],bi // ap[0]*bp[0]
+ nop.b 0 }
+{ .mfb; nop.m 0
+ xmpy.lu alo[4]=alo[4],bi
+ brp.loop.imp .L1st_ctop,.L1st_cend-16
+ };;
+{ .mfi; nop.m 0
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[0]
+ add tp_1=8,sp }
+{ .mfi; nop.m 0
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20001f<<16
+ // ------^----- (p40) at first (p23)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; nop.m 0
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[0])*n0
+ mov ar.lc=lc }
+{ .mfi; nop.m 0
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+.align 32
+.L1st_ctop:
+.pred.rel "mutex",p40,p42
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23) }
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)(p16)
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p42) cmp.leu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p23) st8 [tp_1]=n[2],8
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mmb; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ br.ctop.sptk .L1st_ctop };;
+.L1st_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ add num=-1,num };; // num--
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ sub aptr=aptr,len };; // rewind
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+ sub nptr=nptr,len };;
+{ .mmi; .pred.rel "mutex",p39,p41
+(p39) add topbit=r0,r0
+(p41) add topbit=r0,r0,1
+ nop.i 0 }
+{ .mmi; st8 [tp_1]=n[0]
+ add tptr=16,sp
+ add tp_1=8,sp };;
+
+.Louter:
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 ahi[3]=[tptr] // tp[0]
+ add r30=8,aptr };;
+{ .mmi; ldf8 alo[4]=[aptr],16 // ap[0]
+ ldf8 alo[3]=[r30],16 // ap[1]
+ add r31=8,nptr };;
+{ .mfb; ldf8 alo[2]=[aptr],16 // ap[2]
+ xma.hu ahi[2]=alo[4],bi,ahi[3] // ap[0]*bp[i]+tp[0]
+ brp.loop.imp .Linner_ctop,.Linner_cend-16
+ }
+{ .mfb; ldf8 alo[1]=[r30] // ap[3]
+ xma.lu alo[4]=alo[4],bi,ahi[3]
+ clrrrb.pr };;
+{ .mfi; ldf8 nlo[2]=[nptr],16 // np[0]
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[i]
+ nop.i 0 }
+{ .mfi; ldf8 nlo[1]=[r31] // np[1]
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20101f<<16
+ // ------^----- (p40) at first (p23)
+ // --------^--- (p30) at first (p22)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; st8 [tptr]=r0 // tp[0] is already accounted
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[i]+tp[0])*n0
+ mov ar.lc=lc }
+{ .mfi;
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+// This loop spins in 4*(n+7) ticks on Itanium 2 and should spin in
+// 7*(n+7) ticks on Itanium (the one codenamed Merced). Factor of 7
+// in latter case accounts for two-tick pipeline stall, which means
+// that its performance would be ~20% lower than optimal one. No
+// attempt was made to address this, because original Itanium is
+// hardly represented out in the wild...
+.align 32
+.Linner_ctop:
+.pred.rel "mutex",p40,p42
+.pred.rel "mutex",p30,p32
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23)
+{ .mfi; (p16) nop.m 0
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p16) nop.f 0
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p21) ld8 t[0]=[tptr],8
+ (p16) nop.f 0
+ (p42) cmp.leu p41,p39=n[2],a[2] };; // (p23)
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p30) add a[1]=a[1],t[1] } // (p22)
+{ .mfi; (p16) nop.m 0
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p32) add a[1]=a[1],t[1],1 };; // (p22)
+{ .mmi; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ (p30) cmp.ltu p31,p29=a[1],t[1] } // (p22)
+{ .mmb; (p23) st8 [tp_1]=n[2],8
+ (p32) cmp.leu p31,p29=a[1],t[1] // (p22)
+ br.ctop.sptk .Linner_ctop };;
+.Linner_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ nop.i 0 };;
+
+{ .mmi; .pred.rel "mutex",p31,p33
+(p31) add a[0]=a[0],topbit
+(p33) add a[0]=a[0],topbit,1
+ mov topbit=r0 };;
+{ .mfi; .pred.rel "mutex",p31,p33
+(p31) cmp.ltu p32,p30=a[0],topbit
+(p33) cmp.leu p32,p30=a[0],topbit
+ }
+{ .mfi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ };;
+{ .mmi; .pred.rel "mutex",p44,p46
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+(p32) add topbit=r0,r0,1 }
+
+{ .mmi; st8 [tp_1]=n[0],8
+ cmp4.ne p6,p0=1,num
+ sub aptr=aptr,len };; // rewind
+{ .mmi; sub nptr=nptr,len
+(p41) add topbit=r0,r0,1
+ add tptr=16,sp }
+{ .mmb; add tp_1=8,sp
+ add num=-1,num // num--
+(p6) br.cond.sptk.many .Louter };;
+
+{ .mbb; add lc=4,lc
+ brp.loop.imp .Lsub_ctop,.Lsub_cend-16
+ clrrrb.pr };;
+{ .mii; nop.m 0
+ mov pr.rot=0x10001<<16
+ // ------^---- (p33) at first (p17)
+ mov ar.lc=lc }
+{ .mii; nop.m 0
+ mov ar.ec=3
+ nop.i 0 };;
+
+.Lsub_ctop:
+.pred.rel "mutex",p33,p35
+{ .mfi; (p16) ld8 t[0]=[tptr],8 // t=*(tp++)
+ (p16) nop.f 0
+ (p33) sub n[1]=t[1],n[1] } // (p17)
+{ .mfi; (p16) ld8 n[0]=[nptr],8 // n=*(np++)
+ (p16) nop.f 0
+ (p35) sub n[1]=t[1],n[1],1 };; // (p17)
+{ .mib; (p18) st8 [rptr]=n[2],8 // *(rp++)=r
+ (p33) cmp.gtu p34,p32=n[1],t[1] // (p17)
+ (p18) nop.b 0 }
+{ .mib; (p18) nop.m 0
+ (p35) cmp.geu p34,p32=n[1],t[1] // (p17)
+ br.ctop.sptk .Lsub_ctop };;
+.Lsub_cend:
+
+{ .mmb; .pred.rel "mutex",p34,p36
+(p34) sub topbit=topbit,r0 // (p19)
+(p36) sub topbit=topbit,r0,1
+ brp.loop.imp .Lcopy_ctop,.Lcopy_cend-16
+ }
+{ .mmb; sub rptr=rptr,len // rewind
+ sub tptr=tptr,len
+ clrrrb.pr };;
+{ .mmi; and aptr=tptr,topbit
+ andcm bptr=rptr,topbit
+ mov pr.rot=1<<16 };;
+{ .mii; or nptr=aptr,bptr
+ mov ar.lc=lc
+ mov ar.ec=3 };;
+
+.Lcopy_ctop:
+{ .mmb; (p16) ld8 n[0]=[nptr],8
+ (p18) st8 [tptr]=r0,8
+ (p16) nop.b 0 }
+{ .mmb; (p16) nop.m 0
+ (p18) st8 [rptr]=n[2],8
+ br.ctop.sptk .Lcopy_ctop };;
+.Lcopy_cend:
+
+{ .mmi; mov ret0=1 // signal "handled"
+ rum 1<<5 // clear um.mfh
+ mov ar.lc=prevlc }
+{ .mib; .restore sp
+ mov sp=prevsp
+ mov pr=prevpr,0x1ffff
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_general#
+
+a1=r16; a2=r17; a3=r18; a4=r19; a5=r20; a6=r21; a7=r22; a8=r23;
+n1=r24; n2=r25; n3=r26; n4=r27; n5=r28; n6=r29; n7=r30; n8=r31;
+t0=r15;
+
+ai0=f8; ai1=f9; ai2=f10; ai3=f11; ai4=f12; ai5=f13; ai6=f14; ai7=f15;
+ni0=f16; ni1=f17; ni2=f18; ni3=f19; ni4=f20; ni5=f21; ni6=f22; ni7=f23;
+
+.align 64
+.skip 48 // aligns loop body
+.local bn_mul_mont_8#
+.proc bn_mul_mont_8#
+bn_mul_mont_8:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ .vframe prevsp
+ mov prevsp=sp
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; add r17=-6*16,sp
+ add sp=-7*16,sp
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+{ .mmi; .save.gf 0,0x10
+ stf.spill [sp]=f16,-16
+ .save.gf 0,0x20
+ stf.spill [r17]=f17,32
+ add r16=-5*16,prevsp};;
+{ .mmi; .save.gf 0,0x40
+ stf.spill [r16]=f18,32
+ .save.gf 0,0x80
+ stf.spill [r17]=f19,32
+ $ADDP aptr=0,in1 };;
+{ .mmi; .save.gf 0,0x100
+ stf.spill [r16]=f20,32
+ .save.gf 0,0x200
+ stf.spill [r17]=f21,32
+ $ADDP r29=8,in1 };;
+{ .mmi; .save.gf 0,0x400
+ stf.spill [r16]=f22
+ .save.gf 0,0x800
+ stf.spill [r17]=f23
+ $ADDP rptr=0,in0 };;
+
+ .body
+ .rotf bj[8],mj[2],tf[2],alo[10],ahi[10],nlo[10],nhi[10]
+ .rotr t[8]
+
+// load input vectors padding them to 8 elements
+{ .mmi; ldf8 ai0=[aptr],16 // ap[0]
+ ldf8 ai1=[r29],16 // ap[1]
+ $ADDP bptr=0,in2 }
+{ .mmi; $ADDP r30=8,in2
+ $ADDP nptr=0,in3
+ $ADDP r31=8,in3 };;
+{ .mmi; ldf8 bj[7]=[bptr],16 // bp[0]
+ ldf8 bj[6]=[r30],16 // bp[1]
+ cmp4.le p4,p5=3,in5 }
+{ .mmi; ldf8 ni0=[nptr],16 // np[0]
+ ldf8 ni1=[r31],16 // np[1]
+ cmp4.le p6,p7=4,in5 };;
+
+{ .mfi; (p4)ldf8 ai2=[aptr],16 // ap[2]
+ (p5)fcvt.fxu ai2=f0
+ cmp4.le p8,p9=5,in5 }
+{ .mfi; (p6)ldf8 ai3=[r29],16 // ap[3]
+ (p7)fcvt.fxu ai3=f0
+ cmp4.le p10,p11=6,in5 }
+{ .mfi; (p4)ldf8 bj[5]=[bptr],16 // bp[2]
+ (p5)fcvt.fxu bj[5]=f0
+ cmp4.le p12,p13=7,in5 }
+{ .mfi; (p6)ldf8 bj[4]=[r30],16 // bp[3]
+ (p7)fcvt.fxu bj[4]=f0
+ cmp4.le p14,p15=8,in5 }
+{ .mfi; (p4)ldf8 ni2=[nptr],16 // np[2]
+ (p5)fcvt.fxu ni2=f0
+ addp4 r28=-1,in5 }
+{ .mfi; (p6)ldf8 ni3=[r31],16 // np[3]
+ (p7)fcvt.fxu ni3=f0
+ $ADDP in4=0,in4 };;
+
+{ .mfi; ldf8 n0=[in4]
+ fcvt.fxu tf[1]=f0
+ nop.i 0 }
+
+{ .mfi; (p8)ldf8 ai4=[aptr],16 // ap[4]
+ (p9)fcvt.fxu ai4=f0
+ mov t[0]=r0 }
+{ .mfi; (p10)ldf8 ai5=[r29],16 // ap[5]
+ (p11)fcvt.fxu ai5=f0
+ mov t[1]=r0 }
+{ .mfi; (p8)ldf8 bj[3]=[bptr],16 // bp[4]
+ (p9)fcvt.fxu bj[3]=f0
+ mov t[2]=r0 }
+{ .mfi; (p10)ldf8 bj[2]=[r30],16 // bp[5]
+ (p11)fcvt.fxu bj[2]=f0
+ mov t[3]=r0 }
+{ .mfi; (p8)ldf8 ni4=[nptr],16 // np[4]
+ (p9)fcvt.fxu ni4=f0
+ mov t[4]=r0 }
+{ .mfi; (p10)ldf8 ni5=[r31],16 // np[5]
+ (p11)fcvt.fxu ni5=f0
+ mov t[5]=r0 };;
+
+{ .mfi; (p12)ldf8 ai6=[aptr],16 // ap[6]
+ (p13)fcvt.fxu ai6=f0
+ mov t[6]=r0 }
+{ .mfi; (p14)ldf8 ai7=[r29],16 // ap[7]
+ (p15)fcvt.fxu ai7=f0
+ mov t[7]=r0 }
+{ .mfi; (p12)ldf8 bj[1]=[bptr],16 // bp[6]
+ (p13)fcvt.fxu bj[1]=f0
+ mov ar.lc=r28 }
+{ .mfi; (p14)ldf8 bj[0]=[r30],16 // bp[7]
+ (p15)fcvt.fxu bj[0]=f0
+ mov ar.ec=1 }
+{ .mfi; (p12)ldf8 ni6=[nptr],16 // np[6]
+ (p13)fcvt.fxu ni6=f0
+ mov pr.rot=1<<16 }
+{ .mfb; (p14)ldf8 ni7=[r31],16 // np[7]
+ (p15)fcvt.fxu ni7=f0
+ brp.loop.imp .Louter_8_ctop,.Louter_8_cend-16
+ };;
+
+// The loop is scheduled for 32*n ticks on Itanium 2. Actual attempt
+// to measure with help of Interval Time Counter indicated that the
+// factor is a tad higher: 33 or 34, if not 35. Exact measurement and
+// addressing the issue is problematic, because I don't have access
+// to platform-specific instruction-level profiler. On Itanium it
+// should run in 56*n ticks, because of higher xma latency...
+.Louter_8_ctop:
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 0:
+ (p16) xma.hu ahi[0]=ai0,bj[7],tf[1] // ap[0]*b[i]+t[0]
+ (p40) add a3=a3,n3 } // (p17) a3+=n3
+{ .mfi; (p42) add a3=a3,n3,1
+ (p16) xma.lu alo[0]=ai0,bj[7],tf[1]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 4:
+ (p16) xma.hu ahi[1]=ai1,bj[7],ahi[0] // ap[1]*b[i]
+ (p41) add a4=a4,n4 } // (p17) a4+=n4
+{ .mfi; (p43) add a4=a4,n4,1
+ (p16) xma.lu alo[1]=ai1,bj[7],ahi[0]
+ (p16) nop.i 0 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p16) xmpy.lu mj[0]=alo[0],n0 // (ap[0]*b[i]+t[0])*n0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p16) nop.m 0 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 8:
+ (p16) xma.hu ahi[2]=ai2,bj[7],ahi[1] // ap[2]*b[i]
+ (p40) add a5=a5,n5 } // (p17) a5+=n5
+{ .mfi; (p42) add a5=a5,n5,1
+ (p16) xma.lu alo[2]=ai2,bj[7],ahi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a1=alo[1] // 9:
+ (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mfi; (p16) nop.m 0 // 10:
+ (p16) xma.hu nhi[0]=ni0,mj[0],alo[0] // np[0]*m0
+ (p40) cmp.ltu p43,p41=a5,n5 }
+{ .mfi; (p42) cmp.leu p43,p41=a5,n5
+ (p16) xma.lu nlo[0]=ni0,mj[0],alo[0]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p17) getf.sig n8=nhi[8] // 12:
+ (p16) xma.hu ahi[3]=ai3,bj[7],ahi[2] // ap[3]*b[i]
+ (p41) add a6=a6,n6 } // (p17) a6+=n6
+{ .mfi; (p43) add a6=a6,n6,1
+ (p16) xma.lu alo[3]=ai3,bj[7],ahi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a2=alo[2] // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mfi; (p16) nop.m 0 // 14:
+ (p16) xma.hu nhi[1]=ni1,mj[0],nhi[0] // np[1]*m0
+ (p41) cmp.ltu p42,p40=a6,n6 }
+{ .mfi; (p43) cmp.leu p42,p40=a6,n6
+ (p16) xma.lu nlo[1]=ni1,mj[0],nhi[0]
+ (p16) nop.i 0 };;
+{ .mii; (p16) nop.m 0 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 16:
+ (p16) xma.hu ahi[4]=ai4,bj[7],ahi[3] // ap[4]*b[i]
+ (p40) add a7=a7,n7 } // (p17) a7+=n7
+{ .mfi; (p42) add a7=a7,n7,1
+ (p16) xma.lu alo[4]=ai4,bj[7],ahi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a3=alo[3] // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mfi; (p16) nop.m 0 // 18:
+ (p16) xma.hu nhi[2]=ni2,mj[0],nhi[1] // np[2]*m0
+ (p40) cmp.ltu p43,p41=a7,n7 }
+{ .mfi; (p42) cmp.leu p43,p41=a7,n7
+ (p16) xma.lu nlo[2]=ni2,mj[0],nhi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n1=nlo[1] // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 20:
+ (p16) xma.hu ahi[5]=ai5,bj[7],ahi[4] // ap[5]*b[i]
+ (p41) add a8=a8,n8 } // (p17) a8+=n8
+{ .mfi; (p43) add a8=a8,n8,1
+ (p16) xma.lu alo[5]=ai5,bj[7],ahi[4]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a4=alo[4] // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 };;
+{ .mfi; (p16) nop.m 0 // 22:
+ (p16) xma.hu nhi[3]=ni3,mj[0],nhi[2] // np[3]*m0
+ (p41) cmp.ltu p42,p40=a8,n8 }
+{ .mfi; (p43) cmp.leu p42,p40=a8,n8
+ (p16) xma.lu nlo[3]=ni3,mj[0],nhi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n2=nlo[2] // 23:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 };;
+{ .mfi; (p16) nop.m 0 // 24:
+ (p16) xma.hu ahi[6]=ai6,bj[7],ahi[5] // ap[6]*b[i]
+ (p16) add a1=a1,n1 } // (p16) a1+=n1
+{ .mfi; (p16) nop.m 0
+ (p16) xma.lu alo[6]=ai6,bj[7],ahi[5]
+ (p17) mov t[0]=r0 };;
+{ .mii; (p16) getf.sig a5=alo[5] // 25:
+ (p16) add t0=t[7],a1 // (p16) t[7]+=a1
+ (p42) add t[0]=t[0],r0,1 };;
+{ .mfi; (p16) setf.sig tf[0]=t0 // 26:
+ (p16) xma.hu nhi[4]=ni4,mj[0],nhi[3] // np[4]*m0
+ (p50) add t[0]=t[0],r0,1 }
+{ .mfi; (p16) cmp.ltu.unc p42,p40=a1,n1
+ (p16) xma.lu nlo[4]=ni4,mj[0],nhi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n3=nlo[3] // 27:
+ (p16) cmp.ltu.unc p50,p48=t0,a1
+ (p16) nop.i 0 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 28:
+ (p16) xma.hu ahi[7]=ai7,bj[7],ahi[6] // ap[7]*b[i]
+ (p40) add a2=a2,n2 } // (p16) a2+=n2
+{ .mfi; (p42) add a2=a2,n2,1
+ (p16) xma.lu alo[7]=ai7,bj[7],ahi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a6=alo[6] // 29:
+ (p48) add t[6]=t[6],a2 // (p16) t[6]+=a2
+ (p50) add t[6]=t[6],a2,1 };;
+{ .mfi; (p16) nop.m 0 // 30:
+ (p16) xma.hu nhi[5]=ni5,mj[0],nhi[4] // np[5]*m0
+ (p40) cmp.ltu p41,p39=a2,n2 }
+{ .mfi; (p42) cmp.leu p41,p39=a2,n2
+ (p16) xma.lu nlo[5]=ni5,mj[0],nhi[4]
+ (p16) nop.i 0 };;
+{ .mfi; (p16) getf.sig n4=nlo[4] // 31:
+ (p16) nop.f 0
+ (p48) cmp.ltu p49,p47=t[6],a2 }
+{ .mfb; (p50) cmp.leu p49,p47=t[6],a2
+ (p16) nop.f 0
+ br.ctop.sptk.many .Louter_8_ctop };;
+.Louter_8_cend:
+
+// The above loop has to execute one more time, without (p16), which is
+// replaced with a merged move of np[8] to the GPR bank.
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mmi; (p0) getf.sig n1=ni0 // 0:
+ (p40) add a3=a3,n3 // (p17) a3+=n3
+ (p42) add a3=a3,n3,1 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mmi; (p0) getf.sig n2=ni1 // 4:
+ (p41) add a4=a4,n4 // (p17) a4+=n4
+ (p43) add a4=a4,n4,1 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p0) nop.f 0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p0) getf.sig n3=ni2 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) getf.sig n4=ni3 // 8:
+ (p40) add a5=a5,n5 // (p17) a5+=n5
+ (p42) add a5=a5,n5,1 };;
+{ .mii; (p0) nop.m 0 // 9:
+ (p48) add t[4]=t[4],a5 // p(17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mii; (p0) nop.m 0 // 10:
+ (p40) cmp.ltu p43,p41=a5,n5
+ (p42) cmp.leu p43,p41=a5,n5 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p17) getf.sig n8=nhi[8] // 12:
+ (p41) add a6=a6,n6 // (p17) a6+=n6
+ (p43) add a6=a6,n6,1 };;
+{ .mii; (p0) getf.sig n5=ni4 // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mii; (p0) nop.m 0 // 14:
+ (p41) cmp.ltu p42,p40=a6,n6
+ (p43) cmp.leu p42,p40=a6,n6 };;
+{ .mii; (p0) getf.sig n6=ni5 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) nop.m 0 // 16:
+ (p40) add a7=a7,n7 // (p17) a7+=n7
+ (p42) add a7=a7,n7,1 };;
+{ .mii; (p0) nop.m 0 // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mii; (p0) nop.m 0 // 18:
+ (p40) cmp.ltu p43,p41=a7,n7
+ (p42) cmp.leu p43,p41=a7,n7 };;
+{ .mii; (p0) getf.sig n7=ni6 // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p0) nop.m 0 // 20:
+ (p41) add a8=a8,n8 // (p17) a8+=n8
+ (p43) add a8=a8,n8,1 };;
+{ .mmi; (p0) nop.m 0 // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 }
+{ .mmi; (p17) mov t[0]=r0
+ (p41) cmp.ltu p42,p40=a8,n8
+ (p43) cmp.leu p42,p40=a8,n8 };;
+{ .mmi; (p0) getf.sig n8=ni7 // 22:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 }
+{ .mmi; (p42) add t[0]=t[0],r0,1
+ (p0) add r16=-7*16,prevsp
+ (p0) add r17=-6*16,prevsp };;
+
+// subtract np[8] from carrybit|tmp[8]
+// carrybit|tmp[8] layout upon exit from above loop is:
+// t[0]|t[1]|t[2]|t[3]|t[4]|t[5]|t[6]|t[7]|t0 (least significant)
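+//
+// For orientation only: in C terms the tail below is the usual final
+// reduction step of a Montgomery multiplication (a sketch, not a
+// literal transcription of the code below):
+//
+//	borrow = subtract np[0..num-1] from tmp[0..num-1], word by word,
+//	         and finally from the carry bit;
+//	if (borrow)	result = tmp;		// tmp <  np
+//	else		result = tmp - np;	// tmp >= np
+//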
+{ .mmi; (p50)add t[0]=t[0],r0,1
+ add r18=-5*16,prevsp
+ sub n1=t0,n1 };;
+{ .mmi; cmp.gtu p34,p32=n1,t0;;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n2=t[7],n2
+ (p34)sub n2=t[7],n2,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n2,t[7]
+ (p34)cmp.geu p35,p33=n2,t[7];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n3=t[6],n3 }
+{ .mmi; (p35)sub n3=t[6],n3,1;;
+ (p33)cmp.gtu p34,p32=n3,t[6]
+ (p35)cmp.geu p34,p32=n3,t[6] };;
+ .pred.rel "mutex",p32,p34
+{ .mii; (p32)sub n4=t[5],n4
+ (p34)sub n4=t[5],n4,1;;
+ (p32)cmp.gtu p35,p33=n4,t[5] }
+{ .mmi; (p34)cmp.geu p35,p33=n4,t[5];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n5=t[4],n5
+ (p35)sub n5=t[4],n5,1 };;
+{ .mii; (p33)cmp.gtu p34,p32=n5,t[4]
+ (p35)cmp.geu p34,p32=n5,t[4];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n6=t[3],n6 }
+{ .mmi; (p34)sub n6=t[3],n6,1;;
+ (p32)cmp.gtu p35,p33=n6,t[3]
+ (p34)cmp.geu p35,p33=n6,t[3] };;
+ .pred.rel "mutex",p33,p35
+{ .mii; (p33)sub n7=t[2],n7
+ (p35)sub n7=t[2],n7,1;;
+ (p33)cmp.gtu p34,p32=n7,t[2] }
+{ .mmi; (p35)cmp.geu p34,p32=n7,t[2];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n8=t[1],n8
+ (p34)sub n8=t[1],n8,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n8,t[1]
+ (p34)cmp.geu p35,p33=n8,t[1];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub a8=t[0],r0 }
+{ .mmi; (p35)sub a8=t[0],r0,1;;
+ (p33)cmp.gtu p34,p32=a8,t[0]
+ (p35)cmp.geu p34,p32=a8,t[0] };;
+
+// save the result, either tmp[num] or tmp[num]-np[num]
+ .pred.rel "mutex",p32,p34
+{ .mmi; (p32)st8 [rptr]=n1,8
+ (p34)st8 [rptr]=t0,8
+ add r19=-4*16,prevsp};;
+{ .mmb; (p32)st8 [rptr]=n2,8
+ (p34)st8 [rptr]=t[7],8
+ (p5)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n3,8
+ (p34)st8 [rptr]=t[6],8
+ (p7)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n4,8
+ (p34)st8 [rptr]=t[5],8
+ (p9)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n5,8
+ (p34)st8 [rptr]=t[4],8
+ (p11)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n6,8
+ (p34)st8 [rptr]=t[3],8
+ (p13)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n7,8
+ (p34)st8 [rptr]=t[2],8
+ (p15)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n8,8
+ (p34)st8 [rptr]=t[1],8
+ nop.b 0 };;
+.Ldone: // epilogue
+{ .mmi; ldf.fill f16=[r16],64
+ ldf.fill f17=[r17],64
+ nop.i 0 }
+{ .mmi; ldf.fill f18=[r18],64
+ ldf.fill f19=[r19],64
+ mov pr=prevpr,0x1ffff };;
+{ .mmi; ldf.fill f20=[r16]
+ ldf.fill f21=[r17]
+ mov ar.lc=prevlc }
+{ .mmi; ldf.fill f22=[r18]
+ ldf.fill f23=[r19]
+ mov ret0=1 } // signal "handled"
+{ .mib; rum 1<<5
+ .restore sp
+ mov sp=prevsp
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_8#
+
+.type copyright#,\@object
+copyright:
+stringz "Montgomery multiplication for IA-64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$output=shift and open STDOUT,">$output";
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64.S
new file mode 100644
index 00000000..c0cee821
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ia64.S
@@ -0,0 +1,1555 @@
+.explicit
+.text
+.ident "ia64.S, Version 2.1"
+.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+//
+// ====================================================================
+// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+// project.
+//
+// Rights for redistribution and usage in source and binary forms are
+// granted according to the OpenSSL license. Warranty of any kind is
+// disclaimed.
+// ====================================================================
+//
+// Version 2.x is an Itanium2 re-tune. A few words about how Itanium2
+// differs from Itanium from this module's viewpoint. Most notably, is it
+// "wider" than Itanium? Can you experience the loop scalability
+// discussed in the commentary sections? Not really:-( Itanium2 has 6
+// integer ALU ports, i.e. it's 2 ports wider, but that's not enough to
+// spin twice as fast, as I need 8 IALU ports. The number of floating point
+// ports is the same, i.e. 2, while I need 4. In other words, to this
+// module Itanium2 remains effectively as "wide" as Itanium. Yet it's
+// essentially different with respect to this module, and a re-tune was
+// required. Well, because some instruction latencies have changed. Most
+// noticeably those intensively used:
+//
+// Itanium Itanium2
+// ldf8 9 6 L2 hit
+// ld8 2 1 L1 hit
+// getf 2 5
+// xma[->getf] 7[+1] 4[+0]
+// add[->st8] 1[+1] 1[+0]
+//
+// What does it mean? You might ratiocinate that the original code
+// should run just faster... Because the sum of latencies is smaller...
+// Wrong! Note that the getf latency increased. This means that if a loop is
+// scheduled for lower latency (as they were), then it will suffer from
+// stall conditions and the code will therefore turn anti-scalable, e.g.
+// the original bn_mul_words spun at 5*n, or 2.5 times slower than expected,
+// on Itanium2! What to do? Reschedule loops for Itanium2? But then
+// Itanium would exhibit anti-scalability. So I've chosen to reschedule
+// for the worst latency of every instruction, aiming for the best *all-round*
+// performance.
+
+// Q. How much faster does it get?
+// A. Here is the output from 'openssl speed rsa dsa' for vanilla
+// 0.9.6a compiled with gcc version 2.96 20000731 (Red Hat
+// Linux 7.1 2.96-81):
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0036s 0.0003s 275.3 2999.2
+// rsa 1024 bits 0.0203s 0.0011s 49.3 894.1
+// rsa 2048 bits 0.1331s 0.0040s 7.5 250.9
+// rsa 4096 bits 0.9270s 0.0147s 1.1 68.1
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0035s 0.0043s 288.3 234.8
+// dsa 1024 bits 0.0111s 0.0135s 90.0 74.2
+//
+// And here is similar output but for this assembler
+// implementation:-)
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0021s 0.0001s 549.4 9638.5
+// rsa 1024 bits 0.0055s 0.0002s 183.8 4481.1
+// rsa 2048 bits 0.0244s 0.0006s 41.4 1726.3
+// rsa 4096 bits 0.1295s 0.0018s 7.7 561.5
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0012s 0.0013s 891.9 756.6
+// dsa 1024 bits 0.0023s 0.0028s 440.4 376.2
+//
+// Yes, you may argue that it's not fair comparison as it's
+// possible to craft the C implementation with BN_UMULT_HIGH
+// inline assembler macro. But of course! Here is the output
+// with the macro:
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0020s 0.0002s 495.0 6561.0
+// rsa 1024 bits 0.0086s 0.0004s 116.2 2235.7
+// rsa 2048 bits 0.0519s 0.0015s 19.3 667.3
+// rsa 4096 bits 0.3464s 0.0053s 2.9 187.7
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0016s 0.0020s 613.1 510.5
+// dsa 1024 bits 0.0045s 0.0054s 221.0 183.9
+//
+// My code is still way faster, huh:-) And I believe that even
+// higher performance can be achieved. Note that as keys get
+// longer, the performance gain is larger. Why? According to the
+// profiler there is another player in the field, namely
+// BN_from_montgomery, consuming a larger and larger portion of CPU
+// time as keysize decreases. I therefore consider putting effort
+// into an assembler implementation of the following routine:
+//
+// void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0)
+// {
+// int i,j;
+// BN_ULONG v;
+//
+// for (i=0; i<nl; i++)
+// {
+// v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
+// nrp++;
+// rp++;
+// if (((nrp[-1]+=v)&BN_MASK2) < v)
+// for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ;
+// }
+// }
+//
+// It might as well be beneficial to implement even combaX
+// variants, as it appears they can literally unleash the
+// performance (see the comment section of bn_mul_comba8 below).
+//
+// And finally, for your reference, the output for 0.9.6a compiled
+// with SGIcc version 0.01.0-12 (keep in mind that at the moment
+// of this writing it's not possible to convince SGIcc to use the
+// BN_UMULT_HIGH inline assembler macro, yet the code is fast,
+// i.e. fast for a compiler-generated one:-):
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0022s 0.0002s 452.7 5894.3
+// rsa 1024 bits 0.0097s 0.0005s 102.7 2002.9
+// rsa 2048 bits 0.0578s 0.0017s 17.3 600.2
+// rsa 4096 bits 0.3838s 0.0061s 2.6 164.5
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0018s 0.0022s 547.3 459.6
+// dsa 1024 bits 0.0051s 0.0062s 196.6 161.3
+//
+// Oh! Benchmarks were performed on 733MHz Lion-class Itanium
+// system running Redhat Linux 7.1 (very special thanks to Ray
+// McCaffity of Williams Communications for providing an account).
+//
+// Q. What the heck is with 'rum 1<<5' at the end of every function?
+// A. Well, by clearing the "upper FP registers written" bit of the
+// User Mask I want to excuse the kernel from preserving the upper
+// (f32-f128) FP register bank over a process context switch, thus
+// minimizing bus bandwidth consumption during the switch (i.e.
+// after the PKI operation completes and the program is off doing
+// something else like bulk symmetric encryption). Having said
+// this, I also want to point out that it might be a good idea
+// to compile the whole toolkit (as well as the majority of
+// programs for that matter) with the -mfixed-range=f32-f127 command
+// line option. No, it doesn't prevent the compiler from writing
+// to the upper bank, but it at least discourages it from doing so. If
+// you don't like the idea you have the option to compile the module
+// with -Drum=nop.m on the command line.
+//
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+#define ADDP addp4
+#else
+#define ADDP add
+#endif
+
+#if 1
+//
+// bn_[add|sub]_words routines.
+//
+// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the
+// data reside in L1 cache, i.e. 2 ticks away). It's possible to
+// compress the epilogue and get down to 2*n+6, but at the cost of
+// scalability (the neat feature of this implementation is that it
+// shall automagically spin in n+5 on "wider" IA-64 implementations:-)
+// I consider that the epilogue is short enough as it is to trade a tiny
+// performance loss on Itanium for scalability.
+//
+// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
+//
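+//
+// For reference, a plain C sketch of what the routine computes (an
+// illustration only, assuming a 64-bit BN_ULONG; it is not the code
+// this loop was derived from):
+//
+//	BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp, int num)
+//	{
+//	BN_ULONG carry=0;
+//	int i;
+//	for (i=0; i<num; i++)
+//		{
+//		BN_ULONG t=ap[i]+bp[i], r=t+carry;
+//		rp[i]=r;
+//		carry=(t<ap[i])|(r<t);	// at most one of the two adds can overflow
+//		}
+//	return carry;
+//	}
+//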
+.global bn_add_words#
+.proc bn_add_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_add_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,4,12,0,16
+ cmp4.le p6,p0=r35,r0 };;
+{ .mfb; mov r8=r0 // return value
+(p6) br.ret.spnt.many b0 };;
+
+{ .mib; sub r10=r35,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16
+ }
+{ .mib; ADDP r14=0,r32 // rp
+ .save pr,r9
+ mov r9=pr };;
+ .body
+{ .mii; ADDP r15=0,r33 // ap
+ mov ar.lc=r10
+ mov ar.ec=6 }
+{ .mib; ADDP r16=0,r34 // bp
+ mov pr.rot=1<<16 };;
+
+.L_bn_add_words_ctop:
+{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
+ (p18) add r39=r37,r34
+ (p19) cmp.ltu.unc p56,p0=r40,r38 }
+{ .mfb; (p0) nop.m 0x0
+ (p0) nop.f 0x0
+ (p0) nop.b 0x0 }
+{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
+ (p58) cmp.eq.or p57,p0=-1,r41 // (p20)
+ (p58) add r41=1,r41 } // (p20)
+{ .mfb; (p21) st8 [r14]=r42,8 // *(rp++)=r
+ (p0) nop.f 0x0
+ br.ctop.sptk .L_bn_add_words_ctop };;
+.L_bn_add_words_cend:
+
+{ .mii;
+(p59) add r8=1,r8 // return value
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mbb; nop.b 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_add_words#
+
+//
+// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
+//
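+//
+// The mirror image of bn_add_words: a C sketch of the word-wise
+// subtraction with borrow (illustration only, 64-bit BN_ULONG assumed):
+//
+//	BN_ULONG borrow=0;
+//	int i;
+//	for (i=0; i<num; i++)
+//		{
+//		BN_ULONG t=ap[i]-bp[i], r=t-borrow;
+//		rp[i]=r;
+//		borrow=(t>ap[i])|(r>t);	// borrow out of either subtraction
+//		}
+//	return borrow;	// 1 if ap < bp as num-word numbers
+//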
+.global bn_sub_words#
+.proc bn_sub_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_sub_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,4,12,0,16
+ cmp4.le p6,p0=r35,r0 };;
+{ .mfb; mov r8=r0 // return value
+(p6) br.ret.spnt.many b0 };;
+
+{ .mib; sub r10=r35,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
+ }
+{ .mib; ADDP r14=0,r32 // rp
+ .save pr,r9
+ mov r9=pr };;
+ .body
+{ .mii; ADDP r15=0,r33 // ap
+ mov ar.lc=r10
+ mov ar.ec=6 }
+{ .mib; ADDP r16=0,r34 // bp
+ mov pr.rot=1<<16 };;
+
+.L_bn_sub_words_ctop:
+{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
+ (p18) sub r39=r37,r34
+ (p19) cmp.gtu.unc p56,p0=r40,r38 }
+{ .mfb; (p0) nop.m 0x0
+ (p0) nop.f 0x0
+ (p0) nop.b 0x0 }
+{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
+ (p58) cmp.eq.or p57,p0=0,r41 // (p20)
+ (p58) add r41=-1,r41 } // (p20)
+{ .mbb; (p21) st8 [r14]=r42,8 // *(rp++)=r
+ (p0) nop.b 0x0
+ br.ctop.sptk .L_bn_sub_words_ctop };;
+.L_bn_sub_words_cend:
+
+{ .mii;
+(p59) add r8=1,r8 // return value
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mbb; nop.b 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_sub_words#
+#endif
+
+#if 0
+#define XMA_TEMPTATION
+#endif
+
+#if 1
+//
+// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//
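+//
+// What the loop computes, as a C sketch (illustration only; assumes a
+// 64-bit BN_ULONG and a compiler with unsigned __int128, neither of
+// which this file relies on):
+//
+//	BN_ULONG carry=0;
+//	int i;
+//	for (i=0; i<num; i++)
+//		{
+//		unsigned __int128 t=(unsigned __int128)ap[i]*w+carry;
+//		rp[i]=(BN_ULONG)t;		// low 64 bits
+//		carry=(BN_ULONG)(t>>64);	// high 64 bits become the next carry
+//		}
+//	return carry;
+//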
+.global bn_mul_words#
+.proc bn_mul_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_mul_words:
+ .prologue
+ .save ar.pfs,r2
+#ifdef XMA_TEMPTATION
+{ .mfi; alloc r2=ar.pfs,4,0,0,0 };;
+#else
+{ .mfi; alloc r2=ar.pfs,4,12,0,16 };;
+#endif
+{ .mib; mov r8=r0 // return value
+ cmp4.le p6,p0=r34,r0
+(p6) br.ret.spnt.many b0 };;
+
+{ .mii; sub r10=r34,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ .save pr,r9
+ mov r9=pr };;
+
+ .body
+{ .mib; setf.sig f8=r35 // w
+ mov pr.rot=0x800001<<16
+ // ------^----- serves as (p50) at first (p27)
+ brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
+ }
+
+#ifndef XMA_TEMPTATION
+
+{ .mmi; ADDP r14=0,r32 // rp
+ ADDP r15=0,r33 // ap
+ mov ar.lc=r10 }
+{ .mmi; mov r40=0 // serves as r35 at first (p27)
+ mov ar.ec=13 };;
+
+// This loop spins in 2*(n+12) ticks. It's scheduled for data in the Itanium
+// L2 cache (i.e. 9 ticks away) as floating point load/store instructions
+// bypass the L1 cache and L2 latency is actually the best-case scenario for
+// ldf8. The loop is not scalable and shall run in 2*(n+12) even on
+// "wider" IA-64 implementations. It's a trade-off here. An n+24 loop
+// would give us ~5% in *overall* performance improvement on "wider"
+// IA-64, but would hurt Itanium by about the same because of the longer
+// epilogue. As it's a matter of a few percent in either case I've
+// chosen to trade the scalability for development time (you can see
+// this very instruction sequence in the bn_mul_add_words loop which in
+// turn is scalable).
+.L_bn_mul_words_ctop:
+{ .mfi; (p25) getf.sig r36=f52 // low
+ (p21) xmpy.lu f48=f37,f8
+ (p28) cmp.ltu p54,p50=r41,r39 }
+{ .mfi; (p16) ldf8 f32=[r15],8
+ (p21) xmpy.hu f40=f37,f8
+ (p0) nop.i 0x0 };;
+{ .mii; (p25) getf.sig r32=f44 // high
+ .pred.rel "mutex",p50,p54
+ (p50) add r40=r38,r35 // (p27)
+ (p54) add r40=r38,r35,1 } // (p27)
+{ .mfb; (p28) st8 [r14]=r41,8
+ (p0) nop.f 0x0
+ br.ctop.sptk .L_bn_mul_words_ctop };;
+.L_bn_mul_words_cend:
+
+{ .mii; nop.m 0x0
+.pred.rel "mutex",p51,p55
+(p51) add r8=r36,r0
+(p55) add r8=r36,r0,1 }
+{ .mfb; nop.m 0x0
+ nop.f 0x0
+ nop.b 0x0 }
+
+#else // XMA_TEMPTATION
+
+ setf.sig f37=r0 // serves as carry at (p18) tick
+ mov ar.lc=r10
+ mov ar.ec=5;;
+
+// Most of you examining this code very likely wonder why in the name
+// of Intel the following loop is commented out. Indeed, it looks so
+// neat that you find it hard to believe that there's something wrong
+// with it, right? The catch is that every iteration depends on the
+// result from the previous one and the latter isn't available instantly.
+// The loop therefore spins at the latency of xma minus 1, or in other
+// words at 6*(n+4) ticks:-( Compare to the "production" loop above
+// that runs in 2*(n+11), where the low-latency problem is worked around
+// by moving the dependency to the one-tick latent integer ALU. Note that
+// the "distance" between ldf8 and xma is not the latency of ldf8, but the
+// *difference* between xma and ldf8 latencies.
+.L_bn_mul_words_ctop:
+{ .mfi; (p16) ldf8 f32=[r33],8
+ (p18) xma.hu f38=f34,f8,f39 }
+{ .mfb; (p20) stf8 [r32]=f37,8
+ (p18) xma.lu f35=f34,f8,f39
+ br.ctop.sptk .L_bn_mul_words_ctop };;
+.L_bn_mul_words_cend:
+
+ getf.sig r8=f41 // the return value
+
+#endif // XMA_TEMPTATION
+
+{ .mii; nop.m 0x0
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mfb; rum 1<<5 // clear um.mfh
+ nop.f 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_mul_words#
+#endif
+
+#if 1
+//
+// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//
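+//
+// Same shape as bn_mul_words, but the destination word is accumulated
+// as well. A C sketch (illustration only; 64-bit BN_ULONG and
+// unsigned __int128 assumed):
+//
+//	BN_ULONG carry=0;
+//	int i;
+//	for (i=0; i<num; i++)
+//		{
+//		unsigned __int128 t=(unsigned __int128)ap[i]*w+rp[i]+carry;
+//		rp[i]=(BN_ULONG)t;
+//		carry=(BN_ULONG)(t>>64);
+//		}
+//	return carry;
+//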
+.global bn_mul_add_words#
+.proc bn_mul_add_words#
+.align 64
+.skip 48 // makes the loop body aligned at 64-byte boundary
+bn_mul_add_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mmi; alloc r2=ar.pfs,4,4,0,8
+ cmp4.le p6,p0=r34,r0
+ .save ar.lc,r3
+ mov r3=ar.lc };;
+{ .mib; mov r8=r0 // return value
+ sub r10=r34,r0,1
+(p6) br.ret.spnt.many b0 };;
+
+{ .mib; setf.sig f8=r35 // w
+ .save pr,r9
+ mov r9=pr
+ brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
+ }
+ .body
+{ .mmi; ADDP r14=0,r32 // rp
+ ADDP r15=0,r33 // ap
+ mov ar.lc=r10 }
+{ .mii; ADDP r16=0,r32 // rp copy
+ mov pr.rot=0x2001<<16
+ // ------^----- serves as (p40) at first (p27)
+ mov ar.ec=11 };;
+
+// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
+// Itanium 2. Yes, unlike previous versions it scales:-) The previous
+// version was performing *all* additions in the IALU and was starving
+// for those even on Itanium 2. In this version one addition is
+// moved to the FPU and is folded with the multiplication. This comes at the
+// cost of propagating the result from the previous call to this subroutine
+// through the L2 cache... In other words negligible even for shorter keys.
+// *Overall* performance improvement [over the previous version] varies
+// from 11 to 22 percent depending on key length.
+.L_bn_mul_add_words_ctop:
+.pred.rel "mutex",p40,p42
+{ .mfi; (p23) getf.sig r36=f45 // low
+ (p20) xma.lu f42=f36,f8,f50 // low
+ (p40) add r39=r39,r35 } // (p27)
+{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++)
+ (p20) xma.hu f36=f36,f8,f50 // high
+ (p42) add r39=r39,r35,1 };; // (p27)
+{ .mmi; (p24) getf.sig r32=f40 // high
+ (p16) ldf8 f46=[r16],8 // *(rp1++)
+ (p40) cmp.ltu p41,p39=r39,r35 } // (p27)
+{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++)
+ (p42) cmp.leu p41,p39=r39,r35 // (p27)
+ br.ctop.sptk .L_bn_mul_add_words_ctop};;
+.L_bn_mul_add_words_cend:
+
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) add r8=r35,r0
+(p42) add r8=r35,r0,1
+ mov pr=r9,0x1ffff }
+{ .mib; rum 1<<5 // clear um.mfh
+ mov ar.lc=r3
+ br.ret.sptk.many b0 };;
+.endp bn_mul_add_words#
+#endif
+
+#if 1
+//
+// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+//
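+//
+// The loop stores the full 128-bit square of every input word, i.e. in
+// C terms (illustration only; 64-bit BN_ULONG and unsigned __int128
+// assumed):
+//
+//	int i;
+//	for (i=0; i<num; i++)
+//		{
+//		unsigned __int128 t=(unsigned __int128)ap[i]*ap[i];
+//		rp[2*i]  =(BN_ULONG)t;		// low half
+//		rp[2*i+1]=(BN_ULONG)(t>>64);	// high half
+//		}
+//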
+.global bn_sqr_words#
+.proc bn_sqr_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_sqr_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ sxt4 r34=r34 };;
+{ .mii; cmp.le p6,p0=r34,r0
+ mov r8=r0 } // return value
+{ .mfb; ADDP r32=0,r32
+ nop.f 0x0
+(p6) br.ret.spnt.many b0 };;
+
+{ .mii; sub r10=r34,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ .save pr,r9
+ mov r9=pr };;
+
+ .body
+{ .mib; ADDP r33=0,r33
+ mov pr.rot=1<<16
+ brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
+ }
+{ .mii; add r34=8,r32
+ mov ar.lc=r10
+ mov ar.ec=18 };;
+
+// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's
+// possible to compress the epilogue (I'm getting tired of writing this
+// comment over and over) and get down to 2*n+16 at the cost of
+// scalability. The decision will very likely be reconsidered after the
+// benchmark program is profiled. I.e. if the performance gain on Itanium
+// appears larger than the loss on "wider" IA-64, then the loop should
+// be explicitly split and the epilogue compressed.
+.L_bn_sqr_words_ctop:
+{ .mfi; (p16) ldf8 f32=[r33],8
+ (p25) xmpy.lu f42=f41,f41
+ (p0) nop.i 0x0 }
+{ .mib; (p33) stf8 [r32]=f50,16
+ (p0) nop.i 0x0
+ (p0) nop.b 0x0 }
+{ .mfi; (p0) nop.m 0x0
+ (p25) xmpy.hu f52=f41,f41
+ (p0) nop.i 0x0 }
+{ .mib; (p33) stf8 [r34]=f60,16
+ (p0) nop.i 0x0
+ br.ctop.sptk .L_bn_sqr_words_ctop };;
+.L_bn_sqr_words_cend:
+
+{ .mii; nop.m 0x0
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mfb; rum 1<<5 // clear um.mfh
+ nop.f 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_sqr_words#
+#endif
+
+#if 1
+// Apparently we win nothing by implementing a special bn_sqr_comba8.
+// Yes, it is possible to reduce the number of multiplications by
+// almost a factor of two, but then the number of additions would
+// increase by a factor of two (as we would have to perform those
+// otherwise performed by xma ourselves). Normally we would trade
+// anyway as multiplications are way more expensive, but not this
+// time... The multiplication kernel is fully pipelined and as we drain
+// one 128-bit multiplication result per clock cycle, multiplications
+// are effectively as inexpensive as additions. A special implementation
+// might become of interest for "wider" IA-64 implementations as you'll
+// be able to get through the multiplication phase faster (there won't
+// be any stall issues as discussed in the commentary section below and
+// you therefore will be able to employ all 4 FP units)... But in these
+// Itanium days it's simply too hard to justify the effort, so I just
+// drop down to the bn_mul_comba8 code:-)
+//
+// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+//
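+//
+// In other words the routine behaves as if it were written as
+//
+//	void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+//	{ bn_mul_comba8(r,a,a); }
+//
+// (the entry merely duplicates the operand pointer and branches into
+// the bn_mul_comba8 body).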
+.global bn_sqr_comba8#
+.proc bn_sqr_comba8#
+.align 64
+bn_sqr_comba8:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r33=0,r33
+ addp4 r32=0,r32 };;
+{ .mii;
+#else
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
+ mov r34=r33
+ add r14=8,r33 };;
+ .body
+{ .mii; add r17=8,r34
+ add r15=16,r33
+ add r18=16,r34 }
+{ .mfb; add r16=24,r33
+ br .L_cheat_entry_point8 };;
+.endp bn_sqr_comba8#
+#endif
+
+#if 1
+// I've estimated this routine to run in ~120 ticks, but in reality
+// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
+// cycles consumed by instruction fetch? Or did I misinterpret some
+// clause in the Itanium µ-architecture manual? Comments are welcomed and
+// highly appreciated.
+//
+// On Itanium 2 it takes ~190 ticks. This is because of stalls on the
+// result from getf.sig. I do nothing about it at this point for
+// reasons depicted below.
+//
+// However! It should be noted that even 160 ticks is a darn good result
+// as it's over 10 (yes, ten, spelled as t-e-n) times faster than the
+// C version (compiled with gcc with inline assembler). I really
+// kicked the compiler's butt here, didn't I? Yeah! This brings us to the
+// following statement. It's a damn shame that this routine isn't called
+// very often nowadays! According to the profiler most CPU time is
+// consumed by bn_mul_add_words called from BN_from_montgomery. In
+// order to estimate what we're missing, I've compared the performance
+// of this routine against the "traditional" implementation, i.e. against
+// the following routine:
+//
+// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]);
+// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
+// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
+// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
+// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
+// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
+// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
+// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
+// }
+//
+// The one below is over 8 times faster than the one above:-( Even
+// more reasons to "combafy" bn_mul_add_mont...
+//
+// And yes, this routine really made me wish there were an optimizing
+// assembler! It also feels like it deserves a dedication.
+//
+// To my wife for being there and to my kids...
+//
+// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+//
+#define carry1 r14
+#define carry2 r15
+#define carry3 r34
+.global bn_mul_comba8#
+.proc bn_mul_comba8#
+.align 64
+bn_mul_comba8:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
+ add r14=8,r33
+ add r17=8,r34 }
+ .body
+{ .mii; add r15=16,r33
+ add r18=16,r34
+ add r16=24,r33 }
+.L_cheat_entry_point8:
+{ .mmi; add r19=24,r34
+
+ ldf8 f32=[r33],32 };;
+
+{ .mmi; ldf8 f120=[r34],32
+ ldf8 f121=[r17],32 }
+{ .mmi; ldf8 f122=[r18],32
+ ldf8 f123=[r19],32 };;
+{ .mmi; ldf8 f124=[r34]
+ ldf8 f125=[r17] }
+{ .mmi; ldf8 f126=[r18]
+ ldf8 f127=[r19] }
+
+{ .mmi; ldf8 f33=[r14],32
+ ldf8 f34=[r15],32 }
+{ .mmi; ldf8 f35=[r16],32;;
+ ldf8 f36=[r33] }
+{ .mmi; ldf8 f37=[r14]
+ ldf8 f38=[r15] }
+{ .mfi; ldf8 f39=[r16]
+// -------\ Entering multiplier's heaven /-------
+// ------------\ /------------
+// -----------------\ /-----------------
+// ----------------------\/----------------------
+ xma.hu f41=f32,f120,f0 }
+{ .mfi; xma.lu f40=f32,f120,f0 };; // (*)
+{ .mfi; xma.hu f51=f32,f121,f0 }
+{ .mfi; xma.lu f50=f32,f121,f0 };;
+{ .mfi; xma.hu f61=f32,f122,f0 }
+{ .mfi; xma.lu f60=f32,f122,f0 };;
+{ .mfi; xma.hu f71=f32,f123,f0 }
+{ .mfi; xma.lu f70=f32,f123,f0 };;
+{ .mfi; xma.hu f81=f32,f124,f0 }
+{ .mfi; xma.lu f80=f32,f124,f0 };;
+{ .mfi; xma.hu f91=f32,f125,f0 }
+{ .mfi; xma.lu f90=f32,f125,f0 };;
+{ .mfi; xma.hu f101=f32,f126,f0 }
+{ .mfi; xma.lu f100=f32,f126,f0 };;
+{ .mfi; xma.hu f111=f32,f127,f0 }
+{ .mfi; xma.lu f110=f32,f127,f0 };;//
+// (*) You can argue that splitting at every second bundle would
+// prevent "wider" IA-64 implementations from achieving the peak
+// performance. Well, not really... The catch is that if you
+// intend to keep 4 FP units busy by splitting at every fourth
+// bundle and thus perform these 16 multiplications in 4 ticks,
+// the first bundle *below* would stall because the result from
+// the first xma bundle *above* won't be available for another 3
+// ticks (if not more; being an optimist, I assume that a "wider"
+// implementation will have the same latency:-). This stall will hold
+// you back and the performance would be as if every second bundle
+// were split *anyway*...
+{ .mfi; getf.sig r16=f40
+ xma.hu f42=f33,f120,f41
+ add r33=8,r32 }
+{ .mfi; xma.lu f41=f33,f120,f41 };;
+{ .mfi; getf.sig r24=f50
+ xma.hu f52=f33,f121,f51 }
+{ .mfi; xma.lu f51=f33,f121,f51 };;
+{ .mfi; st8 [r32]=r16,16
+ xma.hu f62=f33,f122,f61 }
+{ .mfi; xma.lu f61=f33,f122,f61 };;
+{ .mfi; xma.hu f72=f33,f123,f71 }
+{ .mfi; xma.lu f71=f33,f123,f71 };;
+{ .mfi; xma.hu f82=f33,f124,f81 }
+{ .mfi; xma.lu f81=f33,f124,f81 };;
+{ .mfi; xma.hu f92=f33,f125,f91 }
+{ .mfi; xma.lu f91=f33,f125,f91 };;
+{ .mfi; xma.hu f102=f33,f126,f101 }
+{ .mfi; xma.lu f101=f33,f126,f101 };;
+{ .mfi; xma.hu f112=f33,f127,f111 }
+{ .mfi; xma.lu f111=f33,f127,f111 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r25=f41
+ xma.hu f43=f34,f120,f42 }
+{ .mfi; xma.lu f42=f34,f120,f42 };;
+{ .mfi; getf.sig r16=f60
+ xma.hu f53=f34,f121,f52 }
+{ .mfi; xma.lu f52=f34,f121,f52 };;
+{ .mfi; getf.sig r17=f51
+ xma.hu f63=f34,f122,f62
+ add r25=r25,r24 }
+{ .mfi; xma.lu f62=f34,f122,f62
+ mov carry1=0 };;
+{ .mfi; cmp.ltu p6,p0=r25,r24
+ xma.hu f73=f34,f123,f72 }
+{ .mfi; xma.lu f72=f34,f123,f72 };;
+{ .mfi; st8 [r33]=r25,16
+ xma.hu f83=f34,f124,f82
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f82=f34,f124,f82 };;
+{ .mfi; xma.hu f93=f34,f125,f92 }
+{ .mfi; xma.lu f92=f34,f125,f92 };;
+{ .mfi; xma.hu f103=f34,f126,f102 }
+{ .mfi; xma.lu f102=f34,f126,f102 };;
+{ .mfi; xma.hu f113=f34,f127,f112 }
+{ .mfi; xma.lu f112=f34,f127,f112 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r18=f42
+ xma.hu f44=f35,f120,f43
+ add r17=r17,r16 }
+{ .mfi; xma.lu f43=f35,f120,f43 };;
+{ .mfi; getf.sig r24=f70
+ xma.hu f54=f35,f121,f53 }
+{ .mfi; mov carry2=0
+ xma.lu f53=f35,f121,f53 };;
+{ .mfi; getf.sig r25=f61
+ xma.hu f64=f35,f122,f63
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; add r18=r18,r17
+ xma.lu f63=f35,f122,f63 };;
+{ .mfi; getf.sig r26=f52
+ xma.hu f74=f35,f123,f73
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f73=f35,f123,f73
+ add r18=r18,carry1 };;
+{ .mfi;
+ xma.hu f84=f35,f124,f83
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,carry1
+ xma.lu f83=f35,f124,f83 };;
+{ .mfi; st8 [r32]=r18,16
+ xma.hu f94=f35,f125,f93
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f93=f35,f125,f93 };;
+{ .mfi; xma.hu f104=f35,f126,f103 }
+{ .mfi; xma.lu f103=f35,f126,f103 };;
+{ .mfi; xma.hu f114=f35,f127,f113 }
+{ .mfi; mov carry1=0
+ xma.lu f113=f35,f127,f113
+ add r25=r25,r24 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r27=f43
+ xma.hu f45=f36,f120,f44
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f44=f36,f120,f44
+ add r26=r26,r25 };;
+{ .mfi; getf.sig r16=f80
+ xma.hu f55=f36,f121,f54
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f54=f36,f121,f54 };;
+{ .mfi; getf.sig r17=f71
+ xma.hu f65=f36,f122,f64
+ cmp.ltu p6,p0=r26,r25 }
+{ .mfi; xma.lu f64=f36,f122,f64
+ add r27=r27,r26 };;
+{ .mfi; getf.sig r18=f62
+ xma.hu f75=f36,f123,f74
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r27,r26
+ xma.lu f74=f36,f123,f74
+ add r27=r27,carry2 };;
+{ .mfi; getf.sig r19=f53
+ xma.hu f85=f36,f124,f84
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f84=f36,f124,f84
+ cmp.ltu p6,p0=r27,carry2 };;
+{ .mfi; st8 [r33]=r27,16
+ xma.hu f95=f36,f125,f94
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f94=f36,f125,f94 };;
+{ .mfi; xma.hu f105=f36,f126,f104 }
+{ .mfi; mov carry2=0
+ xma.lu f104=f36,f126,f104
+ add r17=r17,r16 };;
+{ .mfi; xma.hu f115=f36,f127,f114
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f114=f36,f127,f114
+ add r18=r18,r17 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r20=f44
+ xma.hu f46=f37,f120,f45
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f45=f37,f120,f45
+ add r19=r19,r18 };;
+{ .mfi; getf.sig r24=f90
+ xma.hu f56=f37,f121,f55 }
+{ .mfi; xma.lu f55=f37,f121,f55 };;
+{ .mfi; getf.sig r25=f81
+ xma.hu f66=f37,f122,f65
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r19,r18
+ xma.lu f65=f37,f122,f65
+ add r20=r20,r19 };;
+{ .mfi; getf.sig r26=f72
+ xma.hu f76=f37,f123,f75
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r20,r19
+ xma.lu f75=f37,f123,f75
+ add r20=r20,carry1 };;
+{ .mfi; getf.sig r27=f63
+ xma.hu f86=f37,f124,f85
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f85=f37,f124,f85
+ cmp.ltu p7,p0=r20,carry1 };;
+{ .mfi; getf.sig r28=f54
+ xma.hu f96=f37,f125,f95
+(p7) add carry2=1,carry2 }
+{ .mfi; st8 [r32]=r20,16
+ xma.lu f95=f37,f125,f95 };;
+{ .mfi; xma.hu f106=f37,f126,f105 }
+{ .mfi; mov carry1=0
+ xma.lu f105=f37,f126,f105
+ add r25=r25,r24 };;
+{ .mfi; xma.hu f116=f37,f127,f115
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f115=f37,f127,f115
+ add r26=r26,r25 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r29=f45
+ xma.hu f47=f38,f120,f46
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r26,r25
+ xma.lu f46=f38,f120,f46
+ add r27=r27,r26 };;
+{ .mfi; getf.sig r16=f100
+ xma.hu f57=f38,f121,f56
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r27,r26
+ xma.lu f56=f38,f121,f56
+ add r28=r28,r27 };;
+{ .mfi; getf.sig r17=f91
+ xma.hu f67=f38,f122,f66
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r28,r27
+ xma.lu f66=f38,f122,f66
+ add r29=r29,r28 };;
+{ .mfi; getf.sig r18=f82
+ xma.hu f77=f38,f123,f76
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r29,r28
+ xma.lu f76=f38,f123,f76
+ add r29=r29,carry2 };;
+{ .mfi; getf.sig r19=f73
+ xma.hu f87=f38,f124,f86
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f86=f38,f124,f86
+ cmp.ltu p6,p0=r29,carry2 };;
+{ .mfi; getf.sig r20=f64
+ xma.hu f97=f38,f125,f96
+(p6) add carry1=1,carry1 }
+{ .mfi; st8 [r33]=r29,16
+ xma.lu f96=f38,f125,f96 };;
+{ .mfi; getf.sig r21=f55
+ xma.hu f107=f38,f126,f106 }
+{ .mfi; mov carry2=0
+ xma.lu f106=f38,f126,f106
+ add r17=r17,r16 };;
+{ .mfi; xma.hu f117=f38,f127,f116
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f116=f38,f127,f116
+ add r18=r18,r17 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r22=f46
+ xma.hu f48=f39,f120,f47
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f47=f39,f120,f47
+ add r19=r19,r18 };;
+{ .mfi; getf.sig r24=f110
+ xma.hu f58=f39,f121,f57
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r19,r18
+ xma.lu f57=f39,f121,f57
+ add r20=r20,r19 };;
+{ .mfi; getf.sig r25=f101
+ xma.hu f68=f39,f122,f67
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r20,r19
+ xma.lu f67=f39,f122,f67
+ add r21=r21,r20 };;
+{ .mfi; getf.sig r26=f92
+ xma.hu f78=f39,f123,f77
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r21,r20
+ xma.lu f77=f39,f123,f77
+ add r22=r22,r21 };;
+{ .mfi; getf.sig r27=f83
+ xma.hu f88=f39,f124,f87
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r22,r21
+ xma.lu f87=f39,f124,f87
+ add r22=r22,carry1 };;
+{ .mfi; getf.sig r28=f74
+ xma.hu f98=f39,f125,f97
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f97=f39,f125,f97
+ cmp.ltu p7,p0=r22,carry1 };;
+{ .mfi; getf.sig r29=f65
+ xma.hu f108=f39,f126,f107
+(p7) add carry2=1,carry2 }
+{ .mfi; st8 [r32]=r22,16
+ xma.lu f107=f39,f126,f107 };;
+{ .mfi; getf.sig r30=f56
+ xma.hu f118=f39,f127,f117 }
+{ .mfi; xma.lu f117=f39,f127,f117 };;//
+//-------------------------------------------------//
+// Leaving multiplier's heaven... Quite a ride, huh?
+
+{ .mii; getf.sig r31=f47
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r16=f111
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r17=f102 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mii; getf.sig r18=f93
+ add r17=r17,r16
+ mov carry3=0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r29=r29,r28 };;
+{ .mii; getf.sig r19=f84
+ cmp.ltu p7,p0=r17,r16 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r29,r28
+ add r30=r30,r29 };;
+{ .mii; getf.sig r20=f75
+ add r18=r18,r17 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,r29
+ add r31=r31,r30 };;
+{ .mfb; getf.sig r21=f66 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 }
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r31,r30
+ add r31=r31,carry2 };;
+{ .mfb; getf.sig r22=f57 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r19,r18
+ add r20=r20,r19 }
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r31,carry2 };;
+{ .mfb; getf.sig r23=f48 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r20,r19
+ add r21=r21,r20 }
+{ .mii;
+(p6) add carry1=1,carry1 }
+{ .mfb; st8 [r33]=r31,16 };;
+
+{ .mfb; getf.sig r24=f112 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r21,r20
+ add r22=r22,r21 };;
+{ .mfb; getf.sig r25=f103 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r22,r21
+ add r23=r23,r22 };;
+{ .mfb; getf.sig r26=f94 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r23,r22
+ add r23=r23,carry1 };;
+{ .mfb; getf.sig r27=f85 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p8=r23,carry1};;
+{ .mii; getf.sig r28=f76
+ add r25=r25,r24
+ mov carry1=0 }
+{ .mii; st8 [r32]=r23,16
+ (p7) add carry2=1,carry3
+ (p8) add carry2=0,carry3 };;
+
+{ .mfb; nop.m 0x0 }
+{ .mii; getf.sig r29=f67
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r30=f58 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; getf.sig r16=f113 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mfb; getf.sig r17=f104 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r29=r29,r28 };;
+{ .mfb; getf.sig r18=f95 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r29,r28
+ add r30=r30,r29 };;
+{ .mii; getf.sig r19=f86
+ add r17=r17,r16
+ mov carry3=0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,r29
+ add r30=r30,carry2 };;
+{ .mii; getf.sig r20=f77
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,carry2 };;
+{ .mfb; getf.sig r21=f68 }
+{ .mii; st8 [r33]=r30,16
+(p6) add carry1=1,carry1 };;
+
+{ .mfb; getf.sig r24=f114 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mfb; getf.sig r25=f105 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r19,r18
+ add r20=r20,r19 };;
+{ .mfb; getf.sig r26=f96 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r20,r19
+ add r21=r21,r20 };;
+{ .mfb; getf.sig r27=f87 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r21,r20
+ add r21=r21,carry1 };;
+{ .mib; getf.sig r28=f78
+ add r25=r25,r24 }
+{ .mib; (p7) add carry3=1,carry3
+ cmp.ltu p7,p8=r21,carry1};;
+{ .mii; st8 [r32]=r21,16
+ (p7) add carry2=1,carry3
+ (p8) add carry2=0,carry3 }
+
+{ .mii; mov carry1=0
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r16=f115 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; getf.sig r17=f106 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mfb; getf.sig r18=f97 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r28=r28,carry2 };;
+{ .mib; getf.sig r19=f88
+ add r17=r17,r16 }
+{ .mib;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,carry2 };;
+{ .mii; st8 [r33]=r28,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; mov carry2=0
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 };;
+{ .mfb; getf.sig r24=f116 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mfb; getf.sig r25=f107 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,r18
+ add r19=r19,carry1 };;
+{ .mfb; getf.sig r26=f98 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,carry1};;
+{ .mii; st8 [r32]=r19,16
+ (p7) add carry2=1,carry2 }
+
+{ .mfb; add r25=r25,r24 };;
+
+{ .mfb; getf.sig r16=f117 }
+{ .mii; mov carry1=0
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r17=f108 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r26=r26,carry2 };;
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,carry2 };;
+{ .mii; st8 [r33]=r26,16
+(p6) add carry1=1,carry1 }
+
+{ .mfb; add r17=r17,r16 };;
+{ .mfb; getf.sig r24=f118 }
+{ .mii; mov carry2=0
+ cmp.ltu p7,p0=r17,r16
+ add r17=r17,carry1 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r17,carry1};;
+{ .mii; st8 [r32]=r17
+ (p7) add carry2=1,carry2 };;
+{ .mfb; add r24=r24,carry2 };;
+{ .mib; st8 [r33]=r24 }
+
+{ .mib; rum 1<<5 // clear um.mfh
+ br.ret.sptk.many b0 };;
+.endp bn_mul_comba8#
+#undef carry3
+#undef carry2
+#undef carry1
+#endif
+
+#if 1
+// It's possible to make it faster (see the comment to bn_sqr_comba8), but
+// I reckon it isn't worth the effort. Basically because the routine
+// (actually both of them) is practically never called... So I just play
+// the same trick as with bn_sqr_comba8.
+//
+// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+//
+.global bn_sqr_comba4#
+.proc bn_sqr_comba4#
+.align 64
+bn_sqr_comba4:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r32=0,r32
+ addp4 r33=0,r33 };;
+{ .mii;
+#else
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
+ mov r34=r33
+ add r14=8,r33 };;
+ .body
+{ .mii; add r17=8,r34
+ add r15=16,r33
+ add r18=16,r34 }
+{ .mfb; add r16=24,r33
+ br .L_cheat_entry_point4 };;
+.endp bn_sqr_comba4#
+#endif
+
+#if 1
+// Runs in ~115 cycles and is ~4.5 times faster than C. Well, whatever...
+//
+// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+//
+#define carry1 r14
+#define carry2 r15
+.global bn_mul_comba4#
+.proc bn_mul_comba4#
+.align 64
+bn_mul_comba4:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
+ add r14=8,r33
+ add r17=8,r34 }
+ .body
+{ .mii; add r15=16,r33
+ add r18=16,r34
+ add r16=24,r33 };;
+.L_cheat_entry_point4:
+{ .mmi; add r19=24,r34
+
+ ldf8 f32=[r33] }
+
+{ .mmi; ldf8 f120=[r34]
+ ldf8 f121=[r17] };;
+{ .mmi; ldf8 f122=[r18]
+ ldf8 f123=[r19] }
+
+{ .mmi; ldf8 f33=[r14]
+ ldf8 f34=[r15] }
+{ .mfi; ldf8 f35=[r16]
+
+ xma.hu f41=f32,f120,f0 }
+{ .mfi; xma.lu f40=f32,f120,f0 };;
+{ .mfi; xma.hu f51=f32,f121,f0 }
+{ .mfi; xma.lu f50=f32,f121,f0 };;
+{ .mfi; xma.hu f61=f32,f122,f0 }
+{ .mfi; xma.lu f60=f32,f122,f0 };;
+{ .mfi; xma.hu f71=f32,f123,f0 }
+{ .mfi; xma.lu f70=f32,f123,f0 };;//
+// A major stall takes place here, and at 3 more places below. The result
+// from the first xma is not available for another 3 ticks.
+{ .mfi; getf.sig r16=f40
+ xma.hu f42=f33,f120,f41
+ add r33=8,r32 }
+{ .mfi; xma.lu f41=f33,f120,f41 };;
+{ .mfi; getf.sig r24=f50
+ xma.hu f52=f33,f121,f51 }
+{ .mfi; xma.lu f51=f33,f121,f51 };;
+{ .mfi; st8 [r32]=r16,16
+ xma.hu f62=f33,f122,f61 }
+{ .mfi; xma.lu f61=f33,f122,f61 };;
+{ .mfi; xma.hu f72=f33,f123,f71 }
+{ .mfi; xma.lu f71=f33,f123,f71 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r25=f41
+ xma.hu f43=f34,f120,f42 }
+{ .mfi; xma.lu f42=f34,f120,f42 };;
+{ .mfi; getf.sig r16=f60
+ xma.hu f53=f34,f121,f52 }
+{ .mfi; xma.lu f52=f34,f121,f52 };;
+{ .mfi; getf.sig r17=f51
+ xma.hu f63=f34,f122,f62
+ add r25=r25,r24 }
+{ .mfi; mov carry1=0
+ xma.lu f62=f34,f122,f62 };;
+{ .mfi; st8 [r33]=r25,16
+ xma.hu f73=f34,f123,f72
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f72=f34,f123,f72 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r18=f42
+ xma.hu f44=f35,f120,f43
+(p6) add carry1=1,carry1 }
+{ .mfi; add r17=r17,r16
+ xma.lu f43=f35,f120,f43
+ mov carry2=0 };;
+{ .mfi; getf.sig r24=f70
+ xma.hu f54=f35,f121,f53
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f53=f35,f121,f53 };;
+{ .mfi; getf.sig r25=f61
+ xma.hu f64=f35,f122,f63
+ add r18=r18,r17 }
+{ .mfi; xma.lu f63=f35,f122,f63
+(p7) add carry2=1,carry2 };;
+{ .mfi; getf.sig r26=f52
+ xma.hu f74=f35,f123,f73
+ cmp.ltu p7,p0=r18,r17 }
+{ .mfi; xma.lu f73=f35,f123,f73
+ add r18=r18,carry1 };;
+//-------------------------------------------------//
+{ .mii; st8 [r32]=r18,16
+(p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,carry1 };;
+
+{ .mfi; getf.sig r27=f43 // last major stall
+(p7) add carry2=1,carry2 };;
+{ .mii; getf.sig r16=f71
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r17=f62
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r27=r27,carry2 };;
+{ .mii; getf.sig r18=f53
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,carry2 };;
+{ .mfi; st8 [r33]=r27,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; getf.sig r19=f44
+ add r17=r17,r16
+ mov carry2=0 };;
+{ .mii; getf.sig r24=f72
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,r18
+ add r19=r19,carry1 };;
+{ .mii; getf.sig r25=f63
+ (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,carry1};;
+{ .mii; st8 [r32]=r19,16
+ (p7) add carry2=1,carry2 }
+
+{ .mii; getf.sig r26=f54
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r16=f73
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r26=r26,carry2 };;
+{ .mii; getf.sig r17=f64
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,carry2 };;
+{ .mii; st8 [r33]=r26,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; getf.sig r24=f74
+ add r17=r17,r16
+ mov carry2=0 };;
+{ .mii; cmp.ltu p7,p0=r17,r16
+ add r17=r17,carry1 };;
+
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r17,carry1};;
+{ .mii; st8 [r32]=r17,16
+ (p7) add carry2=1,carry2 };;
+
+{ .mii; add r24=r24,carry2 };;
+{ .mii; st8 [r33]=r24 }
+
+{ .mib; rum 1<<5 // clear um.mfh
+ br.ret.sptk.many b0 };;
+.endp bn_mul_comba4#
+#undef carry2
+#undef carry1
+#endif
+
+#if 1
+//
+// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+//
+// In a nutshell, it's a port of my MIPS III/IV implementation.
+//
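+// For reference, a rough C sketch of the intended semantics (an
+// illustration only, not the OpenSSL C source; it assumes h < d so the
+// quotient fits in one word, the helper name div_words_ref is made up,
+// and it relies on a compiler that provides unsigned __int128):
+//
+//	#include <stdint.h>
+//
+//	typedef uint64_t BN_ULONG;	/* 64-bit limbs on IA-64 */
+//
+//	/* quotient of the double word h:l divided by d, assuming h < d */
+//	BN_ULONG div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+//	{
+//		unsigned __int128 n = ((unsigned __int128)h << 64) | l;
+//		return (BN_ULONG)(n / d);
+//	}
+//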
+#define AT r14
+#define H r16
+#define HH r20
+#define L r17
+#define D r18
+#define DH r22
+#define I r21
+
+#if 0
+// Some preprocessors (most notably HP-UX) appear to be allergic to
+// macros enclosed in parentheses [as these three were].
+#define cont p16
+#define break p0 // p20
+#define equ p24
+#else
+cont=p16
+break=p0
+equ=p24
+#endif
+
+.global abort#
+.global bn_div_words#
+.proc bn_div_words#
+.align 64
+bn_div_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,3,5,0,8
+ .save b0,r3
+ mov r3=b0
+ .save pr,r10
+ mov r10=pr };;
+{ .mmb; cmp.eq p6,p0=r34,r0
+ mov r8=-1
+(p6) br.ret.spnt.many b0 };;
+
+ .body
+{ .mii; mov H=r32 // save h
+ mov ar.ec=0 // don't rotate at exit
+ mov pr.rot=0 }
+{ .mii; mov L=r33 // save l
+ mov r36=r0 };;
+
+.L_divw_shift: // -vv- note signed comparison
+{ .mfi; (p0) cmp.lt p16,p0=r0,r34 // d
+ (p0) shladd r33=r34,1,r0 }
+{ .mfb; (p0) add r35=1,r36
+ (p0) nop.f 0x0
+(p16) br.wtop.dpnt .L_divw_shift };;
+
+{ .mii; mov D=r34
+ shr.u DH=r34,32
+ sub r35=64,r36 };;
+{ .mii; setf.sig f7=DH
+ shr.u AT=H,r35
+ mov I=r36 };;
+{ .mib; cmp.ne p6,p0=r0,AT
+ shl H=H,r36
+(p6) br.call.spnt.clr b0=abort };; // overflow, die...
+
+{ .mfi; fcvt.xuf.s1 f7=f7
+ shr.u AT=L,r35 };;
+{ .mii; shl L=L,r36
+ or H=H,AT };;
+
+{ .mii; nop.m 0x0
+ cmp.leu p6,p0=D,H;;
+(p6) sub H=H,D }
+
+{ .mlx; setf.sig f14=D
+ movl AT=0xffffffff };;
+///////////////////////////////////////////////////////////
+{ .mii; setf.sig f6=H
+ shr.u HH=H,32;;
+ cmp.eq p6,p7=HH,DH };;
+{ .mfb;
+(p6) setf.sig f8=AT
+(p7) fcvt.xuf.s1 f6=f6
+(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
+
+{ .mfi; getf.sig r33=f8 // q
+ xmpy.lu f9=f8,f14 }
+{ .mfi; xmpy.hu f10=f8,f14
+ shrp H=H,L,32 };;
+
+{ .mmi; getf.sig r35=f9 // tl
+ getf.sig r31=f10 };; // th
+
+.L_divw_1st_iter:
+{ .mii; (p0) add r32=-1,r33
+ (p0) cmp.eq equ,cont=HH,r31 };;
+{ .mii; (p0) cmp.ltu p8,p0=r35,D
+ (p0) sub r34=r35,D
+ (equ) cmp.leu break,cont=r35,H };;
+{ .mib; (cont) cmp.leu cont,break=HH,r31
+ (p8) add r31=-1,r31
+(cont) br.wtop.spnt .L_divw_1st_iter };;
+///////////////////////////////////////////////////////////
+{ .mii; sub H=H,r35
+ shl r8=r33,32
+ shl L=L,32 };;
+///////////////////////////////////////////////////////////
+{ .mii; setf.sig f6=H
+ shr.u HH=H,32;;
+ cmp.eq p6,p7=HH,DH };;
+{ .mfb;
+(p6) setf.sig f8=AT
+(p7) fcvt.xuf.s1 f6=f6
+(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
+
+{ .mfi; getf.sig r33=f8 // q
+ xmpy.lu f9=f8,f14 }
+{ .mfi; xmpy.hu f10=f8,f14
+ shrp H=H,L,32 };;
+
+{ .mmi; getf.sig r35=f9 // tl
+ getf.sig r31=f10 };; // th
+
+.L_divw_2nd_iter:
+{ .mii; (p0) add r32=-1,r33
+ (p0) cmp.eq equ,cont=HH,r31 };;
+{ .mii; (p0) cmp.ltu p8,p0=r35,D
+ (p0) sub r34=r35,D
+ (equ) cmp.leu break,cont=r35,H };;
+{ .mib; (cont) cmp.leu cont,break=HH,r31
+ (p8) add r31=-1,r31
+(cont) br.wtop.spnt .L_divw_2nd_iter };;
+///////////////////////////////////////////////////////////
+{ .mii; sub H=H,r35
+ or r8=r8,r33
+ mov ar.pfs=r2 };;
+{ .mii; shr.u r9=H,I // remainder if anybody wants it
+ mov pr=r10,0x1ffff }
+{ .mfb; br.ret.sptk.many b0 };;
+
+// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
+// procedure.
+//
+// inputs: f6 = (double)a, f7 = (double)b
+// output: f8 = (int)(a/b)
+// clobbered: f8,f9,f10,f11,pred
+pred=p15
+// One can argue that this snippet is copyrighted by Intel
+// Corporation, as it's essentially identical to one of those
+// found in the "Divide, Square Root and Remainder" section at
+// http://www.intel.com/software/products/opensource/libraries/num.htm.
+// Yes, I admit that the referenced code was used as a template,
+// but only after I realized that there is hardly any other instruction
+// sequence which would perform this operation. I mean, I figure that
+// any independent attempt to implement high-performance division
+// will result in code virtually identical to the Intel code. It
+// should be noted though that the division kernel below is 1 cycle
+// faster than the Intel one (note the commented splits:-), not to mention
+// the original prologue (or rather the lack of one) and epilogue.
+.align 32
+.skip 16
+.L_udiv64_32_b6:
+ frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b
+
+(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0
+(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0
+(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0
+(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0
+(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0
+(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1
+(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1
+(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2
+(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2
+
+ fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3)
+ br.ret.sptk.many b6;;
+.endp bn_div_words#
+#endif
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.S
new file mode 100644
index 00000000..1b875a2a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.S
@@ -0,0 +1,284 @@
+.text
+
+.set noat
+.set noreorder
+
+.align 5
+.globl bn_mul_mont
+.ent bn_mul_mont
+bn_mul_mont:
+ lw $8,16($29)
+ lw $9,20($29)
+ slt $1,$9,4
+ bnez $1,1f
+ li $2,0
+ slt $1,$9,17 # on in-order CPU
+ bnez $1,bn_mul_mont_internal
+ nop
+1: jr $31
+ li $4,0
+.end bn_mul_mont
+
+.align 5
+.ent bn_mul_mont_internal
+bn_mul_mont_internal:
+ .frame $30,14*4,$31
+ .mask 0x40000000|16711680,-4
+ sub $29,14*4
+ sw $30,(14-1)*4($29)
+ sw $23,(14-2)*4($29)
+ sw $22,(14-3)*4($29)
+ sw $21,(14-4)*4($29)
+ sw $20,(14-5)*4($29)
+ sw $19,(14-6)*4($29)
+ sw $18,(14-7)*4($29)
+ sw $17,(14-8)*4($29)
+ sw $16,(14-9)*4($29)
+ move $30,$29
+
+ .set reorder
+ lw $8,0($8)
+ lw $13,0($6) # bp[0]
+ lw $12,0($5) # ap[0]
+ lw $14,0($7) # np[0]
+
+ sub $29,2*4 # place for two extra words
+ sll $9,2
+ li $1,-4096
+ sub $29,$9
+ and $29,$1
+
+ multu $12,$13
+ lw $16,4($5)
+ lw $18,4($7)
+ mflo $10
+ mfhi $11
+ multu $10,$8
+ mflo $23
+
+ multu $16,$13
+ mflo $16
+ mfhi $17
+
+ multu $14,$23
+ mflo $24
+ mfhi $25
+ multu $18,$23
+ addu $24,$10
+ sltu $1,$24,$10
+ addu $25,$1
+ mflo $18
+ mfhi $19
+
+ move $15,$29
+ li $22,2*4
+.align 4
+.L1st:
+ .set noreorder
+ add $12,$5,$22
+ add $14,$7,$22
+ lw $12,($12)
+ lw $14,($14)
+
+ multu $12,$13
+ addu $10,$16,$11
+ addu $24,$18,$25
+ sltu $1,$10,$11
+ sltu $2,$24,$25
+ addu $11,$17,$1
+ addu $25,$19,$2
+ mflo $16
+ mfhi $17
+
+ addu $24,$10
+ sltu $1,$24,$10
+ multu $14,$23
+ addu $25,$1
+ addu $22,4
+ sw $24,($15)
+ sltu $2,$22,$9
+ mflo $18
+ mfhi $19
+
+ bnez $2,.L1st
+ add $15,4
+ .set reorder
+
+ addu $10,$16,$11
+ sltu $1,$10,$11
+ addu $11,$17,$1
+
+ addu $24,$18,$25
+ sltu $2,$24,$25
+ addu $25,$19,$2
+ addu $24,$10
+ sltu $1,$24,$10
+ addu $25,$1
+
+ sw $24,($15)
+
+ addu $25,$11
+ sltu $1,$25,$11
+ sw $25,4($15)
+ sw $1,2*4($15)
+
+ li $21,4
+.align 4
+.Louter:
+ add $13,$6,$21
+ lw $13,($13)
+ lw $12,($5)
+ lw $16,4($5)
+ lw $20,($29)
+
+ multu $12,$13
+ lw $14,($7)
+ lw $18,4($7)
+ mflo $10
+ mfhi $11
+ addu $10,$20
+ multu $10,$8
+ sltu $1,$10,$20
+ addu $11,$1
+ mflo $23
+
+ multu $16,$13
+ mflo $16
+ mfhi $17
+
+ multu $14,$23
+ mflo $24
+ mfhi $25
+
+ multu $18,$23
+ addu $24,$10
+ sltu $1,$24,$10
+ addu $25,$1
+ mflo $18
+ mfhi $19
+
+ move $15,$29
+ li $22,2*4
+ lw $20,4($15)
+.align 4
+.Linner:
+ .set noreorder
+ add $12,$5,$22
+ add $14,$7,$22
+ lw $12,($12)
+ lw $14,($14)
+
+ multu $12,$13
+ addu $10,$16,$11
+ addu $24,$18,$25
+ sltu $1,$10,$11
+ sltu $2,$24,$25
+ addu $11,$17,$1
+ addu $25,$19,$2
+ mflo $16
+ mfhi $17
+
+ addu $10,$20
+ addu $22,4
+ multu $14,$23
+ sltu $1,$10,$20
+ addu $24,$10
+ addu $11,$1
+ sltu $2,$24,$10
+ lw $20,2*4($15)
+ addu $25,$2
+ sltu $1,$22,$9
+ mflo $18
+ mfhi $19
+ sw $24,($15)
+ bnez $1,.Linner
+ add $15,4
+ .set reorder
+
+ addu $10,$16,$11
+ sltu $1,$10,$11
+ addu $11,$17,$1
+ addu $10,$20
+ sltu $2,$10,$20
+ addu $11,$2
+
+ lw $20,2*4($15)
+ addu $24,$18,$25
+ sltu $1,$24,$25
+ addu $25,$19,$1
+ addu $24,$10
+ sltu $2,$24,$10
+ addu $25,$2
+ sw $24,($15)
+
+ addu $24,$25,$11
+ sltu $25,$24,$11
+ addu $24,$20
+ sltu $1,$24,$20
+ addu $25,$1
+ sw $24,4($15)
+ sw $25,2*4($15)
+
+ addu $21,4
+ sltu $2,$21,$9
+ bnez $2,.Louter
+
+ .set noreorder
+ add $20,$29,$9 # &tp[num]
+ move $15,$29
+ move $5,$29
+ li $11,0 # clear borrow bit
+
+.align 4
+.Lsub: lw $10,($15)
+ lw $24,($7)
+ add $15,4
+ add $7,4
+ subu $24,$10,$24 # tp[i]-np[i]
+ sgtu $1,$24,$10
+ subu $10,$24,$11
+ sgtu $11,$10,$24
+ sw $10,($4)
+ or $11,$1
+ sltu $1,$15,$20
+ bnez $1,.Lsub
+ add $4,4
+
+ subu $11,$25,$11 # handle upmost overflow bit
+ move $15,$29
+ sub $4,$9 # restore rp
+ not $25,$11
+
+ and $5,$11,$29
+ and $6,$25,$4
+ or $5,$5,$6 # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: lw $12,($5)
+ add $5,4
+ sw $0,($15)
+ add $15,4
+ sltu $1,$15,$20
+ sw $12,($4)
+ bnez $1,.Lcopy
+ add $4,4
+
+ li $4,1
+ li $2,1
+
+ .set noreorder
+ move $29,$30
+ lw $30,(14-1)*4($29)
+ lw $23,(14-2)*4($29)
+ lw $22,(14-3)*4($29)
+ lw $21,(14-4)*4($29)
+ lw $20,(14-5)*4($29)
+ lw $19,(14-6)*4($29)
+ lw $18,(14-7)*4($29)
+ lw $17,(14-8)*4($29)
+ lw $16,(14-9)*4($29)
+ jr $31
+ add $29,14*4
+.end bn_mul_mont_internal
+.rdata
+.asciiz "Montgomery Multiplication for MIPS, CRYPTOGAMS by <appro@openssl.org>"
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.pl
new file mode 100644
index 00000000..caae04ed
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips-mont.pl
@@ -0,0 +1,426 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# This module isn't of direct interest to OpenSSL, because it
+# doesn't provide better performance for longer keys, at least not on
+# in-order-execution cores. While 512-bit RSA sign operations can be
+# 65% faster in 64-bit mode, 1024-bit ones are only 15% faster, and
+# 4096-bit ones are up to 15% slower. In 32-bit mode it varies from
+# 16% improvement for 512-bit RSA sign to -33% for 4096-bit RSA
+# verify:-( All comparisons are against bn_mul_mont-free assembler.
+# The module might be of interest to embedded system developers, as
+# the code is smaller than 1KB, yet offers >3x improvement on MIPS64
+# and 75-30% [less for longer keys] on MIPS32 over compiler-generated
+# code.
+
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange the code in an
+# ABI-neutral manner. Therefore let's stick to the NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. Following coding rules facilitate
+# interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp;
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+# old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
+
+if ($flavour =~ /64|n32/i) {
+ $PTR_ADD="dadd"; # incidentally works even on n32
+ $PTR_SUB="dsub"; # incidentally works even on n32
+ $REG_S="sd";
+ $REG_L="ld";
+ $SZREG=8;
+} else {
+ $PTR_ADD="add";
+ $PTR_SUB="sub";
+ $REG_S="sw";
+ $REG_L="lw";
+ $SZREG=4;
+}
+$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0x00fff000 : 0x00ff0000;
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
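+
+# Judging from the argument handling above, the script appears to be driven
+# roughly as
+#
+#	perl mips-mont.pl <flavour> <output file>	# e.g. perl mips-mont.pl o32 mips-mont.S
+#
+# where <flavour> is one of the values listed next to $flavour above. The
+# exact build-system invocation is not shown here, so treat this as an
+# illustration rather than a documented interface.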
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $BNSZ=8;
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $BNSZ=4;
+}
+
+# int bn_mul_mont(
+$rp=$a0; # BN_ULONG *rp,
+$ap=$a1; # const BN_ULONG *ap,
+$bp=$a2; # const BN_ULONG *bp,
+$np=$a3; # const BN_ULONG *np,
+$n0=$a4; # const BN_ULONG *n0,
+$num=$a5; # int num);
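+#
+# For orientation, a rough C sketch of what the routine computes: a
+# word-serial Montgomery multiplication rp = ap*bp*R^-1 mod np, with
+# R = 2^(32*num) and n0[0] = -np[0]^-1 mod 2^32.  This is an illustration
+# with 32-bit limbs and a made-up helper name, not the OpenSSL C source,
+# and the assembly below arranges the same work rather differently:
+#
+#	#include <stdint.h>
+#	#include <string.h>
+#
+#	#define MAXN 64			/* cap for the sketch only */
+#	typedef uint32_t BN_ULONG;
+#
+#	int mont_mul_ref(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+#	                 const BN_ULONG *np, const BN_ULONG *n0, int num)
+#	{
+#		BN_ULONG t[MAXN + 2] = {0};
+#
+#		if (num < 1 || num > MAXN)
+#			return 0;
+#
+#		for (int i = 0; i < num; i++) {
+#			uint64_t c = 0, v;
+#
+#			/* t += ap * bp[i] */
+#			for (int j = 0; j < num; j++) {
+#				v = (uint64_t)ap[j] * bp[i] + t[j] + c;
+#				t[j] = (BN_ULONG)v;
+#				c = v >> 32;
+#			}
+#			v = (uint64_t)t[num] + c;
+#			t[num] = (BN_ULONG)v;
+#			t[num + 1] = (BN_ULONG)(v >> 32);
+#
+#			/* t += np * m makes t[0] zero, then drop that word */
+#			BN_ULONG m = t[0] * n0[0];
+#			c = 0;
+#			for (int j = 0; j < num; j++) {
+#				v = (uint64_t)np[j] * m + t[j] + c;
+#				t[j] = (BN_ULONG)v;
+#				c = v >> 32;
+#			}
+#			v = (uint64_t)t[num] + c;
+#			t[num] = (BN_ULONG)v;
+#			t[num + 1] += (BN_ULONG)(v >> 32);
+#
+#			memmove(t, t + 1, (num + 1) * sizeof(BN_ULONG));
+#			t[num + 1] = 0;
+#		}
+#
+#		/* result is < 2*np, so at most one final subtraction */
+#		BN_ULONG d[MAXN];
+#		uint64_t b = 0;
+#		for (int j = 0; j < num; j++) {
+#			uint64_t v = (uint64_t)t[j] - np[j] - b;
+#			d[j] = (BN_ULONG)v;
+#			b = (v >> 32) & 1;
+#		}
+#		for (int j = 0; j < num; j++)
+#			rp[j] = (t[num] || !b) ? d[j] : t[j];
+#		return 1;
+#	}
+#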
+
+$lo0=$a6;
+$hi0=$a7;
+$lo1=$t1;
+$hi1=$t2;
+$aj=$s0;
+$bi=$s1;
+$nj=$s2;
+$tp=$s3;
+$alo=$s4;
+$ahi=$s5;
+$nlo=$s6;
+$nhi=$s7;
+$tj=$s8;
+$i=$s9;
+$j=$s10;
+$m1=$s11;
+
+$FRAMESIZE=14;
+
+$code=<<___;
+.text
+
+.set noat
+.set noreorder
+
+.align 5
+.globl bn_mul_mont
+.ent bn_mul_mont
+bn_mul_mont:
+___
+$code.=<<___ if ($flavour =~ /o32/i);
+ lw $n0,16($sp)
+ lw $num,20($sp)
+___
+$code.=<<___;
+ slt $at,$num,4
+ bnez $at,1f
+ li $t0,0
+ slt $at,$num,17 # on in-order CPU
+ bnez $at,bn_mul_mont_internal
+ nop
+1: jr $ra
+ li $a0,0
+.end bn_mul_mont
+
+.align 5
+.ent bn_mul_mont_internal
+bn_mul_mont_internal:
+ .frame $fp,$FRAMESIZE*$SZREG,$ra
+ .mask 0x40000000|$SAVED_REGS_MASK,-$SZREG
+ $PTR_SUB $sp,$FRAMESIZE*$SZREG
+ $REG_S $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_S $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_S $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_S $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_S $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_S $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_S $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_S $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_S $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_S $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_S $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_S $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_S $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ move $fp,$sp
+
+ .set reorder
+ $LD $n0,0($n0)
+ $LD $bi,0($bp) # bp[0]
+ $LD $aj,0($ap) # ap[0]
+ $LD $nj,0($np) # np[0]
+
+ $PTR_SUB $sp,2*$BNSZ # place for two extra words
+ sll $num,`log($BNSZ)/log(2)`
+ li $at,-4096
+ $PTR_SUB $sp,$num
+ and $sp,$at
+
+ $MULTU $aj,$bi
+ $LD $alo,$BNSZ($ap)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $MULTU $lo0,$n0
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+.align 4
+.L1st:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $MULTU $nj,$m1
+ $ADDU $hi1,$at
+ addu $j,$BNSZ
+ $ST $lo1,($tp)
+ sltu $t0,$j,$num
+ mflo $nlo
+ mfhi $nhi
+
+ bnez $t0,.L1st
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+
+ $ADDU $lo1,$nlo,$hi1
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi1,$nhi,$t0
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+
+ $ST $lo1,($tp)
+
+ $ADDU $hi1,$hi0
+ sltu $at,$hi1,$hi0
+ $ST $hi1,$BNSZ($tp)
+ $ST $at,2*$BNSZ($tp)
+
+ li $i,$BNSZ
+.align 4
+.Louter:
+ $PTR_ADD $bi,$bp,$i
+ $LD $bi,($bi)
+ $LD $aj,($ap)
+ $LD $alo,$BNSZ($ap)
+ $LD $tj,($sp)
+
+ $MULTU $aj,$bi
+ $LD $nj,($np)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $ADDU $lo0,$tj
+ $MULTU $lo0,$n0
+ sltu $at,$lo0,$tj
+ $ADDU $hi0,$at
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+ $LD $tj,$BNSZ($tp)
+.align 4
+.Linner:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo0,$tj
+ addu $j,$BNSZ
+ $MULTU $nj,$m1
+ sltu $at,$lo0,$tj
+ $ADDU $lo1,$lo0
+ $ADDU $hi0,$at
+ sltu $t0,$lo1,$lo0
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $hi1,$t0
+ sltu $at,$j,$num
+ mflo $nlo
+ mfhi $nhi
+ $ST $lo1,($tp)
+ bnez $at,.Linner
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+ $ADDU $lo0,$tj
+ sltu $t0,$lo0,$tj
+ $ADDU $hi0,$t0
+
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo1,$hi1
+ $ADDU $hi1,$nhi,$at
+ $ADDU $lo1,$lo0
+ sltu $t0,$lo1,$lo0
+ $ADDU $hi1,$t0
+ $ST $lo1,($tp)
+
+ $ADDU $lo1,$hi1,$hi0
+ sltu $hi1,$lo1,$hi0
+ $ADDU $lo1,$tj
+ sltu $at,$lo1,$tj
+ $ADDU $hi1,$at
+ $ST $lo1,$BNSZ($tp)
+ $ST $hi1,2*$BNSZ($tp)
+
+ addu $i,$BNSZ
+ sltu $t0,$i,$num
+ bnez $t0,.Louter
+
+ .set noreorder
+ $PTR_ADD $tj,$sp,$num # &tp[num]
+ move $tp,$sp
+ move $ap,$sp
+ li $hi0,0 # clear borrow bit
+
+.align 4
+.Lsub: $LD $lo0,($tp)
+ $LD $lo1,($np)
+ $PTR_ADD $tp,$BNSZ
+ $PTR_ADD $np,$BNSZ
+ $SUBU $lo1,$lo0,$lo1 # tp[i]-np[i]
+ sgtu $at,$lo1,$lo0
+ $SUBU $lo0,$lo1,$hi0
+ sgtu $hi0,$lo0,$lo1
+ $ST $lo0,($rp)
+ or $hi0,$at
+ sltu $at,$tp,$tj
+ bnez $at,.Lsub
+ $PTR_ADD $rp,$BNSZ
+
+ $SUBU $hi0,$hi1,$hi0 # handle upmost overflow bit
+ move $tp,$sp
+ $PTR_SUB $rp,$num # restore rp
+ not $hi1,$hi0
+
+ and $ap,$hi0,$sp
+ and $bp,$hi1,$rp
+ or $ap,$ap,$bp # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: $LD $aj,($ap)
+ $PTR_ADD $ap,$BNSZ
+ $ST $zero,($tp)
+ $PTR_ADD $tp,$BNSZ
+ sltu $at,$tp,$tj
+ $ST $aj,($rp)
+ bnez $at,.Lcopy
+ $PTR_ADD $rp,$BNSZ
+
+ li $a0,1
+ li $t0,1
+
+ .set noreorder
+ move $sp,$fp
+ $REG_L $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_L $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_L $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_L $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_L $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_L $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_L $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_L $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_L $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_L $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_L $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_L $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE*$SZREG
+.end bn_mul_mont_internal
+.rdata
+.asciiz "Montgomery Multiplication for MIPS, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips.pl
new file mode 100644
index 00000000..d2f3ef7b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips.pl
@@ -0,0 +1,2583 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project.
+#
+# Rights for redistribution and usage in source and binary forms are
+# granted according to the OpenSSL license. Warranty of any kind is
+# disclaimed.
+# ====================================================================
+
+
+# July 1999
+#
+# This is drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c.
+#
+# The module is designed to work with either of the "new" MIPS ABI(5),
+# namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
+# IRIX 5.x, not only because it doesn't support the new ABIs but also
+# because 5.x kernels put the R4x00 CPU into 32-bit mode, and all those
+# 64-bit instructions (daddu, dmultu, etc.) found below would only
+# cause an illegal instruction exception:-(
+#
+# In addition, the code depends on preprocessor flags set up by the MIPSpro
+# compiler driver (either as or cc) and therefore (probably?) can't be
+# compiled by the GNU assembler. The GNU C driver manages fine though...
+# I mean, as long as -mmips-as is specified or is the default option,
+# because then it simply invokes /usr/bin/as, which in turn takes
+# perfect care of the preprocessor definitions. Another neat feature
+# offered by the MIPSpro assembler is an optimization pass. This gave
+# me the opportunity to keep the code looking more regular, as all those
+# architecture-dependent instruction rescheduling details were left to
+# the assembler. Cool, huh?
+#
+# Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
+# goes way over 3 times faster!
+#
+# <appro@fy.chalmers.se>
+
+# October 2010
+#
+# Adapt the module even for 32-bit ABIs and other OSes. The former was
+# achieved by mechanically replacing 64-bit arithmetic instructions
+# such as dmultu, daddu, etc. with their 32-bit counterparts and by
+# adjusting offsets denoting multiples of BN_ULONG. The above-mentioned
+# >3x performance improvement naturally does not apply to 32-bit code
+# [because there is no instruction a 32-bit compiler can't use]; one
+# has to be content with a 40-85% improvement depending on benchmark and
+# key length, more for longer keys.
+
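+# To make the drop-in relationship with crypto/bn/bn_asm.c concrete, here
+# is a rough C sketch of the first primitive implemented below,
+# bn_mul_add_words (an illustration of the intended semantics with 32-bit
+# limbs, not the OpenSSL C source; the _ref name is made up):
+#
+#	#include <stdint.h>
+#
+#	typedef uint32_t BN_ULONG;	/* the 64-bit flavours use 64-bit limbs */
+#
+#	/* rp[i] += ap[i]*w with carry propagation; return the final carry */
+#	BN_ULONG mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+#	                           int num, BN_ULONG w)
+#	{
+#		uint64_t carry = 0;
+#		for (int i = 0; i < num; i++) {
+#			uint64_t v = (uint64_t)ap[i] * w + rp[i] + carry;
+#			rp[i] = (BN_ULONG)v;
+#			carry = v >> 32;
+#		}
+#		return (BN_ULONG)carry;
+#	}
+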
+$flavour = shift;
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $DIVU="ddivu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $SRL="dsrl";
+ $SLL="dsll";
+ $BNSZ=8;
+ $PTR_ADD="daddu";
+ $PTR_SUB="dsubu";
+ $SZREG=8;
+ $REG_S="sd";
+ $REG_L="ld";
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $DIVU="divu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $SRL="srl";
+ $SLL="sll";
+ $BNSZ=4;
+ $PTR_ADD="addu";
+ $PTR_SUB="subu";
+ $SZREG=4;
+ $REG_S="sw";
+ $REG_L="lw";
+ $code=".set mips2\n";
+}
+
+# Below is N32/64 register layout used in the original module.
+#
+($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+($ta0,$ta1,$ta2,$ta3)=($a4,$a5,$a6,$a7);
+#
+# No special adaptation is required for O32. NUBI, on the other hand,
+# is handled by saving/restoring ($v1,$t0..$t3).
+
+$gp=$v1 if ($flavour =~ /nubi/i);
+
+$minus4=$v1;
+
+$code.=<<___;
+.rdata
+.asciiz "mips3.s, Version 1.2"
+.asciiz "MIPS II/III/IV ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>"
+
+.text
+.set noat
+
+.align 5
+.globl bn_mul_add_words
+.ent bn_mul_add_words
+bn_mul_add_words:
+ .set noreorder
+ bgtz $a2,bn_mul_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words
+
+.align 5
+.ent bn_mul_add_words_internal
+bn_mul_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ beqz $ta0,.L_bn_mul_add_words_tail
+
+.L_bn_mul_add_words_loop:
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ $LD $t2,$BNSZ($a1)
+ $LD $t3,$BNSZ($a0)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0 # All manuals say it "compares 32-bit
+ # values", but it seems to work fine
+ # even on 64-bit registers.
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ $MULTU $t2,$a3
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+
+ $LD $ta2,3*$BNSZ($a1)
+ $LD $ta3,3*$BNSZ($a0)
+ $ADDU $t3,$v0
+ sltu $v0,$t3,$v0
+ mflo $at
+ mfhi $t2
+ $ADDU $t3,$at
+ $ADDU $v0,$t2
+ $MULTU $ta0,$a3
+ sltu $at,$t3,$at
+ $ST $t3,$BNSZ($a0)
+ $ADDU $v0,$at
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ $ADDU $ta1,$v0
+ sltu $v0,$ta1,$v0
+ mflo $at
+ mfhi $ta0
+ $ADDU $ta1,$at
+ $ADDU $v0,$ta0
+ $MULTU $ta2,$a3
+ sltu $at,$ta1,$at
+ $ST $ta1,-2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+
+ and $ta0,$a2,$minus4
+ $ADDU $ta3,$v0
+ sltu $v0,$ta3,$v0
+ mflo $at
+ mfhi $ta2
+ $ADDU $ta3,$at
+ $ADDU $v0,$ta2
+ sltu $at,$ta3,$at
+ $ST $ta3,-$BNSZ($a0)
+ .set noreorder
+ bgtz $ta0,.L_bn_mul_add_words_loop
+ $ADDU $v0,$at
+
+ beqz $a2,.L_bn_mul_add_words_return
+ nop
+
+.L_bn_mul_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,$BNSZ($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+.L_bn_mul_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words_internal
+
+.align 5
+.globl bn_mul_words
+.ent bn_mul_words
+bn_mul_words:
+ .set noreorder
+ bgtz $a2,bn_mul_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words
+
+.align 5
+.ent bn_mul_words_internal
+bn_mul_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ beqz $ta0,.L_bn_mul_words_tail
+
+.L_bn_mul_words_loop:
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $MULTU $t2,$a3
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $at
+ mfhi $t2
+ $ADDU $v0,$at
+ sltu $t3,$v0,$at
+ $MULTU $ta0,$a3
+ $ST $v0,-3*$BNSZ($a0)
+ $ADDU $v0,$t3,$t2
+
+ mflo $at
+ mfhi $ta0
+ $ADDU $v0,$at
+ sltu $ta1,$v0,$at
+ $MULTU $ta2,$a3
+ $ST $v0,-2*$BNSZ($a0)
+ $ADDU $v0,$ta1,$ta0
+
+ and $ta0,$a2,$minus4
+ mflo $at
+ mfhi $ta2
+ $ADDU $v0,$at
+ sltu $ta3,$v0,$at
+ $ST $v0,-$BNSZ($a0)
+ .set noreorder
+ bgtz $ta0,.L_bn_mul_words_loop
+ $ADDU $v0,$ta3,$ta2
+
+ beqz $a2,.L_bn_mul_words_return
+ nop
+
+.L_bn_mul_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,2*$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+
+.L_bn_mul_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words_internal
+
+.align 5
+.globl bn_sqr_words
+.ent bn_sqr_words
+bn_sqr_words:
+ .set noreorder
+ bgtz $a2,bn_sqr_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_sqr_words
+
+.align 5
+.ent bn_sqr_words_internal
+bn_sqr_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ beqz $ta0,.L_bn_sqr_words_tail
+
+.L_bn_sqr_words_loop:
+ $LD $t0,0($a1)
+ $MULTU $t0,$t0
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+
+ $MULTU $t2,$t2
+ subu $a2,4
+ $PTR_ADD $a0,8*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $t3
+ mfhi $t2
+ $ST $t3,-6*$BNSZ($a0)
+ $ST $t2,-5*$BNSZ($a0)
+
+ $MULTU $ta0,$ta0
+ mflo $ta1
+ mfhi $ta0
+ $ST $ta1,-4*$BNSZ($a0)
+ $ST $ta0,-3*$BNSZ($a0)
+
+
+ $MULTU $ta2,$ta2
+ and $ta0,$a2,$minus4
+ mflo $ta3
+ mfhi $ta2
+ $ST $ta3,-2*$BNSZ($a0)
+
+ .set noreorder
+ bgtz $ta0,.L_bn_sqr_words_loop
+ $ST $ta2,-$BNSZ($a0)
+
+ beqz $a2,.L_bn_sqr_words_return
+ nop
+
+.L_bn_sqr_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,2*$BNSZ($a0)
+ $ST $t0,3*$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$t0
+ mflo $t1
+ mfhi $t0
+ $ST $t1,4*$BNSZ($a0)
+ $ST $t0,5*$BNSZ($a0)
+
+.L_bn_sqr_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_sqr_words_internal
+
+.align 5
+.globl bn_add_words
+.ent bn_add_words
+bn_add_words:
+ .set noreorder
+ bgtz $a3,bn_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_add_words
+
+.align 5
+.ent bn_add_words_internal
+bn_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ beqz $at,.L_bn_add_words_tail
+
+.L_bn_add_words_loop:
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ $ADDU $ta0,$t0
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta1,$t1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta3,$t3
+ sltu $t9,$ta3,$t3
+ $ADDU $t3,$ta3,$v0
+ sltu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+
+ .set noreorder
+ bgtz $at,.L_bn_add_words_loop
+ $ADDU $v0,$t9
+
+ beqz $a3,.L_bn_add_words_return
+ nop
+
+.L_bn_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ $ADDU $ta0,$t0
+ subu $a3,1
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t1,$BNSZ($a1)
+ $LD $ta1,$BNSZ($a2)
+ $ADDU $ta1,$t1
+ subu $a3,1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_add_words_internal
+
+.align 5
+.globl bn_sub_words
+.ent bn_sub_words
+bn_sub_words:
+ .set noreorder
+ bgtz $a3,bn_sub_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$zero
+.end bn_sub_words
+
+.align 5
+.ent bn_sub_words_internal
+bn_sub_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ beqz $at,.L_bn_sub_words_tail
+
+.L_bn_sub_words_loop:
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t3,$ta3
+ $SUBU $ta3,$t3,$ta3
+ $SUBU $t3,$ta3,$v0
+ sgtu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+
+ .set noreorder
+ bgtz $at,.L_bn_sub_words_loop
+ $ADDU $v0,$t9
+
+ beqz $a3,.L_bn_sub_words_return
+ nop
+
+.L_bn_sub_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,1
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t1,$BNSZ($a1)
+ subu $a3,1
+ $LD $ta1,$BNSZ($a2)
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_sub_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_sub_words_internal
+
+.align 5
+.globl bn_div_3_words
+.ent bn_div_3_words
+bn_div_3_words:
+ .set noreorder
+ move $a3,$a0 # we know that bn_div_words does not
+ # touch $a3, $ta2, $ta3 and preserves $a2
+ # so that we can save two arguments
+ # and return address in registers
+ # instead of stack:-)
+
+ $LD $a0,($a3)
+ move $ta2,$a1
+ bne $a0,$a2,bn_div_3_words_internal
+ $LD $a1,-$BNSZ($a3)
+ li $v0,-1
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words
+
+.align 5
+.ent bn_div_3_words_internal
+bn_div_3_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ move $ta3,$ra
+ bal bn_div_words_internal
+ move $ra,$ta3
+ $MULTU $ta2,$v0
+ $LD $t2,-2*$BNSZ($a3)
+ move $ta0,$zero
+ mfhi $t1
+ mflo $t0
+ sltu $t8,$t1,$a1
+.L_bn_div_3_words_inner_loop:
+ bnez $t8,.L_bn_div_3_words_inner_loop_done
+ sgeu $at,$t2,$t0
+ seq $t9,$t1,$a1
+ and $at,$t9
+ sltu $t3,$t0,$ta2
+ $ADDU $a1,$a2
+ $SUBU $t1,$t3
+ $SUBU $t0,$ta2
+ sltu $t8,$t1,$a1
+ sltu $ta0,$a1,$a2
+ or $t8,$ta0
+ .set noreorder
+ beqz $at,.L_bn_div_3_words_inner_loop
+ $SUBU $v0,1
+ $ADDU $v0,1
+ .set reorder
+.L_bn_div_3_words_inner_loop_done:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words_internal
+
+.align 5
+.globl bn_div_words
+.ent bn_div_words
+bn_div_words:
+ .set noreorder
+ bnez $a2,bn_div_words_internal
+ li $v0,-1 # I would rather signal div-by-zero
+ # which can be done with 'break 7'
+ jr $ra
+ move $a0,$v0
+.end bn_div_words
+
+.align 5
+.ent bn_div_words_internal
+bn_div_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ move $v1,$zero
+ bltz $a2,.L_bn_div_words_body
+ move $t9,$v1
+ $SLL $a2,1
+ bgtz $a2,.-4
+ addu $t9,1
+
+ .set reorder
+ negu $t1,$t9
+ li $t2,-1
+ $SLL $t2,$t1
+ and $t2,$a0
+ $SRL $at,$a1,$t1
+ .set noreorder
+ beqz $t2,.+12
+ nop
+ break 6 # signal overflow
+ .set reorder
+ $SLL $a0,$t9
+ $SLL $a1,$t9
+ or $a0,$at
+___
+$QT=$ta0;
+$HH=$ta1;
+$DH=$v1;
+$code.=<<___;
+.L_bn_div_words_body:
+ $SRL $DH,$a2,4*$BNSZ # bits
+ sgeu $at,$a0,$a2
+ .set noreorder
+ beqz $at,.+12
+ nop
+ $SUBU $a0,$a2
+ .set reorder
+
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div1
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div1:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop1:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v0,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop1_done
+ $SUBU $t1,$v0
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop1
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop1_done:
+
+ $SLL $a1,4*$BNSZ # bits
+ $SUBU $a0,$t3,$t0
+ $SLL $v0,$QT,4*$BNSZ # bits
+
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div2
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div2:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop2:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v1,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop2_done
+ $SUBU $t1,$v1
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop2
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop2_done:
+
+ $SUBU $a0,$t3,$t0
+ or $v0,$QT
+ $SRL $v1,$a0,$t9 # $v1 contains remainder if anybody wants it
+ $SRL $a2,$t9 # restore $a2
+
+ .set noreorder
+ move $a1,$v1
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_words_internal
+___
+undef $HH; undef $QT; undef $DH;
+
+($a_0,$a_1,$a_2,$a_3)=($t0,$t1,$t2,$t3);
+($b_0,$b_1,$b_2,$b_3)=($ta0,$ta1,$ta2,$ta3);
+
+($a_4,$a_5,$a_6,$a_7)=($s0,$s2,$s4,$a1); # once we load a[7], no use for $a1
+($b_4,$b_5,$b_6,$b_7)=($s1,$s3,$s5,$a2); # once we load b[7], no use for $a2
+
+($t_1,$t_2,$c_1,$c_2,$c_3)=($t8,$t9,$v0,$v1,$a3);
+
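+# The comba bodies below annotate each step with the bn_asm.c-style macro
+# they mirror, e.g. "mul_add_c(a[0],b[1],c2,c3,c1)".  As a hedged reading of
+# that notation (an illustration with 32-bit limbs, not the OpenSSL macro
+# itself): the product a*b is accumulated into a three-word column
+# accumulator, lowest word first:
+#
+#	#include <stdint.h>
+#
+#	/* add a*b into the running columns (c0,c1,c2) */
+#	static void mul_add_c_ref(uint32_t a, uint32_t b,
+#	                          uint32_t *c0, uint32_t *c1, uint32_t *c2)
+#	{
+#		uint64_t t = (uint64_t)a * b;
+#		uint64_t s = (uint64_t)*c0 + (uint32_t)t;
+#		*c0 = (uint32_t)s;
+#		s = (uint64_t)*c1 + (t >> 32) + (s >> 32);
+#		*c1 = (uint32_t)s;
+#		*c2 += (uint32_t)(s >> 32);
+#	}
+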
+$code.=<<___;
+
+.align 5
+.globl bn_mul_comba8
+.ent bn_mul_comba8
+bn_mul_comba8:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,12*$SZREG,$ra
+ .mask 0x803ff008,-$SZREG
+ $PTR_SUB $sp,12*$SZREG
+ $REG_S $ra,11*$SZREG($sp)
+ $REG_S $s5,10*$SZREG($sp)
+ $REG_S $s4,9*$SZREG($sp)
+ $REG_S $s3,8*$SZREG($sp)
+ $REG_S $s2,7*$SZREG($sp)
+ $REG_S $s1,6*$SZREG($sp)
+ $REG_S $s0,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x003f0000,-$SZREG
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $s5,5*$SZREG($sp)
+ $REG_S $s4,4*$SZREG($sp)
+ $REG_S $s3,3*$SZREG($sp)
+ $REG_S $s2,2*$SZREG($sp)
+ $REG_S $s1,1*$SZREG($sp)
+ $REG_S $s0,0*$SZREG($sp)
+___
+$code.=<<___;
+
+ .set reorder
+	$LD	$a_0,0($a1)	# If compiled with the -mips3 option on
+				# an R5000 box, the assembler barks on this
+				# line with a "should not have mult/div
+				# as last instruction in bb (R10K
+				# bug)" warning. If anybody out there
+				# has a clue about how to circumvent
+				# this, do send me a note.
+ # <appro\@fy.chalmers.se>
+
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ $LD $b_4,4*$BNSZ($a2)
+ $LD $b_5,5*$BNSZ($a2)
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ $LD $b_6,6*$BNSZ($a2)
+ $LD $b_7,7*$BNSZ($a2)
+ $ST $c_1,0($a0) # r[0]=c1;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0) # r[1]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0) # r[2]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_0 # mul_add_c(a[4],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0) # r[3]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_4 # mul_add_c(a[0],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_5 # mul_add_c(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0) # r[4]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_4 # mul_add_c(a[1],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_1 # mul_add_c(a[4],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_0 # mul_add_c(a[5],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_0 # mul_add_c(a[6],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0) # r[5]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_1 # mul_add_c(a[5],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_2 # mul_add_c(a[4],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_4 # mul_add_c(a[2],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_5 # mul_add_c(a[1],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_6 # mul_add_c(a[0],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_7 # mul_add_c(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0) # r[6]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_6 # mul_add_c(a[1],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_5 # mul_add_c(a[2],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_4 # mul_add_c(a[3],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_3 # mul_add_c(a[4],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_2 # mul_add_c(a[5],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_1 # mul_add_c(a[6],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_0 # mul_add_c(a[7],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_1 # mul_add_c(a[7],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,7*$BNSZ($a0) # r[7]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_2 # mul_add_c(a[6],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_3 # mul_add_c(a[5],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_4 # mul_add_c(a[4],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_5 # mul_add_c(a[3],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_6 # mul_add_c(a[2],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_7 # mul_add_c(a[1],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_7 # mul_add_c(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0) # r[8]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_6 # mul_add_c(a[3],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_5 # mul_add_c(a[4],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_4 # mul_add_c(a[5],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_3 # mul_add_c(a[6],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_2 # mul_add_c(a[7],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_3 # mul_add_c(a[7],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,9*$BNSZ($a0) # r[9]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_4 # mul_add_c(a[6],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_5 # mul_add_c(a[5],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_6 # mul_add_c(a[4],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_7 # mul_add_c(a[3],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_7 # mul_add_c(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0) # r[10]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_6 # mul_add_c(a[5],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_5 # mul_add_c(a[6],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_4 # mul_add_c(a[7],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_5 # mul_add_c(a[7],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,11*$BNSZ($a0) # r[11]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_6 # mul_add_c(a[6],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_7 # mul_add_c(a[5],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_7 # mul_add_c(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0) # r[12]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_6 # mul_add_c(a[7],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_7 # mul_add_c(a[7],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,13*$BNSZ($a0) # r[13]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0) # r[14]=c3;
+ $ST $c_1,15*$BNSZ($a0) # r[15]=c1;
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s5,10*$SZREG($sp)
+ $REG_L $s4,9*$SZREG($sp)
+ $REG_L $s3,8*$SZREG($sp)
+ $REG_L $s2,7*$SZREG($sp)
+ $REG_L $s1,6*$SZREG($sp)
+ $REG_L $s0,5*$SZREG($sp)
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,12*$SZREG
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ $REG_L $s5,5*$SZREG($sp)
+ $REG_L $s4,4*$SZREG($sp)
+ $REG_L $s3,3*$SZREG($sp)
+ $REG_L $s2,2*$SZREG($sp)
+ $REG_L $s1,1*$SZREG($sp)
+ $REG_L $s0,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+.end bn_mul_comba8
+
+.align 5
+.globl bn_mul_comba4
+.ent bn_mul_comba4
+bn_mul_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_mul_comba4
+___
+
+($a_4,$a_5,$a_6,$a_7)=($b_0,$b_1,$b_2,$b_3);
+
+$code.=<<___;
+
+.align 5
+.globl bn_sqr_comba8
+.ent bn_sqr_comba8
+bn_sqr_comba8:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_2 # mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_0 # mul_add_c2(a[4],b[0],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$a_5 # mul_add_c2(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_4 # mul_add_c2(a[1],b[4],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $MULTU $a_6,$a_0 # mul_add_c2(a[6],b[0],c1,c2,c3);
+ $ADDU $c_2,$at
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_5,$a_1 # mul_add_c2(a[5],b[1],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_2 # mul_add_c2(a[4],b[2],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$a_7 # mul_add_c2(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_6 # mul_add_c2(a[1],b[6],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_2,$a_5 # mul_add_c2(a[2],b[5],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_3,$a_4 # mul_add_c2(a[3],b[4],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_7,$a_1 # mul_add_c2(a[7],b[1],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,7*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_2 # mul_add_c2(a[6],b[2],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_5,$a_3 # mul_add_c2(a[5],b[3],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_4,$a_4 # mul_add_c(a[4],b[4],c3,c1,c2);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$a_7 # mul_add_c2(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_6 # mul_add_c2(a[3],b[6],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_4,$a_5 # mul_add_c2(a[4],b[5],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_7,$a_3 # mul_add_c2(a[7],b[3],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,9*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_4 # mul_add_c2(a[6],b[4],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_1,$at
+ $MULTU $a_5,$a_5 # mul_add_c(a[5],b[5],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$a_7 # mul_add_c2(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_5,$a_6 # mul_add_c2(a[5],b[6],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_2,$at
+ $MULTU $a_7,$a_5 # mul_add_c2(a[7],b[5],c1,c2,c3);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,11*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_6,$a_6 # mul_add_c(a[6],b[6],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$a_7 # mul_add_c2(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_7,$a_7 # mul_add_c(a[7],b[7],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,13*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0)
+ $ST $c_1,15*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba8
+
+.align 5
+.globl bn_sqr_comba4
+.ent bn_sqr_comba4
+bn_sqr_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_3,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_1,$a_2 # mul_add_c2(a[1],b[2],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ slt $at,$t_2,$zero
+ $ADDU $c_3,$at
+ $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
+ $SLL $t_2,1
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ slt $c_2,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba4
+___
+print $code;
+close STDOUT;
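
The comba routines emitted above expand, step by step, the mul_add_c()/mul_add_c2() operations named in the inline comments: each step multiplies one pair of words and folds the double-width product into a rotating three-word accumulator. As a rough illustration only (assuming the 32-bit flavour, i.e. 32-bit BN_ULONG words and a 64-bit intermediate; the helper names follow the comments, while the C bodies below are a sketch and are not taken from this file):

    #include <stdint.h>

    typedef uint32_t BN_ULONG;

    /* c3:c2:c1 += a*b -- one mul_add_c(a,b,c1,c2,c3) step of the comba scheme */
    static void mul_add_c(BN_ULONG a, BN_ULONG b,
                          BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
    {
        uint64_t t = (uint64_t)a * b;
        BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 32);
        *c1 += lo; hi += (*c1 < lo);      /* carry out of the low word     */
        *c2 += hi; *c3 += (*c2 < hi);     /* propagate into the third word */
    }

    /* c3:c2:c1 += 2*a*b -- mul_add_c2(), the doubled cross term used when
     * squaring; the bit shifted out of the doubled product goes to c3,
     * mirroring the slt/sll pairs in the generated assembly. */
    static void mul_add_c2(BN_ULONG a, BN_ULONG b,
                           BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
    {
        uint64_t t = (uint64_t)a * b;
        *c3 += (BN_ULONG)(t >> 63);       /* bit lost by the doubling */
        t <<= 1;
        BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 32);
        *c1 += lo; hi += (*c1 < lo);
        *c2 += hi; *c3 += (*c2 < hi);
    }

Each r[k] the script stores is just the lowest of the three accumulators once every a[i]*b[j] with i+j == k has been folded in this way, with the doubled variant covering the off-diagonal terms of the squaring routines.
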
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3-mont.pl
new file mode 100644
index 00000000..8f9156e0
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3-mont.pl
@@ -0,0 +1,327 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# This module is not of direct interest to OpenSSL, because it
+# doesn't provide better performance for longer keys. While 512-bit
+# RSA private key operations are 40% faster, 1024-bit ones are hardly
+# faster at all, and longer key operations are slower by up to 20%.
+# It might be of interest to embedded system developers though, as
+# it's smaller than 1KB, yet offers a ~3x improvement over
+# compiler-generated code.
+#
+# The module targets N32 and N64 MIPS ABIs and currently is a bit
+# IRIX-centric, i.e. is likely to require adaptation for other OSes.
+
+# int bn_mul_mont(
+$rp="a0"; # BN_ULONG *rp,
+$ap="a1"; # const BN_ULONG *ap,
+$bp="a2"; # const BN_ULONG *bp,
+$np="a3"; # const BN_ULONG *np,
+$n0="a4"; # const BN_ULONG *n0,
+$num="a5"; # int num);
+
+$lo0="a6";
+$hi0="a7";
+$lo1="v0";
+$hi1="v1";
+$aj="t0";
+$bi="t1";
+$nj="t2";
+$tp="t3";
+$alo="s0";
+$ahi="s1";
+$nlo="s2";
+$nhi="s3";
+$tj="s4";
+$i="s5";
+$j="s6";
+$fp="t8";
+$m1="t9";
+
+$FRAME=8*(2+8);
+
+$code=<<___;
+#include <asm.h>
+#include <regdef.h>
+
+.text
+
+.set noat
+.set reorder
+
+.align 5
+.globl bn_mul_mont
+.ent bn_mul_mont
+bn_mul_mont:
+ .set noreorder
+ PTR_SUB sp,64
+ move $fp,sp
+ .frame $fp,64,ra
+ slt AT,$num,4
+ li v0,0
+ beqzl AT,.Lproceed
+ nop
+ jr ra
+ PTR_ADD sp,$fp,64
+ .set reorder
+.align 5
+.Lproceed:
+ ld $n0,0($n0)
+ ld $bi,0($bp) # bp[0]
+ ld $aj,0($ap) # ap[0]
+ ld $nj,0($np) # np[0]
+ PTR_SUB sp,16 # place for two extra words
+ sll $num,3
+ li AT,-4096
+ PTR_SUB sp,$num
+ and sp,AT
+
+ sd s0,0($fp)
+ sd s1,8($fp)
+ sd s2,16($fp)
+ sd s3,24($fp)
+ sd s4,32($fp)
+ sd s5,40($fp)
+ sd s6,48($fp)
+ sd s7,56($fp)
+
+ dmultu $aj,$bi
+ ld $alo,8($ap)
+ ld $nlo,8($np)
+ mflo $lo0
+ mfhi $hi0
+ dmultu $lo0,$n0
+ mflo $m1
+
+ dmultu $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ dmultu $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+ dmultu $nlo,$m1
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ daddu $hi1,AT
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,sp
+ li $j,16
+.align 4
+.L1st:
+ .set noreorder
+ PTR_ADD $aj,$ap,$j
+ ld $aj,($aj)
+ PTR_ADD $nj,$np,$j
+ ld $nj,($nj)
+
+ dmultu $aj,$bi
+ daddu $lo0,$alo,$hi0
+ daddu $lo1,$nlo,$hi1
+ sltu AT,$lo0,$hi0
+ sltu s7,$lo1,$hi1
+ daddu $hi0,$ahi,AT
+ daddu $hi1,$nhi,s7
+ mflo $alo
+ mfhi $ahi
+
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ dmultu $nj,$m1
+ daddu $hi1,AT
+ addu $j,8
+ sd $lo1,($tp)
+ sltu s7,$j,$num
+ mflo $nlo
+ mfhi $nhi
+
+ bnez s7,.L1st
+ PTR_ADD $tp,8
+ .set reorder
+
+ daddu $lo0,$alo,$hi0
+ sltu AT,$lo0,$hi0
+ daddu $hi0,$ahi,AT
+
+ daddu $lo1,$nlo,$hi1
+ sltu s7,$lo1,$hi1
+ daddu $hi1,$nhi,s7
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ daddu $hi1,AT
+
+ sd $lo1,($tp)
+
+ daddu $hi1,$hi0
+ sltu AT,$hi1,$hi0
+ sd $hi1,8($tp)
+ sd AT,16($tp)
+
+ li $i,8
+.align 4
+.Louter:
+ PTR_ADD $bi,$bp,$i
+ ld $bi,($bi)
+ ld $aj,($ap)
+ ld $alo,8($ap)
+ ld $tj,(sp)
+
+ dmultu $aj,$bi
+ ld $nj,($np)
+ ld $nlo,8($np)
+ mflo $lo0
+ mfhi $hi0
+ daddu $lo0,$tj
+ dmultu $lo0,$n0
+ sltu AT,$lo0,$tj
+ daddu $hi0,AT
+ mflo $m1
+
+ dmultu $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ dmultu $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+
+ dmultu $nlo,$m1
+ daddu $lo1,$lo0
+ sltu AT,$lo1,$lo0
+ daddu $hi1,AT
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,sp
+ li $j,16
+ ld $tj,8($tp)
+.align 4
+.Linner:
+ .set noreorder
+ PTR_ADD $aj,$ap,$j
+ ld $aj,($aj)
+ PTR_ADD $nj,$np,$j
+ ld $nj,($nj)
+
+ dmultu $aj,$bi
+ daddu $lo0,$alo,$hi0
+ daddu $lo1,$nlo,$hi1
+ sltu AT,$lo0,$hi0
+ sltu s7,$lo1,$hi1
+ daddu $hi0,$ahi,AT
+ daddu $hi1,$nhi,s7
+ mflo $alo
+ mfhi $ahi
+
+ daddu $lo0,$tj
+ addu $j,8
+ dmultu $nj,$m1
+ sltu AT,$lo0,$tj
+ daddu $lo1,$lo0
+ daddu $hi0,AT
+ sltu s7,$lo1,$lo0
+ ld $tj,16($tp)
+ daddu $hi1,s7
+ sltu AT,$j,$num
+ mflo $nlo
+ mfhi $nhi
+ sd $lo1,($tp)
+ bnez AT,.Linner
+ PTR_ADD $tp,8
+ .set reorder
+
+ daddu $lo0,$alo,$hi0
+ sltu AT,$lo0,$hi0
+ daddu $hi0,$ahi,AT
+ daddu $lo0,$tj
+ sltu s7,$lo0,$tj
+ daddu $hi0,s7
+
+ ld $tj,16($tp)
+ daddu $lo1,$nlo,$hi1
+ sltu AT,$lo1,$hi1
+ daddu $hi1,$nhi,AT
+ daddu $lo1,$lo0
+ sltu s7,$lo1,$lo0
+ daddu $hi1,s7
+ sd $lo1,($tp)
+
+ daddu $lo1,$hi1,$hi0
+ sltu $hi1,$lo1,$hi0
+ daddu $lo1,$tj
+ sltu AT,$lo1,$tj
+ daddu $hi1,AT
+ sd $lo1,8($tp)
+ sd $hi1,16($tp)
+
+ addu $i,8
+ sltu s7,$i,$num
+ bnez s7,.Louter
+
+ .set noreorder
+ PTR_ADD $tj,sp,$num # &tp[num]
+ move $tp,sp
+ move $ap,sp
+ li $hi0,0 # clear borrow bit
+
+.align 4
+.Lsub: ld $lo0,($tp)
+ ld $lo1,($np)
+ PTR_ADD $tp,8
+ PTR_ADD $np,8
+ dsubu $lo1,$lo0,$lo1 # tp[i]-np[i]
+ sgtu AT,$lo1,$lo0
+ dsubu $lo0,$lo1,$hi0
+ sgtu $hi0,$lo0,$lo1
+ sd $lo0,($rp)
+ or $hi0,AT
+ sltu AT,$tp,$tj
+ bnez AT,.Lsub
+ PTR_ADD $rp,8
+
+ dsubu $hi0,$hi1,$hi0 # handle upmost overflow bit
+ move $tp,sp
+ PTR_SUB $rp,$num # restore rp
+ not $hi1,$hi0
+
+ and $ap,$hi0,sp
+ and $bp,$hi1,$rp
+ or $ap,$ap,$bp # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: ld $aj,($ap)
+ PTR_ADD $ap,8
+ PTR_ADD $tp,8
+ sd zero,-8($tp)
+ sltu AT,$tp,$tj
+ sd $aj,($rp)
+ bnez AT,.Lcopy
+ PTR_ADD $rp,8
+
+ ld s0,0($fp)
+ ld s1,8($fp)
+ ld s2,16($fp)
+ ld s3,24($fp)
+ ld s4,32($fp)
+ ld s5,40($fp)
+ ld s6,48($fp)
+ ld s7,56($fp)
+ li v0,1
+ jr ra
+ PTR_ADD sp,$fp,64
+ .set reorder
+END(bn_mul_mont)
+.rdata
+.asciiz "Montgomery Multiplication for MIPS III/IV, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+print $code;
+close STDOUT;
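
bn_mul_mont above interleaves the multiplication by each bp[i] with one Montgomery reduction step, one word per pass of .Louter, and finishes with the conditional subtraction done by .Lsub/.Lcopy. A minimal C sketch of that word-serial scheme, assuming 64-bit words and a compiler that provides unsigned __int128 (the names and layout are illustrative, not the ones used by the assembly):

    #include <stdint.h>
    #include <string.h>

    /* Word-serial Montgomery multiplication: rp = ap*bp*R^-1 mod np, where
     * R = 2^(64*num) and n0 = -np[0]^-1 mod 2^64.  Sketch only: no attempt
     * is made to mirror the register scheduling of the assembly above. */
    static void mont_mul(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                         const uint64_t *np, uint64_t n0, int num)
    {
        uint64_t t[num + 2];
        memset(t, 0, sizeof(t));

        for (int i = 0; i < num; i++) {
            /* t += ap * bp[i] */
            unsigned __int128 c = 0;
            for (int j = 0; j < num; j++) {
                c += (unsigned __int128)ap[j] * bp[i] + t[j];
                t[j] = (uint64_t)c;
                c >>= 64;
            }
            c += t[num];
            t[num]     = (uint64_t)c;
            t[num + 1] = (uint64_t)(c >> 64);

            /* m is chosen so that t + m*np is divisible by 2^64; adding it
             * and dropping the zero low word is the word-shift below */
            uint64_t m = t[0] * n0;
            c = (unsigned __int128)m * np[0] + t[0];
            c >>= 64;
            for (int j = 1; j < num; j++) {
                c += (unsigned __int128)m * np[j] + t[j];
                t[j - 1] = (uint64_t)c;
                c >>= 64;
            }
            c += t[num];
            t[num - 1] = (uint64_t)c;
            t[num]     = t[num + 1] + (uint64_t)(c >> 64);
        }

        /* conditional final subtraction, as in .Lsub/.Lcopy: r = t>=np ? t-np : t */
        uint64_t d[num], borrow = 0;
        for (int j = 0; j < num; j++) {
            unsigned __int128 s = (unsigned __int128)t[j] - np[j] - borrow;
            d[j] = (uint64_t)s;
            borrow = (uint64_t)(s >> 64) & 1;
        }
        int take_diff = t[num] || !borrow;        /* t >= np */
        for (int j = 0; j < num; j++)
            rp[j] = take_diff ? d[j] : t[j];
    }

The assembly gets the same effect with a single fused pass per bp[i]: the .L1st/.Linner loops accumulate ap[j]*bp[i] and np[j]*m1 together, so the temporary tp is walked only once per outer iteration.
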
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3.S
new file mode 100644
index 00000000..dca4105c
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/mips3.S
@@ -0,0 +1,2201 @@
+.rdata
+.asciiz "mips3.s, Version 1.1"
+.asciiz "MIPS III/IV ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * The module is designed to work with either of the "new" MIPS ABI(5),
+ * namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
+ * IRIX 5.x not only because it doesn't support new ABIs but also
+ * because 5.x kernels put R4x00 CPU into 32-bit mode and all those
+ * 64-bit instructions (daddu, dmultu, etc.) found below are only going
+ * to cause an illegal instruction exception:-(
+ *
+ * In addition, the code depends on preprocessor flags set up by the MIPSpro
+ * compiler driver (either as or cc) and therefore (probably?) can't be
+ * compiled by the GNU assembler. The GNU C driver manages fine though...
+ * I mean as long as -mmips-as is specified or is the default option,
+ * because then it simply invokes /usr/bin/as which in turn takes
+ * perfect care of the preprocessor definitions. Another neat feature
+ * offered by the MIPSpro assembler is an optimization pass. This gave
+ * me the opportunity to have the code looking more regular as all those
+ * architecture dependent instruction rescheduling details were left to
+ * the assembler. Cool, huh?
+ *
+ * Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
+ * goes way over 3 times faster!
+ *
+ * <appro@fy.chalmers.se>
+ */
+#include <asm.h>
+#include <regdef.h>
+
+#if _MIPS_ISA>=4
+#define MOVNZ(cond,dst,src) \
+ movn dst,src,cond
+#else
+#define MOVNZ(cond,dst,src) \
+ .set noreorder; \
+ bnezl cond,.+8; \
+ move dst,src; \
+ .set reorder
+#endif
+
+.text
+
+.set noat
+.set reorder
+
+#define MINUS4 v1
+
+.align 5
+LEAF(bn_mul_add_words)
+ .set noreorder
+ bgtzl a2,.L_bn_mul_add_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_mul_add_words_proceed:
+ li MINUS4,-4
+ and ta0,a2,MINUS4
+ move v0,zero
+ beqz ta0,.L_bn_mul_add_words_tail
+
+.L_bn_mul_add_words_loop:
+ dmultu t0,a3
+ ld t1,0(a0)
+ ld t2,8(a1)
+ ld t3,8(a0)
+ ld ta0,16(a1)
+ ld ta1,16(a0)
+ daddu t1,v0
+ sltu v0,t1,v0 /* All manuals say it "compares 32-bit
+ * values", but it seems to work fine
+ * even on 64-bit registers. */
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,0(a0)
+ daddu v0,AT
+
+ dmultu t2,a3
+ ld ta2,24(a1)
+ ld ta3,24(a0)
+ daddu t3,v0
+ sltu v0,t3,v0
+ mflo AT
+ mfhi t2
+ daddu t3,AT
+ daddu v0,t2
+ sltu AT,t3,AT
+ sd t3,8(a0)
+ daddu v0,AT
+
+ dmultu ta0,a3
+ subu a2,4
+ PTR_ADD a0,32
+ PTR_ADD a1,32
+ daddu ta1,v0
+ sltu v0,ta1,v0
+ mflo AT
+ mfhi ta0
+ daddu ta1,AT
+ daddu v0,ta0
+ sltu AT,ta1,AT
+ sd ta1,-16(a0)
+ daddu v0,AT
+
+
+ dmultu ta2,a3
+ and ta0,a2,MINUS4
+ daddu ta3,v0
+ sltu v0,ta3,v0
+ mflo AT
+ mfhi ta2
+ daddu ta3,AT
+ daddu v0,ta2
+ sltu AT,ta3,AT
+ sd ta3,-8(a0)
+ daddu v0,AT
+ .set noreorder
+ bgtzl ta0,.L_bn_mul_add_words_loop
+ ld t0,0(a1)
+
+ bnezl a2,.L_bn_mul_add_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_mul_add_words_return:
+ jr ra
+
+.L_bn_mul_add_words_tail:
+ dmultu t0,a3
+ ld t1,0(a0)
+ subu a2,1
+ daddu t1,v0
+ sltu v0,t1,v0
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,0(a0)
+ daddu v0,AT
+ beqz a2,.L_bn_mul_add_words_return
+
+ ld t0,8(a1)
+ dmultu t0,a3
+ ld t1,8(a0)
+ subu a2,1
+ daddu t1,v0
+ sltu v0,t1,v0
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,8(a0)
+ daddu v0,AT
+ beqz a2,.L_bn_mul_add_words_return
+
+ ld t0,16(a1)
+ dmultu t0,a3
+ ld t1,16(a0)
+ daddu t1,v0
+ sltu v0,t1,v0
+ mflo AT
+ mfhi t0
+ daddu t1,AT
+ daddu v0,t0
+ sltu AT,t1,AT
+ sd t1,16(a0)
+ daddu v0,AT
+ jr ra
+END(bn_mul_add_words)
+
+.align 5
+LEAF(bn_mul_words)
+ .set noreorder
+ bgtzl a2,.L_bn_mul_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_mul_words_proceed:
+ li MINUS4,-4
+ and ta0,a2,MINUS4
+ move v0,zero
+ beqz ta0,.L_bn_mul_words_tail
+
+.L_bn_mul_words_loop:
+ dmultu t0,a3
+ ld t2,8(a1)
+ ld ta0,16(a1)
+ ld ta2,24(a1)
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,0(a0)
+ daddu v0,t1,t0
+
+ dmultu t2,a3
+ subu a2,4
+ PTR_ADD a0,32
+ PTR_ADD a1,32
+ mflo AT
+ mfhi t2
+ daddu v0,AT
+ sltu t3,v0,AT
+ sd v0,-24(a0)
+ daddu v0,t3,t2
+
+ dmultu ta0,a3
+ mflo AT
+ mfhi ta0
+ daddu v0,AT
+ sltu ta1,v0,AT
+ sd v0,-16(a0)
+ daddu v0,ta1,ta0
+
+
+ dmultu ta2,a3
+ and ta0,a2,MINUS4
+ mflo AT
+ mfhi ta2
+ daddu v0,AT
+ sltu ta3,v0,AT
+ sd v0,-8(a0)
+ daddu v0,ta3,ta2
+ .set noreorder
+ bgtzl ta0,.L_bn_mul_words_loop
+ ld t0,0(a1)
+
+ bnezl a2,.L_bn_mul_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_mul_words_return:
+ jr ra
+
+.L_bn_mul_words_tail:
+ dmultu t0,a3
+ subu a2,1
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,0(a0)
+ daddu v0,t1,t0
+ beqz a2,.L_bn_mul_words_return
+
+ ld t0,8(a1)
+ dmultu t0,a3
+ subu a2,1
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,8(a0)
+ daddu v0,t1,t0
+ beqz a2,.L_bn_mul_words_return
+
+ ld t0,16(a1)
+ dmultu t0,a3
+ mflo AT
+ mfhi t0
+ daddu v0,AT
+ sltu t1,v0,AT
+ sd v0,16(a0)
+ daddu v0,t1,t0
+ jr ra
+END(bn_mul_words)
+
+.align 5
+LEAF(bn_sqr_words)
+ .set noreorder
+ bgtzl a2,.L_bn_sqr_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_sqr_words_proceed:
+ li MINUS4,-4
+ and ta0,a2,MINUS4
+ move v0,zero
+ beqz ta0,.L_bn_sqr_words_tail
+
+.L_bn_sqr_words_loop:
+ dmultu t0,t0
+ ld t2,8(a1)
+ ld ta0,16(a1)
+ ld ta2,24(a1)
+ mflo t1
+ mfhi t0
+ sd t1,0(a0)
+ sd t0,8(a0)
+
+ dmultu t2,t2
+ subu a2,4
+ PTR_ADD a0,64
+ PTR_ADD a1,32
+ mflo t3
+ mfhi t2
+ sd t3,-48(a0)
+ sd t2,-40(a0)
+
+ dmultu ta0,ta0
+ mflo ta1
+ mfhi ta0
+ sd ta1,-32(a0)
+ sd ta0,-24(a0)
+
+
+ dmultu ta2,ta2
+ and ta0,a2,MINUS4
+ mflo ta3
+ mfhi ta2
+ sd ta3,-16(a0)
+ sd ta2,-8(a0)
+
+ .set noreorder
+ bgtzl ta0,.L_bn_sqr_words_loop
+ ld t0,0(a1)
+
+ bnezl a2,.L_bn_sqr_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_sqr_words_return:
+ move v0,zero
+ jr ra
+
+.L_bn_sqr_words_tail:
+ dmultu t0,t0
+ subu a2,1
+ mflo t1
+ mfhi t0
+ sd t1,0(a0)
+ sd t0,8(a0)
+ beqz a2,.L_bn_sqr_words_return
+
+ ld t0,8(a1)
+ dmultu t0,t0
+ subu a2,1
+ mflo t1
+ mfhi t0
+ sd t1,16(a0)
+ sd t0,24(a0)
+ beqz a2,.L_bn_sqr_words_return
+
+ ld t0,16(a1)
+ dmultu t0,t0
+ mflo t1
+ mfhi t0
+ sd t1,32(a0)
+ sd t0,40(a0)
+ jr ra
+END(bn_sqr_words)
+
+.align 5
+LEAF(bn_add_words)
+ .set noreorder
+ bgtzl a3,.L_bn_add_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_add_words_proceed:
+ li MINUS4,-4
+ and AT,a3,MINUS4
+ move v0,zero
+ beqz AT,.L_bn_add_words_tail
+
+.L_bn_add_words_loop:
+ ld ta0,0(a2)
+ subu a3,4
+ ld t1,8(a1)
+ and AT,a3,MINUS4
+ ld t2,16(a1)
+ PTR_ADD a2,32
+ ld t3,24(a1)
+ PTR_ADD a0,32
+ ld ta1,-24(a2)
+ PTR_ADD a1,32
+ ld ta2,-16(a2)
+ ld ta3,-8(a2)
+ daddu ta0,t0
+ sltu t8,ta0,t0
+ daddu t0,ta0,v0
+ sltu v0,t0,ta0
+ sd t0,-32(a0)
+ daddu v0,t8
+
+ daddu ta1,t1
+ sltu t9,ta1,t1
+ daddu t1,ta1,v0
+ sltu v0,t1,ta1
+ sd t1,-24(a0)
+ daddu v0,t9
+
+ daddu ta2,t2
+ sltu t8,ta2,t2
+ daddu t2,ta2,v0
+ sltu v0,t2,ta2
+ sd t2,-16(a0)
+ daddu v0,t8
+
+ daddu ta3,t3
+ sltu t9,ta3,t3
+ daddu t3,ta3,v0
+ sltu v0,t3,ta3
+ sd t3,-8(a0)
+ daddu v0,t9
+
+ .set noreorder
+ bgtzl AT,.L_bn_add_words_loop
+ ld t0,0(a1)
+
+ bnezl a3,.L_bn_add_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_add_words_return:
+ jr ra
+
+.L_bn_add_words_tail:
+ ld ta0,0(a2)
+ daddu ta0,t0
+ subu a3,1
+ sltu t8,ta0,t0
+ daddu t0,ta0,v0
+ sltu v0,t0,ta0
+ sd t0,0(a0)
+ daddu v0,t8
+ beqz a3,.L_bn_add_words_return
+
+ ld t1,8(a1)
+ ld ta1,8(a2)
+ daddu ta1,t1
+ subu a3,1
+ sltu t9,ta1,t1
+ daddu t1,ta1,v0
+ sltu v0,t1,ta1
+ sd t1,8(a0)
+ daddu v0,t9
+ beqz a3,.L_bn_add_words_return
+
+ ld t2,16(a1)
+ ld ta2,16(a2)
+ daddu ta2,t2
+ sltu t8,ta2,t2
+ daddu t2,ta2,v0
+ sltu v0,t2,ta2
+ sd t2,16(a0)
+ daddu v0,t8
+ jr ra
+END(bn_add_words)
+
+.align 5
+LEAF(bn_sub_words)
+ .set noreorder
+ bgtzl a3,.L_bn_sub_words_proceed
+ ld t0,0(a1)
+ jr ra
+ move v0,zero
+ .set reorder
+
+.L_bn_sub_words_proceed:
+ li MINUS4,-4
+ and AT,a3,MINUS4
+ move v0,zero
+ beqz AT,.L_bn_sub_words_tail
+
+.L_bn_sub_words_loop:
+ ld ta0,0(a2)
+ subu a3,4
+ ld t1,8(a1)
+ and AT,a3,MINUS4
+ ld t2,16(a1)
+ PTR_ADD a2,32
+ ld t3,24(a1)
+ PTR_ADD a0,32
+ ld ta1,-24(a2)
+ PTR_ADD a1,32
+ ld ta2,-16(a2)
+ ld ta3,-8(a2)
+ sltu t8,t0,ta0
+ dsubu t0,ta0
+ dsubu ta0,t0,v0
+ sd ta0,-32(a0)
+ MOVNZ (t0,v0,t8)
+
+ sltu t9,t1,ta1
+ dsubu t1,ta1
+ dsubu ta1,t1,v0
+ sd ta1,-24(a0)
+ MOVNZ (t1,v0,t9)
+
+
+ sltu t8,t2,ta2
+ dsubu t2,ta2
+ dsubu ta2,t2,v0
+ sd ta2,-16(a0)
+ MOVNZ (t2,v0,t8)
+
+ sltu t9,t3,ta3
+ dsubu t3,ta3
+ dsubu ta3,t3,v0
+ sd ta3,-8(a0)
+ MOVNZ (t3,v0,t9)
+
+ .set noreorder
+ bgtzl AT,.L_bn_sub_words_loop
+ ld t0,0(a1)
+
+ bnezl a3,.L_bn_sub_words_tail
+ ld t0,0(a1)
+ .set reorder
+
+.L_bn_sub_words_return:
+ jr ra
+
+.L_bn_sub_words_tail:
+ ld ta0,0(a2)
+ subu a3,1
+ sltu t8,t0,ta0
+ dsubu t0,ta0
+ dsubu ta0,t0,v0
+ MOVNZ (t0,v0,t8)
+ sd ta0,0(a0)
+ beqz a3,.L_bn_sub_words_return
+
+ ld t1,8(a1)
+ subu a3,1
+ ld ta1,8(a2)
+ sltu t9,t1,ta1
+ dsubu t1,ta1
+ dsubu ta1,t1,v0
+ MOVNZ (t1,v0,t9)
+ sd ta1,8(a0)
+ beqz a3,.L_bn_sub_words_return
+
+ ld t2,16(a1)
+ ld ta2,16(a2)
+ sltu t8,t2,ta2
+ dsubu t2,ta2
+ dsubu ta2,t2,v0
+ MOVNZ (t2,v0,t8)
+ sd ta2,16(a0)
+ jr ra
+END(bn_sub_words)
+
+#undef MINUS4
+
+.align 5
+LEAF(bn_div_3_words)
+ .set reorder
+ move a3,a0 /* we know that bn_div_words doesn't
+ * touch a3, ta2, ta3 and preserves a2
+ * so that we can save two arguments
+ * and the return address in registers
+ * instead of on the stack:-)
+ */
+ ld a0,(a3)
+ move ta2,a1
+ ld a1,-8(a3)
+ bne a0,a2,.L_bn_div_3_words_proceed
+ li v0,-1
+ jr ra
+.L_bn_div_3_words_proceed:
+ move ta3,ra
+ bal bn_div_words
+ move ra,ta3
+ dmultu ta2,v0
+ ld t2,-16(a3)
+ move ta0,zero
+ mfhi t1
+ mflo t0
+ sltu t8,t1,v1
+.L_bn_div_3_words_inner_loop:
+ bnez t8,.L_bn_div_3_words_inner_loop_done
+ sgeu AT,t2,t0
+ seq t9,t1,v1
+ and AT,t9
+ sltu t3,t0,ta2
+ daddu v1,a2
+ dsubu t1,t3
+ dsubu t0,ta2
+ sltu t8,t1,v1
+ sltu ta0,v1,a2
+ or t8,ta0
+ .set noreorder
+ beqzl AT,.L_bn_div_3_words_inner_loop
+ dsubu v0,1
+ .set reorder
+.L_bn_div_3_words_inner_loop_done:
+ jr ra
+END(bn_div_3_words)
+
+.align 5
+LEAF(bn_div_words)
+ .set noreorder
+ bnezl a2,.L_bn_div_words_proceed
+ move v1,zero
+ jr ra
+ li v0,-1 /* I'd rather signal div-by-zero
+ * which can be done with 'break 7' */
+
+.L_bn_div_words_proceed:
+ bltz a2,.L_bn_div_words_body
+ move t9,v1
+ dsll a2,1
+ bgtz a2,.-4
+ addu t9,1
+
+ .set reorder
+ negu t1,t9
+ li t2,-1
+ dsll t2,t1
+ and t2,a0
+ dsrl AT,a1,t1
+ .set noreorder
+ bnezl t2,.+8
+ break 6 /* signal overflow */
+ .set reorder
+ dsll a0,t9
+ dsll a1,t9
+ or a0,AT
+
+#define QT ta0
+#define HH ta1
+#define DH v1
+.L_bn_div_words_body:
+ dsrl DH,a2,32
+ sgeu AT,a0,a2
+ .set noreorder
+ bnezl AT,.+8
+ dsubu a0,a2
+ .set reorder
+
+ li QT,-1
+ dsrl HH,a0,32
+ dsrl QT,32 /* q=0xffffffff */
+ beq DH,HH,.L_bn_div_words_skip_div1
+ ddivu zero,a0,DH
+ mflo QT
+.L_bn_div_words_skip_div1:
+ dmultu a2,QT
+ dsll t3,a0,32
+ dsrl AT,a1,32
+ or t3,AT
+ mflo t0
+ mfhi t1
+.L_bn_div_words_inner_loop1:
+ sltu t2,t3,t0
+ seq t8,HH,t1
+ sltu AT,HH,t1
+ and t2,t8
+ sltu v0,t0,a2
+ or AT,t2
+ .set noreorder
+ beqz AT,.L_bn_div_words_inner_loop1_done
+ dsubu t1,v0
+ dsubu t0,a2
+ b .L_bn_div_words_inner_loop1
+ dsubu QT,1
+ .set reorder
+.L_bn_div_words_inner_loop1_done:
+
+ dsll a1,32
+ dsubu a0,t3,t0
+ dsll v0,QT,32
+
+ li QT,-1
+ dsrl HH,a0,32
+ dsrl QT,32 /* q=0xffffffff */
+ beq DH,HH,.L_bn_div_words_skip_div2
+ ddivu zero,a0,DH
+ mflo QT
+.L_bn_div_words_skip_div2:
+#undef DH
+ dmultu a2,QT
+ dsll t3,a0,32
+ dsrl AT,a1,32
+ or t3,AT
+ mflo t0
+ mfhi t1
+.L_bn_div_words_inner_loop2:
+ sltu t2,t3,t0
+ seq t8,HH,t1
+ sltu AT,HH,t1
+ and t2,t8
+ sltu v1,t0,a2
+ or AT,t2
+ .set noreorder
+ beqz AT,.L_bn_div_words_inner_loop2_done
+ dsubu t1,v1
+ dsubu t0,a2
+ b .L_bn_div_words_inner_loop2
+ dsubu QT,1
+ .set reorder
+.L_bn_div_words_inner_loop2_done:
+#undef HH
+
+ dsubu a0,t3,t0
+ or v0,QT
+ dsrl v1,a0,t9 /* v1 contains remainder if anybody wants it */
+ dsrl a2,t9 /* restore a2 */
+ jr ra
+#undef QT
+END(bn_div_words)
+
+#define a_0 t0
+#define a_1 t1
+#define a_2 t2
+#define a_3 t3
+#define b_0 ta0
+#define b_1 ta1
+#define b_2 ta2
+#define b_3 ta3
+
+#define a_4 s0
+#define a_5 s2
+#define a_6 s4
+#define a_7 a1 /* once we load a[7] we don't need a anymore */
+#define b_4 s1
+#define b_5 s3
+#define b_6 s5
+#define b_7 a2 /* once we load b[7] we don't need b anymore */
+
+#define t_1 t8
+#define t_2 t9
+
+#define c_1 v0
+#define c_2 v1
+#define c_3 a3
+
+#define FRAME_SIZE 48
+
+.align 5
+LEAF(bn_mul_comba8)
+ .set noreorder
+ PTR_SUB sp,FRAME_SIZE
+ .frame sp,64,ra
+ .set reorder
+ ld a_0,0(a1) /* If compiled with the -mips3 option
+ * on an R5000 box, the assembler barks
+ * on this line with a "shouldn't have
+ * mult/div as last instruction in bb
+ * (R10K bug)" warning. If anybody out
+ * there has a clue about how to
+ * circumvent this, do send me a note.
+ * <appro@fy.chalmers.se>
+ */
+ ld b_0,0(a2)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ ld a_3,24(a1)
+ ld b_1,8(a2)
+ ld b_2,16(a2)
+ ld b_3,24(a2)
+ dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ sd s0,0(sp)
+ sd s1,8(sp)
+ sd s2,16(sp)
+ sd s3,24(sp)
+ sd s4,32(sp)
+ sd s5,40(sp)
+ mflo c_1
+ mfhi c_2
+
+ dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
+ ld a_4,32(a1)
+ ld a_5,40(a1)
+ ld a_6,48(a1)
+ ld a_7,56(a1)
+ ld b_4,32(a2)
+ ld b_5,40(a2)
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
+ ld b_6,48(a2)
+ ld b_7,56(a2)
+ sd c_1,0(a0) /* r[0]=c1; */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ sd c_2,8(a0) /* r[1]=c2; */
+
+ dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0) /* r[2]=c3; */
+
+ dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0) /* r[3]=c1; */
+
+ dmultu a_4,b_0 /* mul_add_c(a[4],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_0,b_4 /* mul_add_c(a[0],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0) /* r[4]=c2; */
+
+ dmultu a_0,b_5 /* mul_add_c(a[0],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_1,b_4 /* mul_add_c(a[1],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_4,b_1 /* mul_add_c(a[4],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,b_0 /* mul_add_c(a[5],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0) /* r[5]=c3; */
+
+ dmultu a_6,b_0 /* mul_add_c(a[6],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_5,b_1 /* mul_add_c(a[5],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,b_2 /* mul_add_c(a[4],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_2,b_4 /* mul_add_c(a[2],b[4],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_1,b_5 /* mul_add_c(a[1],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_0,b_6 /* mul_add_c(a[0],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,48(a0) /* r[6]=c1; */
+
+ dmultu a_0,b_7 /* mul_add_c(a[0],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_1,b_6 /* mul_add_c(a[1],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,b_5 /* mul_add_c(a[2],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,b_4 /* mul_add_c(a[3],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_4,b_3 /* mul_add_c(a[4],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_5,b_2 /* mul_add_c(a[5],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_6,b_1 /* mul_add_c(a[6],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_7,b_0 /* mul_add_c(a[7],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,56(a0) /* r[7]=c2; */
+
+ dmultu a_7,b_1 /* mul_add_c(a[7],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_6,b_2 /* mul_add_c(a[6],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,b_3 /* mul_add_c(a[5],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_4,b_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_3,b_5 /* mul_add_c(a[3],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_2,b_6 /* mul_add_c(a[2],b[6],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,b_7 /* mul_add_c(a[1],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,64(a0) /* r[8]=c3; */
+
+ dmultu a_2,b_7 /* mul_add_c(a[2],b[7],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_3,b_6 /* mul_add_c(a[3],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,b_5 /* mul_add_c(a[4],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_5,b_4 /* mul_add_c(a[5],b[4],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_6,b_3 /* mul_add_c(a[6],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_7,b_2 /* mul_add_c(a[7],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,72(a0) /* r[9]=c1; */
+
+ dmultu a_7,b_3 /* mul_add_c(a[7],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_6,b_4 /* mul_add_c(a[6],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_5,b_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_4,b_6 /* mul_add_c(a[4],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,b_7 /* mul_add_c(a[3],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,80(a0) /* r[10]=c2; */
+
+ dmultu a_4,b_7 /* mul_add_c(a[4],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_5,b_6 /* mul_add_c(a[5],b[6],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_6,b_5 /* mul_add_c(a[6],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_7,b_4 /* mul_add_c(a[7],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,88(a0) /* r[11]=c3; */
+
+ dmultu a_7,b_5 /* mul_add_c(a[7],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_6,b_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_5,b_7 /* mul_add_c(a[5],b[7],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,96(a0) /* r[12]=c1; */
+
+ dmultu a_6,b_7 /* mul_add_c(a[6],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_7,b_6 /* mul_add_c(a[7],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,104(a0) /* r[13]=c2; */
+
+ dmultu a_7,b_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
+ ld s0,0(sp)
+ ld s1,8(sp)
+ ld s2,16(sp)
+ ld s3,24(sp)
+ ld s4,32(sp)
+ ld s5,40(sp)
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sd c_3,112(a0) /* r[14]=c3; */
+ sd c_1,120(a0) /* r[15]=c1; */
+
+ PTR_ADD sp,FRAME_SIZE
+
+ jr ra
+END(bn_mul_comba8)
+
+.align 5
+LEAF(bn_mul_comba4)
+ .set reorder
+ ld a_0,0(a1)
+ ld b_0,0(a2)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ ld a_3,24(a1)
+ ld b_1,8(a2)
+ ld b_2,16(a2)
+ ld b_3,24(a2)
+ mflo c_1
+ mfhi c_2
+ sd c_1,0(a0)
+
+ dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ sd c_2,8(a0)
+
+ dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0)
+
+ dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu c_3,c_2,t_2
+ dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0)
+
+ dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu c_1,c_3,t_2
+ dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0)
+
+ dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu c_2,c_1,t_2
+ dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0)
+
+ dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sd c_1,48(a0)
+ sd c_2,56(a0)
+
+ jr ra
+END(bn_mul_comba4)
+
+#undef a_4
+#undef a_5
+#undef a_6
+#undef a_7
+#define a_4 b_0
+#define a_5 b_1
+#define a_6 b_2
+#define a_7 b_3
+
+.align 5
+LEAF(bn_sqr_comba8)
+ .set reorder
+ ld a_0,0(a1)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ ld a_3,24(a1)
+
+ dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ ld a_4,32(a1)
+ ld a_5,40(a1)
+ ld a_6,48(a1)
+ ld a_7,56(a1)
+ mflo c_1
+ mfhi c_2
+ sd c_1,0(a0)
+
+ dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ sd c_2,8(a0)
+
+ dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0)
+
+ dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0)
+
+ dmultu a_4,a_0 /* mul_add_c2(a[4],b[0],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0)
+
+ dmultu a_0,a_5 /* mul_add_c2(a[0],b[5],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,a_4 /* mul_add_c2(a[1],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0)
+
+ dmultu a_6,a_0 /* mul_add_c2(a[6],b[0],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_5,a_1 /* mul_add_c2(a[5],b[1],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,a_2 /* mul_add_c2(a[4],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,48(a0)
+
+ dmultu a_0,a_7 /* mul_add_c2(a[0],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_1,a_6 /* mul_add_c2(a[1],b[6],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,a_5 /* mul_add_c2(a[2],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_3,a_4 /* mul_add_c2(a[3],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,56(a0)
+
+ dmultu a_7,a_1 /* mul_add_c2(a[7],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_6,a_2 /* mul_add_c2(a[6],b[2],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,a_3 /* mul_add_c2(a[5],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_4,a_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,64(a0)
+
+ dmultu a_2,a_7 /* mul_add_c2(a[2],b[7],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_3,a_6 /* mul_add_c2(a[3],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_4,a_5 /* mul_add_c2(a[4],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,72(a0)
+
+ dmultu a_7,a_3 /* mul_add_c2(a[7],b[3],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_6,a_4 /* mul_add_c2(a[6],b[4],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_1,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_5,a_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,80(a0)
+
+ dmultu a_4,a_7 /* mul_add_c2(a[4],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_5,a_6 /* mul_add_c2(a[5],b[6],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_2,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,88(a0)
+
+ dmultu a_7,a_5 /* mul_add_c2(a[7],b[5],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_6,a_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,96(a0)
+
+ dmultu a_6,a_7 /* mul_add_c2(a[6],b[7],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,104(a0)
+
+ dmultu a_7,a_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sd c_3,112(a0)
+ sd c_1,120(a0)
+
+ jr ra
+END(bn_sqr_comba8)
+
+.align 5
+LEAF(bn_sqr_comba4)
+ .set reorder
+ ld a_0,0(a1)
+ ld a_1,8(a1)
+ ld a_2,16(a1)
+ ld a_3,24(a1)
+ dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
+ mflo c_1
+ mfhi c_2
+ sd c_1,0(a0)
+
+ dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu c_3,t_2,AT
+ sd c_2,8(a0)
+
+ dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,16(a0)
+
+ dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt c_3,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ slt AT,t_2,zero
+ daddu c_3,AT
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sltu AT,c_2,t_2
+ daddu c_3,AT
+ sd c_1,24(a0)
+
+ dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ slt c_1,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
+ mflo t_1
+ mfhi t_2
+ daddu c_2,t_1
+ sltu AT,c_2,t_1
+ daddu t_2,AT
+ daddu c_3,t_2
+ sltu AT,c_3,t_2
+ daddu c_1,AT
+ sd c_2,32(a0)
+
+ dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
+ mflo t_1
+ mfhi t_2
+ slt c_2,t_2,zero
+ dsll t_2,1
+ slt a2,t_1,zero
+ daddu t_2,a2
+ dsll t_1,1
+ daddu c_3,t_1
+ sltu AT,c_3,t_1
+ daddu t_2,AT
+ daddu c_1,t_2
+ sltu AT,c_1,t_2
+ daddu c_2,AT
+ sd c_3,40(a0)
+
+ dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
+ mflo t_1
+ mfhi t_2
+ daddu c_1,t_1
+ sltu AT,c_1,t_1
+ daddu t_2,AT
+ daddu c_2,t_2
+ sd c_1,48(a0)
+ sd c_2,56(a0)
+
+ jr ra
+END(bn_sqr_comba4)
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.S
new file mode 100644
index 00000000..6cccafb8
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.S
@@ -0,0 +1,1773 @@
+.text
+
+.type MULADD_128x512,@function
+.align 16
+MULADD_128x512:
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ movq %r8,0(%rcx)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%r8
+ movq 8(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ movq %r9,8(%rcx)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%r9
+ .byte 0xf3,0xc3
+.size MULADD_128x512,.-MULADD_128x512
+.type mont_reduce,@function
+.align 16
+mont_reduce:
+ leaq 192(%rsp),%rdi
+ movq 32(%rsp),%rsi
+ addq $576,%rsi
+ leaq 520(%rsp),%rcx
+
+ movq 96(%rcx),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ movq (%rcx),%r8
+ addq %rax,%r8
+ adcq $0,%rdx
+ movq %r8,0(%rdi)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ movq 8(%rcx),%r9
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ movq 16(%rcx),%r10
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ movq 24(%rcx),%r11
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ movq 32(%rcx),%r12
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ movq 40(%rcx),%r13
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ movq 48(%rcx),%r14
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ movq 56(%rcx),%r15
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%r8
+ movq 104(%rcx),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ movq %r9,8(%rdi)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%r9
+ movq 112(%rcx),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ movq %r10,16(%rdi)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%r10
+ movq 120(%rcx),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ movq %r11,24(%rdi)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+ xorq %rax,%rax
+
+ addq 64(%rcx),%r8
+ adcq 72(%rcx),%r9
+ adcq 80(%rcx),%r10
+ adcq 88(%rcx),%r11
+ adcq $0,%rax
+
+
+
+
+ movq %r8,64(%rdi)
+ movq %r9,72(%rdi)
+ movq %r10,%rbp
+ movq %r11,88(%rdi)
+
+ movq %rax,384(%rsp)
+
+ movq 0(%rdi),%r8
+ movq 8(%rdi),%r9
+ movq 16(%rdi),%r10
+ movq 24(%rdi),%r11
+
+
+
+
+
+
+
+
+ addq $80,%rdi
+
+ addq $64,%rsi
+ leaq 296(%rsp),%rcx
+
+ call MULADD_128x512
+
+ movq 384(%rsp),%rax
+
+
+ addq -16(%rdi),%r8
+ adcq -8(%rdi),%r9
+ movq %r8,64(%rcx)
+ movq %r9,72(%rcx)
+
+ adcq %rax,%rax
+ movq %rax,384(%rsp)
+
+ leaq 192(%rsp),%rdi
+ addq $64,%rsi
+
+
+
+
+
+ movq (%rsi),%r8
+ movq 8(%rsi),%rbx
+
+ movq (%rcx),%rax
+ mulq %r8
+ movq %rax,%rbp
+ movq %rdx,%r9
+
+ movq 8(%rcx),%rax
+ mulq %r8
+ addq %rax,%r9
+
+ movq (%rcx),%rax
+ mulq %rbx
+ addq %rax,%r9
+
+ movq %r9,8(%rdi)
+
+
+ subq $192,%rsi
+
+ movq (%rcx),%r8
+ movq 8(%rcx),%r9
+
+ call MULADD_128x512
+
+
+
+
+ movq 0(%rsi),%rax
+ movq 8(%rsi),%rbx
+ movq 16(%rsi),%rdi
+ movq 24(%rsi),%rdx
+
+
+ movq 384(%rsp),%rbp
+
+ addq 64(%rcx),%r8
+ adcq 72(%rcx),%r9
+
+
+ adcq %rbp,%rbp
+
+
+
+ shlq $3,%rbp
+ movq 32(%rsp),%rcx
+ addq %rcx,%rbp
+
+
+ xorq %rsi,%rsi
+
+ addq 0(%rbp),%r10
+ adcq 64(%rbp),%r11
+ adcq 128(%rbp),%r12
+ adcq 192(%rbp),%r13
+ adcq 256(%rbp),%r14
+ adcq 320(%rbp),%r15
+ adcq 384(%rbp),%r8
+ adcq 448(%rbp),%r9
+
+
+
+ sbbq $0,%rsi
+
+
+ andq %rsi,%rax
+ andq %rsi,%rbx
+ andq %rsi,%rdi
+ andq %rsi,%rdx
+
+ movq $1,%rbp
+ subq %rax,%r10
+ sbbq %rbx,%r11
+ sbbq %rdi,%r12
+ sbbq %rdx,%r13
+
+
+
+
+ sbbq $0,%rbp
+
+
+
+ addq $512,%rcx
+ movq 32(%rcx),%rax
+ movq 40(%rcx),%rbx
+ movq 48(%rcx),%rdi
+ movq 56(%rcx),%rdx
+
+
+
+ andq %rsi,%rax
+ andq %rsi,%rbx
+ andq %rsi,%rdi
+ andq %rsi,%rdx
+
+
+
+ subq $1,%rbp
+
+ sbbq %rax,%r14
+ sbbq %rbx,%r15
+ sbbq %rdi,%r8
+ sbbq %rdx,%r9
+
+
+
+ movq 144(%rsp),%rsi
+ movq %r10,0(%rsi)
+ movq %r11,8(%rsi)
+ movq %r12,16(%rsi)
+ movq %r13,24(%rsi)
+ movq %r14,32(%rsi)
+ movq %r15,40(%rsi)
+ movq %r8,48(%rsi)
+ movq %r9,56(%rsi)
+
+ .byte 0xf3,0xc3
+.size mont_reduce,.-mont_reduce
+.type mont_mul_a3b,@function
+.align 16
+mont_mul_a3b:
+
+
+
+
+ movq 0(%rdi),%rbp
+
+ movq %r10,%rax
+ mulq %rbp
+ movq %rax,520(%rsp)
+ movq %rdx,%r10
+ movq %r11,%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+ movq %r12,%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ movq %rdx,%r12
+ movq %r13,%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ movq %rdx,%r13
+ movq %r14,%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ movq %rdx,%r14
+ movq %r15,%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ movq %rdx,%r15
+ movq %r8,%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ movq %rdx,%r8
+ movq %r9,%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ movq %rdx,%r9
+ movq 8(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ movq %r10,528(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%r10
+ movq 16(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ movq %r11,536(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+ movq 24(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ movq %r12,544(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%r12
+ movq 32(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ movq %r13,552(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%r13
+ movq 40(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ movq %r14,560(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%r14
+ movq 48(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ movq %r15,568(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ addq %rbx,%r8
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%r15
+ movq 56(%rdi),%rbp
+ movq 0(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r8
+ adcq $0,%rdx
+ movq %r8,576(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r9
+ adcq $0,%rdx
+ addq %rbx,%r9
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 16(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r10
+ adcq $0,%rdx
+ addq %rbx,%r10
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 24(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %rbx,%r11
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 32(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %rbx,%r12
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 40(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %rbx,%r13
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 48(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %rbx,%r14
+ adcq $0,%rdx
+ movq %rdx,%rbx
+
+ movq 56(%rsi),%rax
+ mulq %rbp
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %rbx,%r15
+ adcq $0,%rdx
+ movq %rdx,%r8
+ movq %r9,584(%rsp)
+ movq %r10,592(%rsp)
+ movq %r11,600(%rsp)
+ movq %r12,608(%rsp)
+ movq %r13,616(%rsp)
+ movq %r14,624(%rsp)
+ movq %r15,632(%rsp)
+ movq %r8,640(%rsp)
+
+
+
+
+
+ jmp mont_reduce
+
+
+.size mont_mul_a3b,.-mont_mul_a3b
+.type sqr_reduce,@function
+.align 16
+sqr_reduce:
+ movq 16(%rsp),%rcx
+
+
+
+ movq %r10,%rbx
+
+ movq %r11,%rax
+ mulq %rbx
+ movq %rax,528(%rsp)
+ movq %rdx,%r10
+ movq %r12,%rax
+ mulq %rbx
+ addq %rax,%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+ movq %r13,%rax
+ mulq %rbx
+ addq %rax,%r11
+ adcq $0,%rdx
+ movq %rdx,%r12
+ movq %r14,%rax
+ mulq %rbx
+ addq %rax,%r12
+ adcq $0,%rdx
+ movq %rdx,%r13
+ movq %r15,%rax
+ mulq %rbx
+ addq %rax,%r13
+ adcq $0,%rdx
+ movq %rdx,%r14
+ movq %r8,%rax
+ mulq %rbx
+ addq %rax,%r14
+ adcq $0,%rdx
+ movq %rdx,%r15
+ movq %r9,%rax
+ mulq %rbx
+ addq %rax,%r15
+ adcq $0,%rdx
+ movq %rdx,%rsi
+
+ movq %r10,536(%rsp)
+
+
+
+
+
+ movq 8(%rcx),%rbx
+
+ movq 16(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r11
+ adcq $0,%rdx
+ movq %r11,544(%rsp)
+
+ movq %rdx,%r10
+ movq 24(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %r10,%r12
+ adcq $0,%rdx
+ movq %r12,552(%rsp)
+
+ movq %rdx,%r10
+ movq 32(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r13
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+
+ movq %rdx,%r10
+ movq 40(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %r10,%r14
+ adcq $0,%rdx
+
+ movq %rdx,%r10
+ movq %r8,%rax
+ mulq %rbx
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %r10,%r15
+ adcq $0,%rdx
+
+ movq %rdx,%r10
+ movq %r9,%rax
+ mulq %rbx
+ addq %rax,%rsi
+ adcq $0,%rdx
+ addq %r10,%rsi
+ adcq $0,%rdx
+
+ movq %rdx,%r11
+
+
+
+
+ movq 16(%rcx),%rbx
+
+ movq 24(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r13
+ adcq $0,%rdx
+ movq %r13,560(%rsp)
+
+ movq %rdx,%r10
+ movq 32(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r14
+ adcq $0,%rdx
+ addq %r10,%r14
+ adcq $0,%rdx
+ movq %r14,568(%rsp)
+
+ movq %rdx,%r10
+ movq 40(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %r10,%r15
+ adcq $0,%rdx
+
+ movq %rdx,%r10
+ movq %r8,%rax
+ mulq %rbx
+ addq %rax,%rsi
+ adcq $0,%rdx
+ addq %r10,%rsi
+ adcq $0,%rdx
+
+ movq %rdx,%r10
+ movq %r9,%rax
+ mulq %rbx
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %r10,%r11
+ adcq $0,%rdx
+
+ movq %rdx,%r12
+
+
+
+
+
+ movq 24(%rcx),%rbx
+
+ movq 32(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r15
+ adcq $0,%rdx
+ movq %r15,576(%rsp)
+
+ movq %rdx,%r10
+ movq 40(%rcx),%rax
+ mulq %rbx
+ addq %rax,%rsi
+ adcq $0,%rdx
+ addq %r10,%rsi
+ adcq $0,%rdx
+ movq %rsi,584(%rsp)
+
+ movq %rdx,%r10
+ movq %r8,%rax
+ mulq %rbx
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %r10,%r11
+ adcq $0,%rdx
+
+ movq %rdx,%r10
+ movq %r9,%rax
+ mulq %rbx
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %r10,%r12
+ adcq $0,%rdx
+
+ movq %rdx,%r15
+
+
+
+
+ movq 32(%rcx),%rbx
+
+ movq 40(%rcx),%rax
+ mulq %rbx
+ addq %rax,%r11
+ adcq $0,%rdx
+ movq %r11,592(%rsp)
+
+ movq %rdx,%r10
+ movq %r8,%rax
+ mulq %rbx
+ addq %rax,%r12
+ adcq $0,%rdx
+ addq %r10,%r12
+ adcq $0,%rdx
+ movq %r12,600(%rsp)
+
+ movq %rdx,%r10
+ movq %r9,%rax
+ mulq %rbx
+ addq %rax,%r15
+ adcq $0,%rdx
+ addq %r10,%r15
+ adcq $0,%rdx
+
+ movq %rdx,%r11
+
+
+
+
+ movq 40(%rcx),%rbx
+
+ movq %r8,%rax
+ mulq %rbx
+ addq %rax,%r15
+ adcq $0,%rdx
+ movq %r15,608(%rsp)
+
+ movq %rdx,%r10
+ movq %r9,%rax
+ mulq %rbx
+ addq %rax,%r11
+ adcq $0,%rdx
+ addq %r10,%r11
+ adcq $0,%rdx
+ movq %r11,616(%rsp)
+
+ movq %rdx,%r12
+
+
+
+
+ movq %r8,%rbx
+
+ movq %r9,%rax
+ mulq %rbx
+ addq %rax,%r12
+ adcq $0,%rdx
+ movq %r12,624(%rsp)
+
+ movq %rdx,632(%rsp)
+
+
+ movq 528(%rsp),%r10
+ movq 536(%rsp),%r11
+ movq 544(%rsp),%r12
+ movq 552(%rsp),%r13
+ movq 560(%rsp),%r14
+ movq 568(%rsp),%r15
+
+ movq 24(%rcx),%rax
+ mulq %rax
+ movq %rax,%rdi
+ movq %rdx,%r8
+
+ addq %r10,%r10
+ adcq %r11,%r11
+ adcq %r12,%r12
+ adcq %r13,%r13
+ adcq %r14,%r14
+ adcq %r15,%r15
+ adcq $0,%r8
+
+ movq 0(%rcx),%rax
+ mulq %rax
+ movq %rax,520(%rsp)
+ movq %rdx,%rbx
+
+ movq 8(%rcx),%rax
+ mulq %rax
+
+ addq %rbx,%r10
+ adcq %rax,%r11
+ adcq $0,%rdx
+
+ movq %rdx,%rbx
+ movq %r10,528(%rsp)
+ movq %r11,536(%rsp)
+
+ movq 16(%rcx),%rax
+ mulq %rax
+
+ addq %rbx,%r12
+ adcq %rax,%r13
+ adcq $0,%rdx
+
+ movq %rdx,%rbx
+
+ movq %r12,544(%rsp)
+ movq %r13,552(%rsp)
+
+ xorq %rbp,%rbp
+ addq %rbx,%r14
+ adcq %rdi,%r15
+ adcq $0,%rbp
+
+ movq %r14,560(%rsp)
+ movq %r15,568(%rsp)
+
+
+
+
+ movq 576(%rsp),%r10
+ movq 584(%rsp),%r11
+ movq 592(%rsp),%r12
+ movq 600(%rsp),%r13
+ movq 608(%rsp),%r14
+ movq 616(%rsp),%r15
+ movq 624(%rsp),%rdi
+ movq 632(%rsp),%rsi
+
+ movq %r9,%rax
+ mulq %rax
+ movq %rax,%r9
+ movq %rdx,%rbx
+
+ addq %r10,%r10
+ adcq %r11,%r11
+ adcq %r12,%r12
+ adcq %r13,%r13
+ adcq %r14,%r14
+ adcq %r15,%r15
+ adcq %rdi,%rdi
+ adcq %rsi,%rsi
+ adcq $0,%rbx
+
+ addq %rbp,%r10
+
+ movq 32(%rcx),%rax
+ mulq %rax
+
+ addq %r8,%r10
+ adcq %rax,%r11
+ adcq $0,%rdx
+
+ movq %rdx,%rbp
+
+ movq %r10,576(%rsp)
+ movq %r11,584(%rsp)
+
+ movq 40(%rcx),%rax
+ mulq %rax
+
+ addq %rbp,%r12
+ adcq %rax,%r13
+ adcq $0,%rdx
+
+ movq %rdx,%rbp
+
+ movq %r12,592(%rsp)
+ movq %r13,600(%rsp)
+
+ movq 48(%rcx),%rax
+ mulq %rax
+
+ addq %rbp,%r14
+ adcq %rax,%r15
+ adcq $0,%rdx
+
+ movq %r14,608(%rsp)
+ movq %r15,616(%rsp)
+
+ addq %rdx,%rdi
+ adcq %r9,%rsi
+ adcq $0,%rbx
+
+ movq %rdi,624(%rsp)
+ movq %rsi,632(%rsp)
+ movq %rbx,640(%rsp)
+
+ jmp mont_reduce
+
+
+.size sqr_reduce,.-sqr_reduce
+.globl mod_exp_512
+.type mod_exp_512,@function
+mod_exp_512:
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+
+ movq %rsp,%r8
+ subq $2688,%rsp
+ andq $-64,%rsp
+
+
+ movq %r8,0(%rsp)
+ movq %rdi,8(%rsp)
+ movq %rsi,16(%rsp)
+ movq %rcx,24(%rsp)
+.Lbody:
+
+
+
+ pxor %xmm4,%xmm4
+ movdqu 0(%rsi),%xmm0
+ movdqu 16(%rsi),%xmm1
+ movdqu 32(%rsi),%xmm2
+ movdqu 48(%rsi),%xmm3
+ movdqa %xmm4,512(%rsp)
+ movdqa %xmm4,528(%rsp)
+ movdqa %xmm4,608(%rsp)
+ movdqa %xmm4,624(%rsp)
+ movdqa %xmm0,544(%rsp)
+ movdqa %xmm1,560(%rsp)
+ movdqa %xmm2,576(%rsp)
+ movdqa %xmm3,592(%rsp)
+
+
+ movdqu 0(%rdx),%xmm0
+ movdqu 16(%rdx),%xmm1
+ movdqu 32(%rdx),%xmm2
+ movdqu 48(%rdx),%xmm3
+
+ leaq 384(%rsp),%rbx
+ movq %rbx,136(%rsp)
+ call mont_reduce
+
+
+ leaq 448(%rsp),%rcx
+ xorq %rax,%rax
+ movq %rax,0(%rcx)
+ movq %rax,8(%rcx)
+ movq %rax,24(%rcx)
+ movq %rax,32(%rcx)
+ movq %rax,40(%rcx)
+ movq %rax,48(%rcx)
+ movq %rax,56(%rcx)
+ movq %rax,128(%rsp)
+ movq $1,16(%rcx)
+
+ leaq 640(%rsp),%rbp
+ movq %rcx,%rsi
+ movq %rbp,%rdi
+ movq $8,%rax
+loop_0:
+ movq (%rcx),%rbx
+ movw %bx,(%rdi)
+ shrq $16,%rbx
+ movw %bx,64(%rdi)
+ shrq $16,%rbx
+ movw %bx,128(%rdi)
+ shrq $16,%rbx
+ movw %bx,192(%rdi)
+ leaq 8(%rcx),%rcx
+ leaq 256(%rdi),%rdi
+ decq %rax
+ jnz loop_0
+ movq $31,%rax
+ movq %rax,32(%rsp)
+ movq %rbp,40(%rsp)
+
+ movq %rsi,136(%rsp)
+ movq 0(%rsi),%r10
+ movq 8(%rsi),%r11
+ movq 16(%rsi),%r12
+ movq 24(%rsi),%r13
+ movq 32(%rsi),%r14
+ movq 40(%rsi),%r15
+ movq 48(%rsi),%r8
+ movq 56(%rsi),%r9
+init_loop:
+ leaq 384(%rsp),%rdi
+ call mont_mul_a3b
+ leaq 448(%rsp),%rsi
+ movq 40(%rsp),%rbp
+ addq $2,%rbp
+ movq %rbp,40(%rsp)
+ movq %rsi,%rcx
+ movq $8,%rax
+loop_1:
+ movq (%rcx),%rbx
+ movw %bx,(%rbp)
+ shrq $16,%rbx
+ movw %bx,64(%rbp)
+ shrq $16,%rbx
+ movw %bx,128(%rbp)
+ shrq $16,%rbx
+ movw %bx,192(%rbp)
+ leaq 8(%rcx),%rcx
+ leaq 256(%rbp),%rbp
+ decq %rax
+ jnz loop_1
+ movq 32(%rsp),%rax
+ subq $1,%rax
+ movq %rax,32(%rsp)
+ jne init_loop
+
+
+
+ movdqa %xmm0,64(%rsp)
+ movdqa %xmm1,80(%rsp)
+ movdqa %xmm2,96(%rsp)
+ movdqa %xmm3,112(%rsp)
+
+
+
+
+
+ movl 126(%rsp),%eax
+ movq %rax,%rdx
+ shrq $11,%rax
+ andl $2047,%edx
+ movl %edx,126(%rsp)
+ leaq 640(%rsp,%rax,2),%rsi
+ movq 8(%rsp),%rdx
+ movq $4,%rbp
+loop_2:
+ movzwq 192(%rsi),%rbx
+ movzwq 448(%rsi),%rax
+ shlq $16,%rbx
+ shlq $16,%rax
+ movw 128(%rsi),%bx
+ movw 384(%rsi),%ax
+ shlq $16,%rbx
+ shlq $16,%rax
+ movw 64(%rsi),%bx
+ movw 320(%rsi),%ax
+ shlq $16,%rbx
+ shlq $16,%rax
+ movw 0(%rsi),%bx
+ movw 256(%rsi),%ax
+ movq %rbx,0(%rdx)
+ movq %rax,8(%rdx)
+ leaq 512(%rsi),%rsi
+ leaq 16(%rdx),%rdx
+ subq $1,%rbp
+ jnz loop_2
+ movq $505,48(%rsp)
+
+ movq 8(%rsp),%rcx
+ movq %rcx,136(%rsp)
+ movq 0(%rcx),%r10
+ movq 8(%rcx),%r11
+ movq 16(%rcx),%r12
+ movq 24(%rcx),%r13
+ movq 32(%rcx),%r14
+ movq 40(%rcx),%r15
+ movq 48(%rcx),%r8
+ movq 56(%rcx),%r9
+ jmp sqr_2
+
+main_loop_a3b:
+ call sqr_reduce
+ call sqr_reduce
+ call sqr_reduce
+sqr_2:
+ call sqr_reduce
+ call sqr_reduce
+
+
+
+ movq 48(%rsp),%rcx
+ movq %rcx,%rax
+ shrq $4,%rax
+ movl 64(%rsp,%rax,2),%edx
+ andq $15,%rcx
+ shrq %cl,%rdx
+ andq $31,%rdx
+
+ leaq 640(%rsp,%rdx,2),%rsi
+ leaq 448(%rsp),%rdx
+ movq %rdx,%rdi
+ movq $4,%rbp
+loop_3:
+ movzwq 192(%rsi),%rbx
+ movzwq 448(%rsi),%rax
+ shlq $16,%rbx
+ shlq $16,%rax
+ movw 128(%rsi),%bx
+ movw 384(%rsi),%ax
+ shlq $16,%rbx
+ shlq $16,%rax
+ movw 64(%rsi),%bx
+ movw 320(%rsi),%ax
+ shlq $16,%rbx
+ shlq $16,%rax
+ movw 0(%rsi),%bx
+ movw 256(%rsi),%ax
+ movq %rbx,0(%rdx)
+ movq %rax,8(%rdx)
+ leaq 512(%rsi),%rsi
+ leaq 16(%rdx),%rdx
+ subq $1,%rbp
+ jnz loop_3
+ movq 8(%rsp),%rsi
+ call mont_mul_a3b
+
+
+
+ movq 48(%rsp),%rcx
+ subq $5,%rcx
+ movq %rcx,48(%rsp)
+ jge main_loop_a3b
+
+
+
+end_main_loop_a3b:
+
+
+ movq 8(%rsp),%rdx
+ pxor %xmm4,%xmm4
+ movdqu 0(%rdx),%xmm0
+ movdqu 16(%rdx),%xmm1
+ movdqu 32(%rdx),%xmm2
+ movdqu 48(%rdx),%xmm3
+ movdqa %xmm4,576(%rsp)
+ movdqa %xmm4,592(%rsp)
+ movdqa %xmm4,608(%rsp)
+ movdqa %xmm4,624(%rsp)
+ movdqa %xmm0,512(%rsp)
+ movdqa %xmm1,528(%rsp)
+ movdqa %xmm2,544(%rsp)
+ movdqa %xmm3,560(%rsp)
+ call mont_reduce
+
+
+
+ movq 8(%rsp),%rax
+ movq 0(%rax),%r8
+ movq 8(%rax),%r9
+ movq 16(%rax),%r10
+ movq 24(%rax),%r11
+ movq 32(%rax),%r12
+ movq 40(%rax),%r13
+ movq 48(%rax),%r14
+ movq 56(%rax),%r15
+
+
+ movq 24(%rsp),%rbx
+ addq $512,%rbx
+
+ subq 0(%rbx),%r8
+ sbbq 8(%rbx),%r9
+ sbbq 16(%rbx),%r10
+ sbbq 24(%rbx),%r11
+ sbbq 32(%rbx),%r12
+ sbbq 40(%rbx),%r13
+ sbbq 48(%rbx),%r14
+ sbbq 56(%rbx),%r15
+
+
+ movq 0(%rax),%rsi
+ movq 8(%rax),%rdi
+ movq 16(%rax),%rcx
+ movq 24(%rax),%rdx
+ cmovncq %r8,%rsi
+ cmovncq %r9,%rdi
+ cmovncq %r10,%rcx
+ cmovncq %r11,%rdx
+ movq %rsi,0(%rax)
+ movq %rdi,8(%rax)
+ movq %rcx,16(%rax)
+ movq %rdx,24(%rax)
+
+ movq 32(%rax),%rsi
+ movq 40(%rax),%rdi
+ movq 48(%rax),%rcx
+ movq 56(%rax),%rdx
+ cmovncq %r12,%rsi
+ cmovncq %r13,%rdi
+ cmovncq %r14,%rcx
+ cmovncq %r15,%rdx
+ movq %rsi,32(%rax)
+ movq %rdi,40(%rax)
+ movq %rcx,48(%rax)
+ movq %rdx,56(%rax)
+
+ movq 0(%rsp),%rsi
+ movq 0(%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbx
+ movq 40(%rsi),%rbp
+ leaq 48(%rsi),%rsp
+.Lepilogue:
+ .byte 0xf3,0xc3
+.size mod_exp_512, . - mod_exp_512
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.pl
new file mode 100644
index 00000000..bfd6e975
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/modexp512-x86_64.pl
@@ -0,0 +1,1497 @@
+#!/usr/bin/env perl
+#
+# Copyright (c) 2010-2011 Intel Corp.
+# Author: Vinodh.Gopal@intel.com
+# Jim Guilford
+# Erdinc.Ozturk@intel.com
+# Maxim.Perminov@intel.com
+#
+# More information about algorithm used can be found at:
+# http://www.cse.buffalo.edu/srds2009/escs2009_submission_Gopal.pdf
+#
+# ====================================================================
+# Copyright (c) 2011 The OpenSSL Project. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# 3. All advertising materials mentioning features or use of this
+# software must display the following acknowledgment:
+# "This product includes software developed by the OpenSSL Project
+# for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+#
+# 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+# endorse or promote products derived from this software without
+# prior written permission. For written permission, please contact
+# licensing@OpenSSL.org.
+#
+# 5. Products derived from this software may not be called "OpenSSL"
+# nor may "OpenSSL" appear in their names without prior written
+# permission of the OpenSSL Project.
+#
+# 6. Redistributions of any form whatsoever must retain the following
+# acknowledgment:
+# "This product includes software developed by the OpenSSL Project
+# for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+#
+# THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+# ====================================================================
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+use strict;
+my $code=".text\n\n";
+my $m=0;
+
+#
+# Define x512 macros
+#
+
+#MULSTEP_512_ADD MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src1, src2, add_src, tmp1, tmp2
+#
+# uses rax, rdx, and args
+sub MULSTEP_512_ADD
+{
+ my ($x, $DST, $SRC2, $ASRC, $OP, $TMP)=@_;
+ my @X=@$x; # make a copy
+$code.=<<___;
+ mov (+8*0)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [0]
+ mov ($ASRC), $X[0]
+ add %rax, $X[0]
+ adc \$0, %rdx
+ mov $X[0], $DST
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $TMP
+
+ mov (+8*$i)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ mov (+8*$i)($ASRC), $X[$i]
+ add %rax, $X[$i]
+ adc \$0, %rdx
+ add $TMP, $X[$i]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $X[0]
+___
+}
+
+#MULSTEP_512 MACRO x7, x6, x5, x4, x3, x2, x1, x0, dst, src2, src1_val, tmp
+#
+# uses rax, rdx, and args
+sub MULSTEP_512
+{
+ my ($x, $DST, $SRC2, $OP, $TMP)=@_;
+ my @X=@$x; # make a copy
+$code.=<<___;
+ mov (+8*0)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [0]
+ add %rax, $X[0]
+ adc \$0, %rdx
+ mov $X[0], $DST
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $TMP
+
+ mov (+8*$i)($SRC2), %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ add %rax, $X[$i]
+ adc \$0, %rdx
+ add $TMP, $X[$i]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $X[0]
+___
+}
+
+#
+# Swizzle Macros
+#
+
+# macro to copy data from flat space to swizzled table
+#MACRO swizzle pDst, pSrc, tmp1, tmp2
+# pDst and pSrc are modified
+sub swizzle
+{
+ my ($pDst, $pSrc, $cnt, $d0)=@_;
+$code.=<<___;
+ mov \$8, $cnt
+loop_$m:
+ mov ($pSrc), $d0
+ mov $d0#w, ($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*1)($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*2)($pDst)
+ shr \$16, $d0
+ mov $d0#w, (+64*3)($pDst)
+ lea 8($pSrc), $pSrc
+ lea 64*4($pDst), $pDst
+ dec $cnt
+ jnz loop_$m
+___
+
+ $m++;
+}
+
+# macro to copy data from swizzled table to flat space
+#MACRO unswizzle pDst, pSrc, tmp*3
+sub unswizzle
+{
+ my ($pDst, $pSrc, $cnt, $d0, $d1)=@_;
+$code.=<<___;
+ mov \$4, $cnt
+loop_$m:
+ movzxw (+64*3+256*0)($pSrc), $d0
+ movzxw (+64*3+256*1)($pSrc), $d1
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*2+256*0)($pSrc), $d0#w
+ mov (+64*2+256*1)($pSrc), $d1#w
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*1+256*0)($pSrc), $d0#w
+ mov (+64*1+256*1)($pSrc), $d1#w
+ shl \$16, $d0
+ shl \$16, $d1
+ mov (+64*0+256*0)($pSrc), $d0#w
+ mov (+64*0+256*1)($pSrc), $d1#w
+ mov $d0, (+8*0)($pDst)
+ mov $d1, (+8*1)($pDst)
+ lea 256*2($pSrc), $pSrc
+ lea 8*2($pDst), $pDst
+ sub \$1, $cnt
+ jnz loop_$m
+___
+
+ $m++;
+}
+
+#
+# Data Structures
+#
+
+# Reduce Data
+#
+#
+# Offset Value
+# 0C0 Carries
+# 0B8 X2[10]
+# 0B0 X2[9]
+# 0A8 X2[8]
+# 0A0 X2[7]
+# 098 X2[6]
+# 090 X2[5]
+# 088 X2[4]
+# 080 X2[3]
+# 078 X2[2]
+# 070 X2[1]
+# 068 X2[0]
+# 060 X1[12] P[10]
+# 058 X1[11] P[9] Z[8]
+# 050 X1[10] P[8] Z[7]
+# 048 X1[9] P[7] Z[6]
+# 040 X1[8] P[6] Z[5]
+# 038 X1[7] P[5] Z[4]
+# 030 X1[6] P[4] Z[3]
+# 028 X1[5] P[3] Z[2]
+# 020 X1[4] P[2] Z[1]
+# 018 X1[3] P[1] Z[0]
+# 010 X1[2] P[0] Y[2]
+# 008 X1[1] Q[1] Y[1]
+# 000 X1[0] Q[0] Y[0]
+
+my $X1_offset = 0; # 13 qwords
+my $X2_offset = $X1_offset + 13*8; # 11 qwords
+my $Carries_offset = $X2_offset + 11*8; # 1 qword
+my $Q_offset = 0; # 2 qwords
+my $P_offset = $Q_offset + 2*8; # 11 qwords
+my $Y_offset = 0; # 3 qwords
+my $Z_offset = $Y_offset + 3*8; # 9 qwords
+
+my $Red_Data_Size = $Carries_offset + 1*8; # (25 qwords)
+
+#
+# Stack Frame
+#
+#
+# offset value
+# ... <old stack contents>
+# ...
+# 280 Garray
+
+# 278 tmp16[15]
+# ... ...
+# 200 tmp16[0]
+
+# 1F8 tmp[7]
+# ... ...
+# 1C0 tmp[0]
+
+# 1B8 GT[7]
+# ... ...
+# 180 GT[0]
+
+# 178 Reduce Data
+# ... ...
+# 0B8 Reduce Data
+# 0B0 reserved
+# 0A8 reserved
+# 0A0 reserved
+# 098 reserved
+# 090 reserved
+# 088 reduce result addr
+# 080 exp[8]
+
+# ...
+# 048 exp[1]
+# 040 exp[0]
+
+# 038 reserved
+# 030 loop_idx
+# 028 pg
+# 020 i
+# 018 pData ; arg 4
+# 010 pG ; arg 2
+# 008 pResult ; arg 1
+# 000 rsp ; stack pointer before subtract
+
+my $rsp_offset = 0;
+my $pResult_offset = 8*1 + $rsp_offset;
+my $pG_offset = 8*1 + $pResult_offset;
+my $pData_offset = 8*1 + $pG_offset;
+my $i_offset = 8*1 + $pData_offset;
+my $pg_offset = 8*1 + $i_offset;
+my $loop_idx_offset = 8*1 + $pg_offset;
+my $reserved1_offset = 8*1 + $loop_idx_offset;
+my $exp_offset = 8*1 + $reserved1_offset;
+my $red_result_addr_offset= 8*9 + $exp_offset;
+my $reserved2_offset = 8*1 + $red_result_addr_offset;
+my $Reduce_Data_offset = 8*5 + $reserved2_offset;
+my $GT_offset = $Red_Data_Size + $Reduce_Data_offset;
+my $tmp_offset = 8*8 + $GT_offset;
+my $tmp16_offset = 8*8 + $tmp_offset;
+my $garray_offset = 8*16 + $tmp16_offset;
+my $mem_size = 8*8*32 + $garray_offset;
+
+#
+# Offsets within Reduce Data
+#
+#
+# struct MODF_2FOLD_MONT_512_C1_DATA {
+# UINT64 t[8][8];
+# UINT64 m[8];
+# UINT64 m1[8]; /* 2^768 % m */
+# UINT64 m2[8]; /* 2^640 % m */
+# UINT64 k1[2]; /* (- 1/m) % 2^128 */
+# };
+
+my $T = 0;
+my $M = 512; # = 8 * 8 * 8
+my $M1 = 576; # = 8 * 8 * 9 /* += 8 * 8 */
+my $M2 = 640; # = 8 * 8 * 10 /* += 8 * 8 */
+my $K1 = 704; # = 8 * 8 * 11 /* += 8 * 8 */
+
+#
+# FUNCTIONS
+#
+
+{{{
+#
+# MULADD_128x512 : Function to multiply 128-bits (2 qwords) by 512-bits (8 qwords)
+# and add 512-bits (8 qwords)
+# to get 640 bits (10 qwords)
+# Input: 128-bit mul source: [rdi+8*1], rbp
+# 512-bit mul source: [rsi+8*n]
+# 512-bit add source: r15, r14, ..., r9, r8
+# Output: r9, r8, r15, r14, r13, r12, r11, r10, [rcx+8*1], [rcx+8*0]
+# Clobbers all regs except: rcx, rsi, rdi
+$code.=<<___;
+.type MULADD_128x512,\@abi-omnipotent
+.align 16
+MULADD_128x512:
+___
+ &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
+$code.=<<___;
+ mov (+8*1)(%rdi), %rbp
+___
+ &MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx");
+$code.=<<___;
+ ret
+.size MULADD_128x512,.-MULADD_128x512
+___
+}}}
+
+{{{
+#MULADD_256x512 MACRO pDst, pA, pB, OP, TMP, X7, X6, X5, X4, X3, X2, X1, X0
+#
+# Inputs: pDst: Destination (768 bits, 12 qwords)
+# pA: Multiplicand (1024 bits, 16 qwords)
+# pB: Multiplicand (512 bits, 8 qwords)
+# Dst = Ah * B + Al
+# where Ah is (in qwords) A[15:12] (256 bits) and Al is A[7:0] (512 bits)
+# Results in X3 X2 X1 X0 X7 X6 X5 X4 Dst[3:0]
+# Uses registers: arguments, RAX, RDX
+sub MULADD_256x512
+{
+ my ($pDst, $pA, $pB, $OP, $TMP, $X)=@_;
+$code.=<<___;
+ mov (+8*12)($pA), $OP
+___
+ &MULSTEP_512_ADD($X, "(+8*0)($pDst)", $pB, $pA, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*13)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*1)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*14)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*2)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+
+$code.=<<___;
+ mov (+8*15)($pA), $OP
+___
+ &MULSTEP_512($X, "(+8*3)($pDst)", $pB, $OP, $TMP);
+ push(@$X,shift(@$X));
+}
+
+#
+# mont_reduce(UINT64 *x, /* 1024 bits, 16 qwords */
+# UINT64 *m, /* 512 bits, 8 qwords */
+# MODF_2FOLD_MONT_512_C1_DATA *data,
+# UINT64 *r) /* 512 bits, 8 qwords */
+# Input: x (number to be reduced): tmp16 (Implicit)
+# m (modulus): [pM] (Implicit)
+# data (reduce data): [pData] (Implicit)
+# Output: r (result): Address in [red_res_addr]
+# result also in: r9, r8, r15, r14, r13, r12, r11, r10
+
+my @X=map("%r$_",(8..15));
+
+$code.=<<___;
+.type mont_reduce,\@abi-omnipotent
+.align 16
+mont_reduce:
+___
+
+my $STACK_DEPTH = 8;
+ #
+ # X1 = Xh * M1 + Xl
+$code.=<<___;
+ lea (+$Reduce_Data_offset+$X1_offset+$STACK_DEPTH)(%rsp), %rdi # pX1 (Dst) 769 bits, 13 qwords
+ mov (+$pData_offset+$STACK_DEPTH)(%rsp), %rsi # pM1 (Bsrc) 512 bits, 8 qwords
+ add \$$M1, %rsi
+ lea (+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx # X (Asrc) 1024 bits, 16 qwords
+
+___
+
+ &MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X); # rotates @X 4 times
+ # results in r11, r10, r9, r8, r15, r14, r13, r12, X1[3:0]
+
+$code.=<<___;
+ xor %rax, %rax
+ # X1 += xl
+ add (+8*8)(%rcx), $X[4]
+ adc (+8*9)(%rcx), $X[5]
+ adc (+8*10)(%rcx), $X[6]
+ adc (+8*11)(%rcx), $X[7]
+ adc \$0, %rax
+ # X1 is now rax, r11-r8, r15-r12, tmp16[3:0]
+
+ #
+ # check for carry ;; carry stored in rax
+ mov $X[4], (+8*8)(%rdi) # rdi points to X1
+ mov $X[5], (+8*9)(%rdi)
+ mov $X[6], %rbp
+ mov $X[7], (+8*11)(%rdi)
+
+ mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
+
+ mov (+8*0)(%rdi), $X[4]
+ mov (+8*1)(%rdi), $X[5]
+ mov (+8*2)(%rdi), $X[6]
+ mov (+8*3)(%rdi), $X[7]
+
+ # X1 is now stored in: X1[11], rbp, X1[9:8], r15-r8
+ # rdi -> X1
+ # rsi -> M1
+
+ #
+ # X2 = Xh * M2 + Xl
+ # do first part (X2 = Xh * M2)
+ add \$8*10, %rdi # rdi -> pXh ; 128 bits, 2 qwords
+ # Xh is actually { [rdi+8*1], rbp }
+ add \$`$M2-$M1`, %rsi # rsi -> M2
+ lea (+$Reduce_Data_offset+$X2_offset+$STACK_DEPTH)(%rsp), %rcx # rcx -> pX2 ; 641 bits, 11 qwords
+___
+ unshift(@X,pop(@X)); unshift(@X,pop(@X));
+$code.=<<___;
+
+ call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8
+ # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
+ mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rax
+
+ # X2 += Xl
+ add (+8*8-8*10)(%rdi), $X[6] # (-8*10) is to adjust rdi -> Xh to Xl
+ adc (+8*9-8*10)(%rdi), $X[7]
+ mov $X[6], (+8*8)(%rcx)
+ mov $X[7], (+8*9)(%rcx)
+
+ adc %rax, %rax
+ mov %rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)
+
+ lea (+$Reduce_Data_offset+$Q_offset+$STACK_DEPTH)(%rsp), %rdi # rdi -> pQ ; 128 bits, 2 qwords
+ add \$`$K1-$M2`, %rsi # rsi -> pK1 ; 128 bits, 2 qwords
+
+ # MUL_128x128t128 rdi, rcx, rsi ; Q = X2 * K1 (bottom half)
+ # B1:B0 = rsi[1:0] = K1[1:0]
+ # A1:A0 = rcx[1:0] = X2[1:0]
+ # Result = rdi[1],rbp = Q[1],rbp
+ mov (%rsi), %r8 # B0
+ mov (+8*1)(%rsi), %rbx # B1
+
+ mov (%rcx), %rax # A0
+ mul %r8 # B0
+ mov %rax, %rbp
+ mov %rdx, %r9
+
+ mov (+8*1)(%rcx), %rax # A1
+ mul %r8 # B0
+ add %rax, %r9
+
+ mov (%rcx), %rax # A0
+ mul %rbx # B1
+ add %rax, %r9
+
+ mov %r9, (+8*1)(%rdi)
+ # end MUL_128x128t128
+
+ sub \$`$K1-$M`, %rsi
+
+ mov (%rcx), $X[6]
+ mov (+8*1)(%rcx), $X[7] # r9:r8 = X2[1:0]
+
+ call MULADD_128x512 # args in rcx, rdi / rbp, rsi, r15-r8
+ # result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
+
+ # load first half of m to rdx, rdi, rbx, rax
+ # moved this here for efficiency
+ mov (+8*0)(%rsi), %rax
+ mov (+8*1)(%rsi), %rbx
+ mov (+8*2)(%rsi), %rdi
+ mov (+8*3)(%rsi), %rdx
+
+ # continue with reduction
+ mov (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rbp
+
+ add (+8*8)(%rcx), $X[6]
+ adc (+8*9)(%rcx), $X[7]
+
+ #accumulate the final carry to rbp
+ adc %rbp, %rbp
+
+ # Add in overflow corrections: R = (X2>>128) += T[overflow]
+ # R = {r9, r8, r15, r14, ..., r10}
+ shl \$3, %rbp
+ mov (+$pData_offset+$STACK_DEPTH)(%rsp), %rcx # rcx -> Data (and points to T)
+ add %rcx, %rbp # pT ; 512 bits, 8 qwords, spread out
+
+ # rsi will be used to generate a mask after the addition
+ xor %rsi, %rsi
+
+ add (+8*8*0)(%rbp), $X[0]
+ adc (+8*8*1)(%rbp), $X[1]
+ adc (+8*8*2)(%rbp), $X[2]
+ adc (+8*8*3)(%rbp), $X[3]
+ adc (+8*8*4)(%rbp), $X[4]
+ adc (+8*8*5)(%rbp), $X[5]
+ adc (+8*8*6)(%rbp), $X[6]
+ adc (+8*8*7)(%rbp), $X[7]
+
+ # if there is a carry: rsi = 0xFFFFFFFFFFFFFFFF
+ # if carry is clear: rsi = 0x0000000000000000
+ sbb \$0, %rsi
+
+ # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
+ and %rsi, %rax
+ and %rsi, %rbx
+ and %rsi, %rdi
+ and %rsi, %rdx
+
+ mov \$1, %rbp
+ sub %rax, $X[0]
+ sbb %rbx, $X[1]
+ sbb %rdi, $X[2]
+ sbb %rdx, $X[3]
+
+ # if there is a borrow: rbp = 0
+ # if there is no borrow: rbp = 1
+ # this is used to save the borrows in between the first half and the 2nd half of the subtraction of m
+ sbb \$0, %rbp
+
+ #load second half of m to rdx, rdi, rbx, rax
+
+ add \$$M, %rcx
+ mov (+8*4)(%rcx), %rax
+ mov (+8*5)(%rcx), %rbx
+ mov (+8*6)(%rcx), %rdi
+ mov (+8*7)(%rcx), %rdx
+
+ # use the rsi mask as before
+ # if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
+ and %rsi, %rax
+ and %rsi, %rbx
+ and %rsi, %rdi
+ and %rsi, %rdx
+
+ # if rbp = 0, there was a borrow before, it is moved to the carry flag
+ # if rbp = 1, there was not a borrow before, carry flag is cleared
+ sub \$1, %rbp
+
+ sbb %rax, $X[4]
+ sbb %rbx, $X[5]
+ sbb %rdi, $X[6]
+ sbb %rdx, $X[7]
+
+ # write R back to memory
+
+ mov (+$red_result_addr_offset+$STACK_DEPTH)(%rsp), %rsi
+ mov $X[0], (+8*0)(%rsi)
+ mov $X[1], (+8*1)(%rsi)
+ mov $X[2], (+8*2)(%rsi)
+ mov $X[3], (+8*3)(%rsi)
+ mov $X[4], (+8*4)(%rsi)
+ mov $X[5], (+8*5)(%rsi)
+ mov $X[6], (+8*6)(%rsi)
+ mov $X[7], (+8*7)(%rsi)
+
+ ret
+.size mont_reduce,.-mont_reduce
+___
+}}}
+
+{{{
+#MUL_512x512 MACRO pDst, pA, pB, x7, x6, x5, x4, x3, x2, x1, x0, tmp*2
+#
+# Inputs: pDst: Destination (1024 bits, 16 qwords)
+# pA: Multiplicand (512 bits, 8 qwords)
+# pB: Multiplicand (512 bits, 8 qwords)
+# Uses registers rax, rdx, args
+# B operand in [pB] and also in x7...x0
+sub MUL_512x512
+{
+ my ($pDst, $pA, $pB, $x, $OP, $TMP, $pDst_o)=@_;
+ my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
+ my @X=@$x; # make a copy
+
+$code.=<<___;
+ mov (+8*0)($pA), $OP
+
+ mov $X[0], %rax
+ mul $OP # rdx:rax = %OP * [0]
+ mov %rax, (+$pDst_o+8*0)($pDst)
+ mov %rdx, $X[0]
+___
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov $X[$i], %rax
+ mul $OP # rdx:rax = %OP * [$i]
+ add %rax, $X[$i-1]
+ adc \$0, %rdx
+ mov %rdx, $X[$i]
+___
+}
+
+for(my $i=1;$i<8;$i++) {
+$code.=<<___;
+ mov (+8*$i)($pA), $OP
+___
+
+ &MULSTEP_512(\@X, "(+$pDst_o+8*$i)($pDst)", $pB, $OP, $TMP);
+ push(@X,shift(@X));
+}
+
+$code.=<<___;
+ mov $X[0], (+$pDst_o+8*8)($pDst)
+ mov $X[1], (+$pDst_o+8*9)($pDst)
+ mov $X[2], (+$pDst_o+8*10)($pDst)
+ mov $X[3], (+$pDst_o+8*11)($pDst)
+ mov $X[4], (+$pDst_o+8*12)($pDst)
+ mov $X[5], (+$pDst_o+8*13)($pDst)
+ mov $X[6], (+$pDst_o+8*14)($pDst)
+ mov $X[7], (+$pDst_o+8*15)($pDst)
+___
+}
+
+#
+# mont_mul_a3b : subroutine to compute (Src1 * Src2) % M (all 512-bits)
+# Input: src1: Address of source 1: rdi
+# src2: Address of source 2: rsi
+# Output: dst: Address of destination: [red_res_addr]
+# src2 and result also in: r9, r8, r15, r14, r13, r12, r11, r10
+# Temp: Clobbers [tmp16], all registers
+$code.=<<___;
+.type mont_mul_a3b,\@abi-omnipotent
+.align 16
+mont_mul_a3b:
+ #
+ # multiply tmp = src1 * src2
+ # For multiply: dst = rcx, src1 = rdi, src2 = rsi
+ # stack depth is extra 8 from call
+___
+ &MUL_512x512("%rsp+$tmp16_offset+8", "%rdi", "%rsi", [map("%r$_",(10..15,8..9))], "%rbp", "%rbx");
+$code.=<<___;
+ #
+ # Dst = tmp % m
+ # Call reduce(tmp, m, data, dst)
+
+ # tail recursion optimization: jmp to mont_reduce and return from there
+ jmp mont_reduce
+ # call mont_reduce
+ # ret
+.size mont_mul_a3b,.-mont_mul_a3b
+___
+}}}
+
+{{{
+#SQR_512 MACRO pDest, pA, x7, x6, x5, x4, x3, x2, x1, x0, tmp*4
+#
+# Input in memory [pA] and also in x7...x0
+# Uses all argument registers plus rax and rdx
+#
+# This version computes all of the off-diagonal terms into memory,
+# and then it adds in the diagonal terms
+
+sub SQR_512
+{
+ my ($pDst, $pA, $x, $A, $tmp, $x7, $x6, $pDst_o)=@_;
+ my ($pDst, $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
+ my @X=@$x; # make a copy
+$code.=<<___;
+ # ------------------
+ # first pass 01...07
+ # ------------------
+ mov $X[0], $A
+
+ mov $X[1],%rax
+ mul $A
+ mov %rax, (+$pDst_o+8*1)($pDst)
+___
+for(my $i=2;$i<8;$i++) {
+$code.=<<___;
+ mov %rdx, $X[$i-2]
+ mov $X[$i],%rax
+ mul $A
+ add %rax, $X[$i-2]
+ adc \$0, %rdx
+___
+}
+$code.=<<___;
+ mov %rdx, $x7
+
+ mov $X[0], (+$pDst_o+8*2)($pDst)
+
+ # ------------------
+ # second pass 12...17
+ # ------------------
+
+ mov (+8*1)($pA), $A
+
+ mov (+8*2)($pA),%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*3)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*3)($pA),%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*4)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[3]
+ adc \$0, %rdx
+ add $X[0], $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[4]
+ adc \$0, %rdx
+ add $X[0], $X[4]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+
+ mov %rdx, $X[1]
+
+ # ------------------
+ # third pass 23...27
+ # ------------------
+ mov (+8*2)($pA), $A
+
+ mov (+8*3)($pA),%rax
+ mul $A
+ add %rax, $X[3]
+ adc \$0, %rdx
+ mov $X[3], (+$pDst_o+8*5)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[4]
+ adc \$0, %rdx
+ add $X[0], $X[4]
+ adc \$0, %rdx
+ mov $X[4], (+$pDst_o+8*6)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $X[2]
+
+ # ------------------
+ # fourth pass 34...37
+ # ------------------
+
+ mov (+8*3)($pA), $A
+
+ mov (+8*4)($pA),%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ mov $X[5], (+$pDst_o+8*7)($pDst)
+
+ mov %rdx, $X[0]
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $x7
+ adc \$0, %rdx
+ add $X[0], $x7
+ adc \$0, %rdx
+ mov $x7, (+$pDst_o+8*8)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+
+ mov %rdx, $X[5]
+
+ # ------------------
+ # fifth pass 45...47
+ # ------------------
+ mov (+8*4)($pA), $A
+
+ mov (+8*5)($pA),%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*9)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ add $X[0], $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*10)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ add $X[0], $X[5]
+ adc \$0, %rdx
+
+ mov %rdx, $X[1]
+
+ # ------------------
+ # sixth pass 56...57
+ # ------------------
+ mov (+8*5)($pA), $A
+
+ mov $X[6],%rax
+ mul $A
+ add %rax, $X[5]
+ adc \$0, %rdx
+ mov $X[5], (+$pDst_o+8*11)($pDst)
+
+ mov %rdx, $X[0]
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[1]
+ adc \$0, %rdx
+ add $X[0], $X[1]
+ adc \$0, %rdx
+ mov $X[1], (+$pDst_o+8*12)($pDst)
+
+ mov %rdx, $X[2]
+
+ # ------------------
+ # seventh pass 67
+ # ------------------
+ mov $X[6], $A
+
+ mov $X[7],%rax
+ mul $A
+ add %rax, $X[2]
+ adc \$0, %rdx
+ mov $X[2], (+$pDst_o+8*13)($pDst)
+
+ mov %rdx, (+$pDst_o+8*14)($pDst)
+
+ # start finalize (add in squares, and double off-terms)
+ mov (+$pDst_o+8*1)($pDst), $X[0]
+ mov (+$pDst_o+8*2)($pDst), $X[1]
+ mov (+$pDst_o+8*3)($pDst), $X[2]
+ mov (+$pDst_o+8*4)($pDst), $X[3]
+ mov (+$pDst_o+8*5)($pDst), $X[4]
+ mov (+$pDst_o+8*6)($pDst), $X[5]
+
+ mov (+8*3)($pA), %rax
+ mul %rax
+ mov %rax, $x6
+ mov %rdx, $X[6]
+
+ add $X[0], $X[0]
+ adc $X[1], $X[1]
+ adc $X[2], $X[2]
+ adc $X[3], $X[3]
+ adc $X[4], $X[4]
+ adc $X[5], $X[5]
+ adc \$0, $X[6]
+
+ mov (+8*0)($pA), %rax
+ mul %rax
+ mov %rax, (+$pDst_o+8*0)($pDst)
+ mov %rdx, $A
+
+ mov (+8*1)($pA), %rax
+ mul %rax
+
+ add $A, $X[0]
+ adc %rax, $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $A
+ mov $X[0], (+$pDst_o+8*1)($pDst)
+ mov $X[1], (+$pDst_o+8*2)($pDst)
+
+ mov (+8*2)($pA), %rax
+ mul %rax
+
+ add $A, $X[2]
+ adc %rax, $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $A
+
+ mov $X[2], (+$pDst_o+8*3)($pDst)
+ mov $X[3], (+$pDst_o+8*4)($pDst)
+
+ xor $tmp, $tmp
+ add $A, $X[4]
+ adc $x6, $X[5]
+ adc \$0, $tmp
+
+ mov $X[4], (+$pDst_o+8*5)($pDst)
+ mov $X[5], (+$pDst_o+8*6)($pDst)
+
+ # %%tmp has 0/1 in column 7
+ # %%A6 has a full value in column 7
+
+ mov (+$pDst_o+8*7)($pDst), $X[0]
+ mov (+$pDst_o+8*8)($pDst), $X[1]
+ mov (+$pDst_o+8*9)($pDst), $X[2]
+ mov (+$pDst_o+8*10)($pDst), $X[3]
+ mov (+$pDst_o+8*11)($pDst), $X[4]
+ mov (+$pDst_o+8*12)($pDst), $X[5]
+ mov (+$pDst_o+8*13)($pDst), $x6
+ mov (+$pDst_o+8*14)($pDst), $x7
+
+ mov $X[7], %rax
+ mul %rax
+ mov %rax, $X[7]
+ mov %rdx, $A
+
+ add $X[0], $X[0]
+ adc $X[1], $X[1]
+ adc $X[2], $X[2]
+ adc $X[3], $X[3]
+ adc $X[4], $X[4]
+ adc $X[5], $X[5]
+ adc $x6, $x6
+ adc $x7, $x7
+ adc \$0, $A
+
+ add $tmp, $X[0]
+
+ mov (+8*4)($pA), %rax
+ mul %rax
+
+ add $X[6], $X[0]
+ adc %rax, $X[1]
+ adc \$0, %rdx
+
+ mov %rdx, $tmp
+
+ mov $X[0], (+$pDst_o+8*7)($pDst)
+ mov $X[1], (+$pDst_o+8*8)($pDst)
+
+ mov (+8*5)($pA), %rax
+ mul %rax
+
+ add $tmp, $X[2]
+ adc %rax, $X[3]
+ adc \$0, %rdx
+
+ mov %rdx, $tmp
+
+ mov $X[2], (+$pDst_o+8*9)($pDst)
+ mov $X[3], (+$pDst_o+8*10)($pDst)
+
+ mov (+8*6)($pA), %rax
+ mul %rax
+
+ add $tmp, $X[4]
+ adc %rax, $X[5]
+ adc \$0, %rdx
+
+ mov $X[4], (+$pDst_o+8*11)($pDst)
+ mov $X[5], (+$pDst_o+8*12)($pDst)
+
+ add %rdx, $x6
+ adc $X[7], $x7
+ adc \$0, $A
+
+ mov $x6, (+$pDst_o+8*13)($pDst)
+ mov $x7, (+$pDst_o+8*14)($pDst)
+ mov $A, (+$pDst_o+8*15)($pDst)
+___
+}
+
+#
+# sqr_reduce: subroutine to compute Result = reduce(Result * Result)
+#
+# input and result also in: r9, r8, r15, r14, r13, r12, r11, r10
+#
+$code.=<<___;
+.type sqr_reduce,\@abi-omnipotent
+.align 16
+sqr_reduce:
+ mov (+$pResult_offset+8)(%rsp), %rcx
+___
+ &SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
+$code.=<<___;
+ # tail recursion optimization: jmp to mont_reduce and return from there
+ jmp mont_reduce
+ # call mont_reduce
+ # ret
+.size sqr_reduce,.-sqr_reduce
+___
+}}}
+
+#
+# MAIN FUNCTION
+#
+
+#mod_exp_512(UINT64 *result, /* 512 bits, 8 qwords */
+# UINT64 *g, /* 512 bits, 8 qwords */
+# UINT64 *exp, /* 512 bits, 8 qwords */
+# struct mod_ctx_512 *data)
+
+# window size = 5
+# table size = 2^5 = 32
+#table_entries equ 32
+#table_size equ table_entries * 8
+$code.=<<___;
+.globl mod_exp_512
+.type mod_exp_512,\@function,4
+mod_exp_512:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ # adjust stack down and then align it with cache boundary
+ mov %rsp, %r8
+ sub \$$mem_size, %rsp
+ and \$-64, %rsp
+
+ # store previous stack pointer and arguments
+ mov %r8, (+$rsp_offset)(%rsp)
+ mov %rdi, (+$pResult_offset)(%rsp)
+ mov %rsi, (+$pG_offset)(%rsp)
+ mov %rcx, (+$pData_offset)(%rsp)
+.Lbody:
+ # transform g into montgomery space
+ # GT = reduce(g * C2) = reduce(g * (2^256))
+ # reduce expects to have the input in [tmp16]
+ pxor %xmm4, %xmm4
+ movdqu (+16*0)(%rsi), %xmm0
+ movdqu (+16*1)(%rsi), %xmm1
+ movdqu (+16*2)(%rsi), %xmm2
+ movdqu (+16*3)(%rsi), %xmm3
+ movdqa %xmm4, (+$tmp16_offset+16*0)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*1)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
+ movdqa %xmm0, (+$tmp16_offset+16*2)(%rsp)
+ movdqa %xmm1, (+$tmp16_offset+16*3)(%rsp)
+ movdqa %xmm2, (+$tmp16_offset+16*4)(%rsp)
+ movdqa %xmm3, (+$tmp16_offset+16*5)(%rsp)
+
+ # load pExp before rdx gets blown away
+ movdqu (+16*0)(%rdx), %xmm0
+ movdqu (+16*1)(%rdx), %xmm1
+ movdqu (+16*2)(%rdx), %xmm2
+ movdqu (+16*3)(%rdx), %xmm3
+
+ lea (+$GT_offset)(%rsp), %rbx
+ mov %rbx, (+$red_result_addr_offset)(%rsp)
+ call mont_reduce
+
+ # Initialize tmp = C
+ lea (+$tmp_offset)(%rsp), %rcx
+ xor %rax, %rax
+ mov %rax, (+8*0)(%rcx)
+ mov %rax, (+8*1)(%rcx)
+ mov %rax, (+8*3)(%rcx)
+ mov %rax, (+8*4)(%rcx)
+ mov %rax, (+8*5)(%rcx)
+ mov %rax, (+8*6)(%rcx)
+ mov %rax, (+8*7)(%rcx)
+ mov %rax, (+$exp_offset+8*8)(%rsp)
+ movq \$1, (+8*2)(%rcx)
+
+ lea (+$garray_offset)(%rsp), %rbp
+ mov %rcx, %rsi # pTmp
+ mov %rbp, %rdi # Garray[][0]
+___
+
+ &swizzle("%rdi", "%rcx", "%rax", "%rbx");
+
+ # for (rax = 31; rax != 0; rax--) {
+ # tmp = reduce(tmp * G)
+ # swizzle(pg, tmp);
+ # pg += 2; }
+$code.=<<___;
+ mov \$31, %rax
+ mov %rax, (+$i_offset)(%rsp)
+ mov %rbp, (+$pg_offset)(%rsp)
+ # rsi -> pTmp
+ mov %rsi, (+$red_result_addr_offset)(%rsp)
+ mov (+8*0)(%rsi), %r10
+ mov (+8*1)(%rsi), %r11
+ mov (+8*2)(%rsi), %r12
+ mov (+8*3)(%rsi), %r13
+ mov (+8*4)(%rsi), %r14
+ mov (+8*5)(%rsi), %r15
+ mov (+8*6)(%rsi), %r8
+ mov (+8*7)(%rsi), %r9
+init_loop:
+ lea (+$GT_offset)(%rsp), %rdi
+ call mont_mul_a3b
+ lea (+$tmp_offset)(%rsp), %rsi
+ mov (+$pg_offset)(%rsp), %rbp
+ add \$2, %rbp
+ mov %rbp, (+$pg_offset)(%rsp)
+ mov %rsi, %rcx # rcx = rsi = addr of tmp
+___
+
+ &swizzle("%rbp", "%rcx", "%rax", "%rbx");
+$code.=<<___;
+ mov (+$i_offset)(%rsp), %rax
+ sub \$1, %rax
+ mov %rax, (+$i_offset)(%rsp)
+ jne init_loop
+
+ #
+ # Copy exponent onto stack
+ movdqa %xmm0, (+$exp_offset+16*0)(%rsp)
+ movdqa %xmm1, (+$exp_offset+16*1)(%rsp)
+ movdqa %xmm2, (+$exp_offset+16*2)(%rsp)
+ movdqa %xmm3, (+$exp_offset+16*3)(%rsp)
+
+
+ #
+ # Do exponentiation
+ # Initialize result to G[exp{511:507}]
+ mov (+$exp_offset+62)(%rsp), %eax
+ mov %rax, %rdx
+ shr \$11, %rax
+ and \$0x07FF, %edx
+ mov %edx, (+$exp_offset+62)(%rsp)
+ lea (+$garray_offset)(%rsp,%rax,2), %rsi
+ mov (+$pResult_offset)(%rsp), %rdx
+___
+
+ &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
+
+ #
+ # Loop variables
+ # rcx = [loop_idx] = index: 510-5 to 0 by 5
+$code.=<<___;
+ movq \$505, (+$loop_idx_offset)(%rsp)
+
+ mov (+$pResult_offset)(%rsp), %rcx
+ mov %rcx, (+$red_result_addr_offset)(%rsp)
+ mov (+8*0)(%rcx), %r10
+ mov (+8*1)(%rcx), %r11
+ mov (+8*2)(%rcx), %r12
+ mov (+8*3)(%rcx), %r13
+ mov (+8*4)(%rcx), %r14
+ mov (+8*5)(%rcx), %r15
+ mov (+8*6)(%rcx), %r8
+ mov (+8*7)(%rcx), %r9
+ jmp sqr_2
+
+main_loop_a3b:
+ call sqr_reduce
+ call sqr_reduce
+ call sqr_reduce
+sqr_2:
+ call sqr_reduce
+ call sqr_reduce
+
+ #
+ # Do multiply, first look up proper value in Garray
+ mov (+$loop_idx_offset)(%rsp), %rcx # bit index
+ mov %rcx, %rax
+ shr \$4, %rax # rax is word pointer
+ mov (+$exp_offset)(%rsp,%rax,2), %edx
+ and \$15, %rcx
+ shrq %cl, %rdx
+ and \$0x1F, %rdx
+
+ lea (+$garray_offset)(%rsp,%rdx,2), %rsi
+ lea (+$tmp_offset)(%rsp), %rdx
+ mov %rdx, %rdi
+___
+
+ &unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
+ # rdi = tmp = pG
+
+ #
+ # Call mod_mul_a1(pDst, pSrc1, pSrc2, pM, pData)
+ # result result pG M Data
+$code.=<<___;
+ mov (+$pResult_offset)(%rsp), %rsi
+ call mont_mul_a3b
+
+ #
+ # finish loop
+ mov (+$loop_idx_offset)(%rsp), %rcx
+ sub \$5, %rcx
+ mov %rcx, (+$loop_idx_offset)(%rsp)
+ jge main_loop_a3b
+
+ #
+
+end_main_loop_a3b:
+ # transform result out of Montgomery space
+ # result = reduce(result)
+ mov (+$pResult_offset)(%rsp), %rdx
+ pxor %xmm4, %xmm4
+ movdqu (+16*0)(%rdx), %xmm0
+ movdqu (+16*1)(%rdx), %xmm1
+ movdqu (+16*2)(%rdx), %xmm2
+ movdqu (+16*3)(%rdx), %xmm3
+ movdqa %xmm4, (+$tmp16_offset+16*4)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*5)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
+ movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
+ movdqa %xmm0, (+$tmp16_offset+16*0)(%rsp)
+ movdqa %xmm1, (+$tmp16_offset+16*1)(%rsp)
+ movdqa %xmm2, (+$tmp16_offset+16*2)(%rsp)
+ movdqa %xmm3, (+$tmp16_offset+16*3)(%rsp)
+ call mont_reduce
+
+	# If result > m, subtract m
+ # load result into r15:r8
+ mov (+$pResult_offset)(%rsp), %rax
+ mov (+8*0)(%rax), %r8
+ mov (+8*1)(%rax), %r9
+ mov (+8*2)(%rax), %r10
+ mov (+8*3)(%rax), %r11
+ mov (+8*4)(%rax), %r12
+ mov (+8*5)(%rax), %r13
+ mov (+8*6)(%rax), %r14
+ mov (+8*7)(%rax), %r15
+
+ # subtract m
+ mov (+$pData_offset)(%rsp), %rbx
+ add \$$M, %rbx
+
+ sub (+8*0)(%rbx), %r8
+ sbb (+8*1)(%rbx), %r9
+ sbb (+8*2)(%rbx), %r10
+ sbb (+8*3)(%rbx), %r11
+ sbb (+8*4)(%rbx), %r12
+ sbb (+8*5)(%rbx), %r13
+ sbb (+8*6)(%rbx), %r14
+ sbb (+8*7)(%rbx), %r15
+
+ # if Carry is clear, replace result with difference
+ mov (+8*0)(%rax), %rsi
+ mov (+8*1)(%rax), %rdi
+ mov (+8*2)(%rax), %rcx
+ mov (+8*3)(%rax), %rdx
+ cmovnc %r8, %rsi
+ cmovnc %r9, %rdi
+ cmovnc %r10, %rcx
+ cmovnc %r11, %rdx
+ mov %rsi, (+8*0)(%rax)
+ mov %rdi, (+8*1)(%rax)
+ mov %rcx, (+8*2)(%rax)
+ mov %rdx, (+8*3)(%rax)
+
+ mov (+8*4)(%rax), %rsi
+ mov (+8*5)(%rax), %rdi
+ mov (+8*6)(%rax), %rcx
+ mov (+8*7)(%rax), %rdx
+ cmovnc %r12, %rsi
+ cmovnc %r13, %rdi
+ cmovnc %r14, %rcx
+ cmovnc %r15, %rdx
+ mov %rsi, (+8*4)(%rax)
+ mov %rdi, (+8*5)(%rax)
+ mov %rcx, (+8*6)(%rax)
+ mov %rdx, (+8*7)(%rax)
+
+ mov (+$rsp_offset)(%rsp), %rsi
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbx
+ mov 40(%rsi),%rbp
+ lea 48(%rsi),%rsp
+.Lepilogue:
+ ret
+.size mod_exp_512, . - mod_exp_512
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+my $rec="%rcx";
+my $frame="%rdx";
+my $context="%r8";
+my $disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mod_exp_512_se_handler,\@abi-omnipotent
+.align 16
+mod_exp_512_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lbody(%rip),%r10
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lepilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ mov $rsp_offset(%rax),%rax # pull saved Rsp
+
+ mov 32(%rax),%rbx
+ mov 40(%rax),%rbp
+ mov 24(%rax),%r12
+ mov 16(%rax),%r13
+ mov 8(%rax),%r14
+ mov 0(%rax),%r15
+ lea 48(%rax),%rax
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size mod_exp_512_se_handler,.-mod_exp_512_se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_mod_exp_512
+ .rva .LSEH_end_mod_exp_512
+ .rva .LSEH_info_mod_exp_512
+
+.section .xdata
+.align 8
+.LSEH_info_mod_exp_512:
+ .byte 9,0,0,0
+ .rva mod_exp_512_se_handler
+___
+}
+
+sub reg_part {
+my ($reg,$conv)=@_;
+ if ($reg =~ /%r[0-9]+/) { $reg .= $conv; }
+ elsif ($conv eq "b") { $reg =~ s/%[er]([^x]+)x?/%$1l/; }
+ elsif ($conv eq "w") { $reg =~ s/%[er](.+)/%$1/; }
+ elsif ($conv eq "d") { $reg =~ s/%[er](.+)/%e$1/; }
+ return $reg;
+}
+
+$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/(\(\+[^)]+\))/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2.S
new file mode 100644
index 00000000..f3b16290
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2.S
@@ -0,0 +1,1618 @@
+;
+; PA-RISC 2.0 implementation of bn_asm code, based on the
+; 64-bit version of the code. This code is effectively the
+; same as the 64-bit version except the register model is
+; slightly different given all values must be 32-bit between
+; function calls. Thus the 64-bit return values are returned
+; in %ret0 and %ret1 vs just %ret0 as is done in the 64-bit version.
+;
+;
+; This code is approximately 2x faster than the C version
+; for RSA/DSA.
+;
+; See http://devresource.hp.com/ for more details on the PA-RISC
+; architecture. Also see the book "PA-RISC 2.0 Architecture"
+; by Gerry Kane for information on the instruction set architecture.
+;
+; Code written by Chris Ruemmler (with some help from the HP C
+; compiler).
+;
+; The code compiles with HP's assembler
+;
+
+ .level 2.0N
+ .space $TEXT$
+ .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
+
+;
+; Global Register definitions used for the routines.
+;
+; Some information about HP's runtime architecture for 32-bits.
+;
+; "Caller save" means the calling function must save the register
+; if it wants the register to be preserved.
+; "Callee save" means if a function uses the register, it must save
+; the value before using it.
+;
+; For the floating point registers
+;
+; "caller save" registers: fr4-fr11, fr22-fr31
+; "callee save" registers: fr12-fr21
+; "special" registers: fr0-fr3 (status and exception registers)
+;
+; For the integer registers
+; value zero : r0
+; "caller save" registers: r1,r19-r26
+; "callee save" registers: r3-r18
+; return register : r2 (rp)
+; return values ; r28,r29 (ret0,ret1)
+; Stack pointer ; r30 (sp)
+; millicode return ptr ; r31 (also a caller save register)
+
+
+;
+; Arguments to the routines
+;
+r_ptr .reg %r26
+a_ptr .reg %r25
+b_ptr .reg %r24
+num .reg %r24
+n .reg %r23
+
+;
+; Note that the "w" argument for bn_mul_add_words and bn_mul_words
+; is passed on the stack at a delta of -56 from the top of stack
+; as the routine is entered.
+;
+
+;
+; Globals used in some routines
+;
+
+top_overflow .reg %r23
+high_mask .reg %r22 ; value 0xffffffff80000000L
+
+
+;------------------------------------------------------------------------------
+;
+; bn_mul_add_words
+;
+;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
+; int num, BN_ULONG w)
+;
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = num
+; -56(sp) = w
+;
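Before the register gymnastics below, it may help to state the contract in plain C. This is a hedged reference for the usual OpenSSL bn_asm semantics of bn_mul_add_words (rp[i] += ap[i] * w with carry propagation, final carry returned); the unsigned __int128 intermediate stands in for the four 32-bit XMPYU partial products the assembly builds by hand, and is an assumption of the sketch, not of this file.

    #include <stdint.h>
    typedef uint64_t BN_ULONG;

    BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                                  int num, BN_ULONG w)
    {
        BN_ULONG carry = 0;
        for (int i = 0; i < num; i++) {
            unsigned __int128 t = (unsigned __int128)ap[i] * w + rp[i] + carry;
            rp[i] = (BN_ULONG)t;               /* low 64 bits back into r */
            carry = (BN_ULONG)(t >> 64);       /* high 64 bits carried on */
        }
        return carry;
    }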
+; Local register definitions
+;
+
+fm1 .reg %fr22
+fm .reg %fr23
+ht_temp .reg %fr24
+ht_temp_1 .reg %fr25
+lt_temp .reg %fr26
+lt_temp_1 .reg %fr27
+fm1_1 .reg %fr28
+fm_1 .reg %fr29
+
+fw_h .reg %fr7L
+fw_l .reg %fr7R
+fw .reg %fr7
+
+fht_0 .reg %fr8L
+flt_0 .reg %fr8R
+t_float_0 .reg %fr8
+
+fht_1 .reg %fr9L
+flt_1 .reg %fr9R
+t_float_1 .reg %fr9
+
+tmp_0 .reg %r31
+tmp_1 .reg %r21
+m_0 .reg %r20
+m_1 .reg %r19
+ht_0 .reg %r1
+ht_1 .reg %r3
+lt_0 .reg %r4
+lt_1 .reg %r5
+m1_0 .reg %r6
+m1_1 .reg %r7
+rp_val .reg %r8
+rp_val_1 .reg %r9
+
+bn_mul_add_words
+ .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
+ .proc
+ .callinfo frame=128
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP ; Needed to make the loop 16-byte aligned
+ NOP ; needed to make the loop 16-byte aligned
+
+ STD %r5,16(%sp) ; save r5
+ NOP
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+
+ STD %r8,40(%sp) ; save r8
+ STD %r9,48(%sp) ; save r9
+ COPY %r0,%ret1 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+
+ CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; The loop is unrolled twice, so if there is only 1 number
+ ; then go straight to the cleanup code.
+ ;
+ CMPIB,= 1,num,bn_mul_add_words_single_top
+ FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_add_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDD 8(r_ptr),rp_val_1 ; rp[1]
+
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
+
+ XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m[0]
+ FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
+
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
+
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
+
+ LDD -8(%sp),m_0 ; m[0]
+ LDD -40(%sp),m_1 ; m[1]
+ LDD -16(%sp),m1_0 ; m1[0]
+ LDD -48(%sp),m1_1 ; m1[1]
+
+ LDD -24(%sp),ht_0 ; ht[0]
+ LDD -56(%sp),ht_1 ; ht[1]
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
+
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
+ ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
+
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
+ ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
+ EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
+
+ EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
+ ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
+
+ ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+
+ ADD %ret1,lt_0,lt_0 ; lt[0] = lt[0] + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ LDO -2(num),num ; num = num - 2;
+ ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+
+ ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
+ ADD,DC ht_1,%r0,%ret1 ; ht[1]++
+ LDO 16(a_ptr),a_ptr ; a_ptr += 2
+
+ STD lt_1,8(r_ptr) ; rp[1] = lt[1]
+ CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
+ LDO 16(r_ptr),r_ptr ; r_ptr += 2
+
+ CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_add_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDO 8(a_ptr),a_ptr ; a_ptr++
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0 ; m1 = temp1
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD %ret1,tmp_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
+ ADD,DC ht_0,%r0,%ret1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_add_words_exit
+ .EXIT
+
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ LDD -80(%sp),%r9 ; restore r9
+ LDD -88(%sp),%r8 ; restore r8
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+; w on stack at -56(sp)
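Same contract as bn_mul_add_words above, minus the accumulate into rp. A minimal C sketch of the expected semantics (again assuming a 128-bit intermediate; illustrative only):

    #include <stdint.h>
    typedef uint64_t BN_ULONG;

    BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                              int num, BN_ULONG w)
    {
        BN_ULONG carry = 0;
        for (int i = 0; i < num; i++) {
            unsigned __int128 t = (unsigned __int128)ap[i] * w + carry;
            rp[i] = (BN_ULONG)t;
            carry = (BN_ULONG)(t >> 64);
        }
        return carry;
    }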
+
+bn_mul_words
+ .proc
+ .callinfo frame=128
+ .entry
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+ COPY %r0,%ret1 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+
+ CMPIB,>= 0,num,bn_mul_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; See if only 1 word to do, thus just do cleanup
+ ;
+ CMPIB,= 1,num,bn_mul_words_single_top
+ FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
+
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
+
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ FSTD fm_1,-40(%sp) ; -40(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
+
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
+ LDD -8(%sp),m_0
+ LDD -40(%sp),m_1
+
+ LDD -16(%sp),m1_0
+ LDD -48(%sp),m1_1
+ LDD -24(%sp),ht_0
+ LDD -56(%sp),ht_1
+
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
+ ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+ EXTRD,U tmp_1,31,32,m_1 ; m>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ ADD %ret1,lt_0,lt_0 ; lt = lt + c (ret1);
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD lt_1,8(r_ptr) ; rp[1] = lt
+
+ COPY ht_1,%ret1 ; carry = ht
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_mul_words_unroll2
+ LDO 16(r_ptr),r_ptr ; rp++
+
+ CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD %ret1,lt_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ COPY ht_0,%ret1 ; copy carry
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_words_exit
+ .EXIT
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
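This routine writes two output words per input word and carries nothing between iterations. A hedged C sketch of that contract:

    #include <stdint.h>
    typedef uint64_t BN_ULONG;

    void bn_sqr_words_ref(BN_ULONG *rp, const BN_ULONG *ap, int num)
    {
        for (int i = 0; i < num; i++) {
            unsigned __int128 t = (unsigned __int128)ap[i] * ap[i];
            rp[2 * i]     = (BN_ULONG)t;          /* low half of a[i]^2  */
            rp[2 * i + 1] = (BN_ULONG)(t >> 64);  /* high half of a[i]^2 */
        }
    }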
+
+bn_sqr_words
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ CMPIB,>= 0,num,bn_sqr_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+	; If only 1, then go straight to cleanup
+ ;
+ CMPIB,= 1,num,bn_sqr_words_single_top
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+
+bn_sqr_words_unroll2
+ FLDD 0(a_ptr),t_float_0 ; a[0]
+ FLDD 8(a_ptr),t_float_1 ; a[1]
+ XMPYU fht_0,flt_0,fm ; m[0]
+ XMPYU fht_1,flt_1,fm_1 ; m[1]
+
+ FSTD fm,-24(%sp) ; store m[0]
+ FSTD fm_1,-56(%sp) ; store m[1]
+ XMPYU flt_0,flt_0,lt_temp ; lt[0]
+ XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
+
+ FSTD lt_temp,-16(%sp) ; store lt[0]
+ FSTD lt_temp_1,-48(%sp) ; store lt[1]
+ XMPYU fht_0,fht_0,ht_temp ; ht[0]
+ XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
+
+ FSTD ht_temp,-8(%sp) ; store ht[0]
+ FSTD ht_temp_1,-40(%sp) ; store ht[1]
+ LDD -24(%sp),m_0
+ LDD -56(%sp),m_1
+
+ AND m_0,high_mask,tmp_0 ; m[0] & Mask
+ AND m_1,high_mask,tmp_1 ; m[1] & Mask
+ DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
+ DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
+
+ LDD -16(%sp),lt_0
+ LDD -48(%sp),lt_1
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
+ EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
+
+ LDD -8(%sp),ht_0
+ LDD -40(%sp),ht_1
+ ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
+ ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
+
+ ADD lt_0,m_0,lt_0 ; lt = lt+m
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+	STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+	STD ht_0,8(r_ptr) ; rp[1] = ht[0]
+
+ ADD lt_1,m_1,lt_1 ; lt = lt+m
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_1,16(r_ptr) ; rp[2] = lt[1]
+ STD ht_1,24(r_ptr) ; rp[3] = ht[1]
+
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_sqr_words_unroll2
+ LDO 32(r_ptr),r_ptr ; rp += 4
+
+ CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_sqr_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,flt_0,fm ; m
+ FSTD fm,-24(%sp) ; store m
+
+ XMPYU flt_0,flt_0,lt_temp ; lt
+ FSTD lt_temp,-16(%sp) ; store lt
+
+ XMPYU fht_0,fht_0,ht_temp ; ht
+ FSTD ht_temp,-8(%sp) ; store ht
+
+ LDD -24(%sp),m_0 ; load m
+ AND m_0,high_mask,tmp_0 ; m & Mask
+ DEPD,Z m_0,30,31,m_0 ; m << 32+1
+ LDD -16(%sp),lt_0 ; lt
+
+ LDD -8(%sp),ht_0 ; ht
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
+ ADD m_0,lt_0,lt_0 ; lt = lt+m
+ ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD ht_0,8(r_ptr) ; rp[1] = ht
+
+bn_sqr_words_exit
+ .EXIT
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
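A short C sketch of the expected behaviour (word-wise add with carry propagation, final carry returned), for reference while reading the ADD/ADD,DC pairs below:

    #include <stdint.h>
    typedef uint64_t BN_ULONG;

    BN_ULONG bn_add_words_ref(BN_ULONG *r, const BN_ULONG *a,
                              const BN_ULONG *b, int n)
    {
        BN_ULONG carry = 0;
        for (int i = 0; i < n; i++) {
            unsigned __int128 t = (unsigned __int128)a[i] + b[i] + carry;
            r[i]  = (BN_ULONG)t;
            carry = (BN_ULONG)(t >> 64);   /* 0 or 1 */
        }
        return carry;
    }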
+
+t .reg %r22
+b .reg %r21
+l .reg %r20
+
+bn_add_words
+ .proc
+ .entry
+ .callinfo
+ .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ CMPIB,>= 0,n,bn_add_words_exit
+ COPY %r0,%ret1 ; return 0 by default
+
+ ;
+	; If 2 or more numbers, do the loop
+ ;
+ CMPIB,= 1,n,bn_add_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_add_words_unroll2
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,0(r_ptr)
+
+ LDD 8(a_ptr),t
+ LDD 8(b_ptr),b
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_add_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
+
+bn_add_words_single_top
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry (could use CMPCLR??)
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,0(r_ptr)
+
+bn_add_words_exit
+ .EXIT
+ BVE (%rp)
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
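The subtract variant returns the final borrow; the CMPCLR sequences below compute the same borrow that this hedged C sketch expresses directly:

    #include <stdint.h>
    typedef uint64_t BN_ULONG;

    BN_ULONG bn_sub_words_ref(BN_ULONG *r, const BN_ULONG *a,
                              const BN_ULONG *b, int n)
    {
        BN_ULONG borrow = 0;
        for (int i = 0; i < n; i++) {
            BN_ULONG ai = a[i], bi = b[i];
            r[i] = ai - bi - borrow;
            /* borrow out if a < b, or a == b while a borrow was pending */
            borrow = (ai < bi) || (ai == bi && borrow);
        }
        return borrow;
    }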
+
+t1 .reg %r22
+t2 .reg %r21
+sub_tmp1 .reg %r20
+sub_tmp2 .reg %r19
+
+
+bn_sub_words
+ .proc
+ .callinfo
+ .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ CMPIB,>= 0,n,bn_sub_words_exit
+ COPY %r0,%ret1 ; return 0 by default
+
+ ;
+	; If 2 or more numbers, do the loop
+ ;
+ CMPIB,= 1,n,bn_sub_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_sub_words_unroll2
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+ STD sub_tmp1,0(r_ptr)
+
+ LDD 8(a_ptr),t1
+ LDD 8(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+ STD sub_tmp1,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_sub_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
+
+bn_sub_words_single_top
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+
+ STD sub_tmp1,0(r_ptr)
+
+bn_sub_words_exit
+ .EXIT
+ BVE (%rp)
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;------------------------------------------------------------------------------
+;
+; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
+;
+; arg0 = h
+; arg1 = l
+; arg2 = d
+;
+; This is mainly just output from the HP C compiler.
+;
+;------------------------------------------------------------------------------
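In effect the routine divides the 128-bit value (h:l) by d. On a platform with a 128-bit integer type the whole thing collapses to the sketch below (illustrative only, and assuming the usual precondition h < d so the quotient fits in one word):

    #include <stdint.h>

    uint64_t bn_div_words_ref(uint64_t h, uint64_t l, uint64_t d)
    {
        unsigned __int128 n = ((unsigned __int128)h << 64) | l;
        return (uint64_t)(n / d);    /* quotient; caller guarantees h < d */
    }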
+bn_div_words
+ .PROC
+ .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
+ .IMPORT BN_num_bits_word,CODE
+ ;--- not PIC .IMPORT __iob,DATA
+ ;--- not PIC .IMPORT fprintf,CODE
+ .IMPORT abort,CODE
+ .IMPORT $$div2U,MILLICODE
+ .CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
+ .ENTRY
+ STW %r2,-20(%r30) ;offset 0x8ec
+ STW,MA %r3,192(%r30) ;offset 0x8f0
+ STW %r4,-188(%r30) ;offset 0x8f4
+ DEPD %r5,31,32,%r6 ;offset 0x8f8
+ STD %r6,-184(%r30) ;offset 0x8fc
+ DEPD %r7,31,32,%r8 ;offset 0x900
+ STD %r8,-176(%r30) ;offset 0x904
+ STW %r9,-168(%r30) ;offset 0x908
+ LDD -248(%r30),%r3 ;offset 0x90c
+ COPY %r26,%r4 ;offset 0x910
+ COPY %r24,%r5 ;offset 0x914
+ DEPD %r25,31,32,%r4 ;offset 0x918
+ CMPB,*<> %r3,%r0,$0006000C ;offset 0x91c
+ DEPD %r23,31,32,%r5 ;offset 0x920
+ MOVIB,TR -1,%r29,$00060002 ;offset 0x924
+ EXTRD,U %r29,31,32,%r28 ;offset 0x928
+$0006002A
+ LDO -1(%r29),%r29 ;offset 0x92c
+ SUB %r23,%r7,%r23 ;offset 0x930
+$00060024
+ SUB %r4,%r31,%r25 ;offset 0x934
+ AND %r25,%r19,%r26 ;offset 0x938
+ CMPB,*<>,N %r0,%r26,$00060046 ;offset 0x93c
+ DEPD,Z %r25,31,32,%r20 ;offset 0x940
+ OR %r20,%r24,%r21 ;offset 0x944
+ CMPB,*<<,N %r21,%r23,$0006002A ;offset 0x948
+ SUB %r31,%r2,%r31 ;offset 0x94c
+$00060046
+$0006002E
+ DEPD,Z %r23,31,32,%r25 ;offset 0x950
+ EXTRD,U %r23,31,32,%r26 ;offset 0x954
+ AND %r25,%r19,%r24 ;offset 0x958
+ ADD,L %r31,%r26,%r31 ;offset 0x95c
+ CMPCLR,*>>= %r5,%r24,%r0 ;offset 0x960
+ LDO 1(%r31),%r31 ;offset 0x964
+$00060032
+ CMPB,*<<=,N %r31,%r4,$00060036 ;offset 0x968
+ LDO -1(%r29),%r29 ;offset 0x96c
+ ADD,L %r4,%r3,%r4 ;offset 0x970
+$00060036
+ ADDIB,=,N -1,%r8,$D0 ;offset 0x974
+ SUB %r5,%r24,%r28 ;offset 0x978
+$0006003A
+ SUB %r4,%r31,%r24 ;offset 0x97c
+ SHRPD %r24,%r28,32,%r4 ;offset 0x980
+ DEPD,Z %r29,31,32,%r9 ;offset 0x984
+ DEPD,Z %r28,31,32,%r5 ;offset 0x988
+$0006001C
+ EXTRD,U %r4,31,32,%r31 ;offset 0x98c
+ CMPB,*<>,N %r31,%r2,$00060020 ;offset 0x990
+ MOVB,TR %r6,%r29,$D1 ;offset 0x994
+ STD %r29,-152(%r30) ;offset 0x998
+$0006000C
+ EXTRD,U %r3,31,32,%r25 ;offset 0x99c
+ COPY %r3,%r26 ;offset 0x9a0
+ EXTRD,U %r3,31,32,%r9 ;offset 0x9a4
+ EXTRD,U %r4,31,32,%r8 ;offset 0x9a8
+ .CALL ARGW0=GR,ARGW1=GR,RTNVAL=GR ;in=25,26;out=28;
+ B,L BN_num_bits_word,%r2 ;offset 0x9ac
+ EXTRD,U %r5,31,32,%r7 ;offset 0x9b0
+ LDI 64,%r20 ;offset 0x9b4
+ DEPD %r7,31,32,%r5 ;offset 0x9b8
+ DEPD %r8,31,32,%r4 ;offset 0x9bc
+ DEPD %r9,31,32,%r3 ;offset 0x9c0
+ CMPB,= %r28,%r20,$00060012 ;offset 0x9c4
+ COPY %r28,%r24 ;offset 0x9c8
+ MTSARCM %r24 ;offset 0x9cc
+ DEPDI,Z -1,%sar,1,%r19 ;offset 0x9d0
+ CMPB,*>>,N %r4,%r19,$D2 ;offset 0x9d4
+$00060012
+ SUBI 64,%r24,%r31 ;offset 0x9d8
+ CMPCLR,*<< %r4,%r3,%r0 ;offset 0x9dc
+ SUB %r4,%r3,%r4 ;offset 0x9e0
+$00060016
+ CMPB,= %r31,%r0,$0006001A ;offset 0x9e4
+ COPY %r0,%r9 ;offset 0x9e8
+ MTSARCM %r31 ;offset 0x9ec
+ DEPD,Z %r3,%sar,64,%r3 ;offset 0x9f0
+ SUBI 64,%r31,%r26 ;offset 0x9f4
+ MTSAR %r26 ;offset 0x9f8
+ SHRPD %r4,%r5,%sar,%r4 ;offset 0x9fc
+ MTSARCM %r31 ;offset 0xa00
+ DEPD,Z %r5,%sar,64,%r5 ;offset 0xa04
+$0006001A
+ DEPDI,Z -1,31,32,%r19 ;offset 0xa08
+ AND %r3,%r19,%r29 ;offset 0xa0c
+ EXTRD,U %r29,31,32,%r2 ;offset 0xa10
+ DEPDI,Z -1,63,32,%r6 ;offset 0xa14
+ MOVIB,TR 2,%r8,$0006001C ;offset 0xa18
+ EXTRD,U %r3,63,32,%r7 ;offset 0xa1c
+$D2
+ ;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
+ ;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24
+ ;--- not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
+ ;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
+ ;--- not PIC B,L fprintf,%r2 ;offset 0xa2c
+ ;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30
+ .CALL ;
+ B,L abort,%r2 ;offset 0xa34
+ NOP ;offset 0xa38
+ B $D3 ;offset 0xa3c
+ LDW -212(%r30),%r2 ;offset 0xa40
+$00060020
+ COPY %r4,%r26 ;offset 0xa44
+ EXTRD,U %r4,31,32,%r25 ;offset 0xa48
+ COPY %r2,%r24 ;offset 0xa4c
+ .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
+ B,L $$div2U,%r31 ;offset 0xa50
+ EXTRD,U %r2,31,32,%r23 ;offset 0xa54
+ DEPD %r28,31,32,%r29 ;offset 0xa58
+$00060022
+ STD %r29,-152(%r30) ;offset 0xa5c
+$D1
+ AND %r5,%r19,%r24 ;offset 0xa60
+ EXTRD,U %r24,31,32,%r24 ;offset 0xa64
+ STW %r2,-160(%r30) ;offset 0xa68
+ STW %r7,-128(%r30) ;offset 0xa6c
+ FLDD -152(%r30),%fr4 ;offset 0xa70
+ FLDD -152(%r30),%fr7 ;offset 0xa74
+ FLDW -160(%r30),%fr8L ;offset 0xa78
+ FLDW -128(%r30),%fr5L ;offset 0xa7c
+ XMPYU %fr8L,%fr7L,%fr10 ;offset 0xa80
+ FSTD %fr10,-136(%r30) ;offset 0xa84
+ XMPYU %fr8L,%fr7R,%fr22 ;offset 0xa88
+ FSTD %fr22,-144(%r30) ;offset 0xa8c
+ XMPYU %fr5L,%fr4L,%fr11 ;offset 0xa90
+ XMPYU %fr5L,%fr4R,%fr23 ;offset 0xa94
+ FSTD %fr11,-112(%r30) ;offset 0xa98
+ FSTD %fr23,-120(%r30) ;offset 0xa9c
+ LDD -136(%r30),%r28 ;offset 0xaa0
+ DEPD,Z %r28,31,32,%r31 ;offset 0xaa4
+ LDD -144(%r30),%r20 ;offset 0xaa8
+ ADD,L %r20,%r31,%r31 ;offset 0xaac
+ LDD -112(%r30),%r22 ;offset 0xab0
+ DEPD,Z %r22,31,32,%r22 ;offset 0xab4
+ LDD -120(%r30),%r21 ;offset 0xab8
+ B $00060024 ;offset 0xabc
+ ADD,L %r21,%r22,%r23 ;offset 0xac0
+$D0
+ OR %r9,%r29,%r29 ;offset 0xac4
+$00060040
+ EXTRD,U %r29,31,32,%r28 ;offset 0xac8
+$00060002
+$L2
+ LDW -212(%r30),%r2 ;offset 0xacc
+$D3
+ LDW -168(%r30),%r9 ;offset 0xad0
+ LDD -176(%r30),%r8 ;offset 0xad4
+ EXTRD,U %r8,31,32,%r7 ;offset 0xad8
+ LDD -184(%r30),%r6 ;offset 0xadc
+ EXTRD,U %r6,31,32,%r5 ;offset 0xae0
+ LDW -188(%r30),%r4 ;offset 0xae4
+ BVE (%r2) ;offset 0xae8
+ .EXIT
+ LDW,MB -192(%r30),%r3 ;offset 0xaec
+ .PROCEND ;in=23,25;out=28,29;fpin=105,107;
+
+
+
+
+;----------------------------------------------------------------------------
+;
+; Registers to hold 64-bit values to manipulate. The "L" part
+; of the register corresponds to the upper 32-bits, while the "R"
+; part corresponds to the lower 32-bits
+;
+; Note that when using b6 and b7, the code must save these before
+; using them because they are callee save registers
+;
+;
+; Floating point registers to use to save values that
+; are manipulated. These don't collide with ftemp1-6 and
+; are all caller save registers
+;
+a0 .reg %fr22
+a0L .reg %fr22L
+a0R .reg %fr22R
+
+a1 .reg %fr23
+a1L .reg %fr23L
+a1R .reg %fr23R
+
+a2 .reg %fr24
+a2L .reg %fr24L
+a2R .reg %fr24R
+
+a3 .reg %fr25
+a3L .reg %fr25L
+a3R .reg %fr25R
+
+a4 .reg %fr26
+a4L .reg %fr26L
+a4R .reg %fr26R
+
+a5 .reg %fr27
+a5L .reg %fr27L
+a5R .reg %fr27R
+
+a6 .reg %fr28
+a6L .reg %fr28L
+a6R .reg %fr28R
+
+a7 .reg %fr29
+a7L .reg %fr29L
+a7R .reg %fr29R
+
+b0 .reg %fr30
+b0L .reg %fr30L
+b0R .reg %fr30R
+
+b1 .reg %fr31
+b1L .reg %fr31L
+b1R .reg %fr31R
+
+;
+; Temporary floating point variables, these are all caller save
+; registers
+;
+ftemp1 .reg %fr4
+ftemp2 .reg %fr5
+ftemp3 .reg %fr6
+ftemp4 .reg %fr7
+
+;
+; The B set of registers when used.
+;
+
+b2 .reg %fr8
+b2L .reg %fr8L
+b2R .reg %fr8R
+
+b3 .reg %fr9
+b3L .reg %fr9L
+b3R .reg %fr9R
+
+b4 .reg %fr10
+b4L .reg %fr10L
+b4R .reg %fr10R
+
+b5 .reg %fr11
+b5L .reg %fr11L
+b5R .reg %fr11R
+
+b6 .reg %fr12
+b6L .reg %fr12L
+b6R .reg %fr12R
+
+b7 .reg %fr13
+b7L .reg %fr13L
+b7R .reg %fr13R
+
+c1 .reg %r21 ; only reg
+temp1 .reg %r20 ; only reg
+temp2 .reg %r19 ; only reg
+temp3 .reg %r31 ; only reg
+
+m1 .reg %r28
+c2 .reg %r23
+high_one .reg %r1
+ht .reg %r6
+lt .reg %r5
+m .reg %r4
+c3 .reg %r3
+
+SQR_ADD_C .macro A0L,A0R,C1,C2,C3
+ XMPYU A0L,A0R,ftemp1 ; m
+ FSTD ftemp1,-24(%sp) ; store m
+
+ XMPYU A0R,A0R,ftemp2 ; lt
+ FSTD ftemp2,-16(%sp) ; store lt
+
+ XMPYU A0L,A0L,ftemp3 ; ht
+ FSTD ftemp3,-8(%sp) ; store ht
+
+ LDD -24(%sp),m ; load m
+ AND m,high_mask,temp2 ; m & Mask
+ DEPD,Z m,30,31,temp3 ; m << 32+1
+ LDD -16(%sp),lt ; lt
+
+ LDD -8(%sp),ht ; ht
+ EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
+ ADD temp3,lt,lt ; lt = lt+m
+ ADD,L ht,temp1,ht ; ht += temp1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C2,ht,C2 ; c2=c2+ht
+ ADD,DC C3,%r0,C3 ; c3++
+.endm
+
+SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
+ XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,A1L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD ht,ht,ht ; ht=ht+ht;
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+
+ ADD lt,lt,lt ; lt=lt+lt;
+ ADD,DC ht,%r0,ht ; add in carry (ht++)
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
+ LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
+;
+;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
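The SQR_ADD_C/SQR_ADD_C2 macros above keep a three-word column accumulator (c1, c2, c3) that is rotated after every output word; SQR_ADD_C2 adds a cross product twice because a[i]*a[j] appears in both orders when squaring. A hedged C sketch of that comba-style column accumulation, shown for the 4-word case and with an illustrative mul_add_c helper rather than the macros themselves:

    #include <stdint.h>
    typedef uint64_t BN_ULONG;

    /* add the 128-bit product a*b into the three-word column accumulator */
    static void mul_add_c(BN_ULONG a, BN_ULONG b,
                          BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
    {
        unsigned __int128 t = (unsigned __int128)a * b;
        BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);
        *c1 += lo; if (*c1 < lo) hi++;      /* carry out of the low word */
        *c2 += hi; if (*c2 < hi) (*c3)++;   /* carry into the top word   */
    }

    void bn_sqr_comba4_ref(BN_ULONG *r, const BN_ULONG *a)
    {
        BN_ULONG c1 = 0, c2 = 0, c3 = 0;
        for (int k = 0; k < 7; k++) {               /* one output column at a time */
            for (int i = 0; i <= k / 2; i++) {
                int j = k - i;
                if (j > 3) continue;
                mul_add_c(a[i], a[j], &c1, &c2, &c3);       /* SQR_ADD_C          */
                if (i != j)
                    mul_add_c(a[i], a[j], &c1, &c2, &c3);   /* SQR_ADD_C2 doubles  */
            }
            r[k] = c1;                              /* low word of the column */
            c1 = c2; c2 = c3; c3 = 0;               /* rotate the accumulator */
        }
        r[7] = c1;
    }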
+
+bn_sqr_comba8
+ .PROC
+ .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .ENTRY
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
+ SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
+ STD c2,56(r_ptr) ; r[7] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
+ SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
+ STD c3,64(r_ptr) ; r[8] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
+ STD c1,72(r_ptr) ; r[9] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a5L,a5R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
+ SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
+ STD c2,80(r_ptr) ; r[10] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
+ STD c3,88(r_ptr) ; r[11] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a6L,a6R,c1,c2,c3
+ SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
+ STD c1,96(r_ptr) ; r[12] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
+ STD c2,104(r_ptr) ; r[13] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a7L,a7R,c3,c1,c2
+ STD c3, 112(r_ptr) ; r[14] = c3
+ STD c1, 120(r_ptr) ; r[15] = c1
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ STD c2,56(r_ptr) ; r[7] = c2;
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;---------------------------------------------------------------------------
+
+MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
+ XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,B0L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+	ADD,DC ht,%r0,ht ; ht++ (add in carry)
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
+
+;
+;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba8
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD %fr12,32(%sp) ; save fr12
+	FSTD %fr13,40(%sp) ; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+ FLDD 32(b_ptr),b4
+ FLDD 40(b_ptr),b5
+ FLDD 48(b_ptr),b6
+ FLDD 56(b_ptr),b7
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
+ STD c1,48(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
+ STD c2,56(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
+ STD c3,64(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
+ STD c1,72(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
+ STD c2,80(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
+ STD c3,88(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
+ STD c1,96(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
+ STD c2,104(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
+ STD c3,112(r_ptr)
+ STD c1,120(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD %fr12,32(%sp) ; save fr12
+	FSTD %fr13,40(%sp) ; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ STD c1,48(r_ptr)
+ STD c2,56(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SPACE $PRIVATE$,SORT=16
+;--- not PIC .IMPORT $global$,DATA
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c
+;--- not PIC C$7
+;--- not PIC .ALIGN 8
+;--- not PIC .STRINGZ "Division would overflow (%d)\n"
+ .END
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2W.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2W.S
new file mode 100644
index 00000000..a9954575
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/pa-risc2W.S
@@ -0,0 +1,1605 @@
+;
+; PA-RISC 64-bit implementation of bn_asm code
+;
+; This code is approximately 2x faster than the C version
+; for RSA/DSA.
+;
+; See http://devresource.hp.com/ for more details on the PA-RISC
+; architecture. Also see the book "PA-RISC 2.0 Architecture"
+; by Gerry Kane for information on the instruction set architecture.
+;
+; Code written by Chris Ruemmler (with some help from the HP C
+; compiler).
+;
+; The code compiles with HP's assembler
+;
+
+ .level 2.0W
+ .space $TEXT$
+ .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
+
+;
+; Global Register definitions used for the routines.
+;
+; Some information about HP's runtime architecture for 64-bits.
+;
+; "Caller save" means the calling function must save the register
+; if it wants the register to be preserved.
+; "Callee save" means if a function uses the register, it must save
+; the value before using it.
+;
+; For the floating point registers
+;
+; "caller save" registers: fr4-fr11, fr22-fr31
+; "callee save" registers: fr12-fr21
+; "special" registers: fr0-fr3 (status and exception registers)
+;
+; For the integer registers
+; value zero : r0
+; "caller save" registers: r1,r19-r26
+; "callee save" registers: r3-r18
+; return register : r2 (rp)
+; return values ; r28 (ret0)
+; Stack pointer ; r30 (sp)
+; global data pointer ; r27 (dp)
+; argument pointer ; r29 (ap)
+; millicode return ptr ; r31 (also a caller save register)
+
+
+;
+; Arguments to the routines
+;
+r_ptr .reg %r26
+a_ptr .reg %r25
+b_ptr .reg %r24
+num .reg %r24
+w .reg %r23
+n .reg %r23
+
+
+;
+; Globals used in some routines
+;
+
+top_overflow .reg %r29
+high_mask .reg %r22 ; value 0xffffffff80000000L
+
+
+;------------------------------------------------------------------------------
+;
+; bn_mul_add_words
+;
+;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
+; int num, BN_ULONG w)
+;
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = num
+; arg3 = w
+;
+; Local register definitions
+;
+
+fm1 .reg %fr22
+fm .reg %fr23
+ht_temp .reg %fr24
+ht_temp_1 .reg %fr25
+lt_temp .reg %fr26
+lt_temp_1 .reg %fr27
+fm1_1 .reg %fr28
+fm_1 .reg %fr29
+
+fw_h .reg %fr7L
+fw_l .reg %fr7R
+fw .reg %fr7
+
+fht_0 .reg %fr8L
+flt_0 .reg %fr8R
+t_float_0 .reg %fr8
+
+fht_1 .reg %fr9L
+flt_1 .reg %fr9R
+t_float_1 .reg %fr9
+
+tmp_0 .reg %r31
+tmp_1 .reg %r21
+m_0 .reg %r20
+m_1 .reg %r19
+ht_0 .reg %r1
+ht_1 .reg %r3
+lt_0 .reg %r4
+lt_1 .reg %r5
+m1_0 .reg %r6
+m1_1 .reg %r7
+rp_val .reg %r8
+rp_val_1 .reg %r9
+
+bn_mul_add_words
+ .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
+ .proc
+ .callinfo frame=128
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP ; Needed to make the loop 16-byte aligned
+ NOP ; Needed to make the loop 16-byte aligned
+
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+ STD %r8,40(%sp) ; save r8
+
+ STD %r9,48(%sp) ; save r9
+ COPY %r0,%ret0 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+ STD w,56(%sp) ; store w on stack
+
+ CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; The loop is unrolled twice, so if there is only 1 number
+ ; then go straight to the cleanup code.
+ ;
+ CMPIB,= 1,num,bn_mul_add_words_single_top
+ FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_add_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDD 8(r_ptr),rp_val_1 ; rp[1]
+
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
+
+ XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m[0]
+ FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
+
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
+
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
+
+ LDD -8(%sp),m_0 ; m[0]
+ LDD -40(%sp),m_1 ; m[1]
+ LDD -16(%sp),m1_0 ; m1[0]
+ LDD -48(%sp),m1_1 ; m1[1]
+
+ LDD -24(%sp),ht_0 ; ht[0]
+ LDD -56(%sp),ht_1 ; ht[1]
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
+
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
+ ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
+
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
+ ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
+ EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
+
+ EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
+ ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
+
+ ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+
+ ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ LDO -2(num),num ; num = num - 2;
+ ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+
+ ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
+ ADD,DC ht_1,%r0,%ret0 ; ht[1]++
+ LDO 16(a_ptr),a_ptr ; a_ptr += 2
+
+ STD lt_1,8(r_ptr) ; rp[1] = lt[1]
+ CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
+ LDO 16(r_ptr),r_ptr ; r_ptr += 2
+
+ CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_add_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDO 8(a_ptr),a_ptr ; a_ptr++
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0 ; m1 = temp1
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD %ret0,tmp_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
+ ADD,DC ht_0,%r0,%ret0 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_add_words_exit
+ .EXIT
+ LDD -80(%sp),%r9 ; restore r9
+ LDD -88(%sp),%r8 ; restore r8
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+; arg3 = w
+
+bn_mul_words
+ .proc
+ .callinfo frame=128
+ .entry
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ STD %r7,32(%sp) ; save r7
+ COPY %r0,%ret0 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+ STD w,56(%sp) ; w on stack
+
+ CMPIB,>= 0,num,bn_mul_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+	; See if there is only 1 word to do; if so, just do the cleanup
+ ;
+ CMPIB,= 1,num,bn_mul_words_single_top
+ FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
+
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
+
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ FSTD fm_1,-40(%sp) ; -40(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
+
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
+ LDD -8(%sp),m_0
+ LDD -40(%sp),m_1
+
+ LDD -16(%sp),m1_0
+ LDD -48(%sp),m1_1
+ LDD -24(%sp),ht_0
+ LDD -56(%sp),ht_1
+
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
+ ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+ EXTRD,U tmp_1,31,32,m_1 ; m>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ ADD %ret0,lt_0,lt_0 ; lt = lt + c (ret0);
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD lt_1,8(r_ptr) ; rp[1] = lt
+
+ COPY ht_1,%ret0 ; carry = ht
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_mul_words_unroll2
+	LDO	16(r_ptr),r_ptr	; rp += 2
+
+ CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD %ret0,lt_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ COPY ht_0,%ret0 ; copy carry
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_words_exit
+ .EXIT
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
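+; For reference, a C-level sketch of the semantics (illustrative only,
+; not part of this file; assumes BN_ULONG is 64-bit and unsigned __int128
+; is available).  Each input word is squared independently:
+;
+;   void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;   {
+;       for (int i = 0; i < num; i++) {
+;           unsigned __int128 t = (unsigned __int128)ap[i] * ap[i];
+;           rp[2 * i]     = (BN_ULONG)t;          /* low word  */
+;           rp[2 * i + 1] = (BN_ULONG)(t >> 64);  /* high word */
+;       }
+;   }
+;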
+
+bn_sqr_words
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ CMPIB,>= 0,num,bn_sqr_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+	; If only 1, then go straight to cleanup
+ ;
+ CMPIB,= 1,num,bn_sqr_words_single_top
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+
+bn_sqr_words_unroll2
+ FLDD 0(a_ptr),t_float_0 ; a[0]
+ FLDD 8(a_ptr),t_float_1 ; a[1]
+ XMPYU fht_0,flt_0,fm ; m[0]
+ XMPYU fht_1,flt_1,fm_1 ; m[1]
+
+ FSTD fm,-24(%sp) ; store m[0]
+ FSTD fm_1,-56(%sp) ; store m[1]
+ XMPYU flt_0,flt_0,lt_temp ; lt[0]
+ XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
+
+ FSTD lt_temp,-16(%sp) ; store lt[0]
+ FSTD lt_temp_1,-48(%sp) ; store lt[1]
+ XMPYU fht_0,fht_0,ht_temp ; ht[0]
+ XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
+
+ FSTD ht_temp,-8(%sp) ; store ht[0]
+ FSTD ht_temp_1,-40(%sp) ; store ht[1]
+ LDD -24(%sp),m_0
+ LDD -56(%sp),m_1
+
+ AND m_0,high_mask,tmp_0 ; m[0] & Mask
+ AND m_1,high_mask,tmp_1 ; m[1] & Mask
+ DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
+ DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
+
+ LDD -16(%sp),lt_0
+ LDD -48(%sp),lt_1
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
+ EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
+
+ LDD -8(%sp),ht_0
+ LDD -40(%sp),ht_1
+ ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
+ ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
+
+ ADD lt_0,m_0,lt_0 ; lt = lt+m
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+	STD	ht_0,8(r_ptr)	; rp[1] = ht[0]
+
+ ADD lt_1,m_1,lt_1 ; lt = lt+m
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_1,16(r_ptr) ; rp[2] = lt[1]
+ STD ht_1,24(r_ptr) ; rp[3] = ht[1]
+
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_sqr_words_unroll2
+ LDO 32(r_ptr),r_ptr ; rp += 4
+
+ CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_sqr_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,flt_0,fm ; m
+ FSTD fm,-24(%sp) ; store m
+
+ XMPYU flt_0,flt_0,lt_temp ; lt
+ FSTD lt_temp,-16(%sp) ; store lt
+
+ XMPYU fht_0,fht_0,ht_temp ; ht
+ FSTD ht_temp,-8(%sp) ; store ht
+
+ LDD -24(%sp),m_0 ; load m
+ AND m_0,high_mask,tmp_0 ; m & Mask
+ DEPD,Z m_0,30,31,m_0 ; m << 32+1
+ LDD -16(%sp),lt_0 ; lt
+
+ LDD -8(%sp),ht_0 ; ht
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
+ ADD m_0,lt_0,lt_0 ; lt = lt+m
+ ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD ht_0,8(r_ptr) ; rp[1] = ht
+
+bn_sqr_words_exit
+ .EXIT
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
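+;
+; For reference, a C-level sketch of the semantics (illustrative only, not
+; part of this file; assumes BN_ULONG is 64-bit and unsigned __int128 is
+; available).  The return value is the carry out of the top word:
+;
+;   BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;   {
+;       BN_ULONG c = 0;
+;       for (int i = 0; i < n; i++) {
+;           unsigned __int128 t = (unsigned __int128)a[i] + b[i] + c;
+;           r[i] = (BN_ULONG)t;
+;           c    = (BN_ULONG)(t >> 64);
+;       }
+;       return c;
+;   }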
+
+t .reg %r22
+b .reg %r21
+l .reg %r20
+
+bn_add_words
+ .proc
+ .entry
+ .callinfo
+ .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ CMPIB,>= 0,n,bn_add_words_exit
+ COPY %r0,%ret0 ; return 0 by default
+
+ ;
+	; If there are 2 or more words, do the loop
+ ;
+ CMPIB,= 1,n,bn_add_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_add_words_unroll2
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,0(r_ptr)
+
+ LDD 8(a_ptr),t
+ LDD 8(b_ptr),b
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry
+	ADD	t,b,l			; l = t + b[1]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_add_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
+
+bn_add_words_single_top
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry (could use CMPCLR??)
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,0(r_ptr)
+
+bn_add_words_exit
+ .EXIT
+ BVE (%rp)
+ NOP
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
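+;
+; For reference, a C-level sketch of the semantics (illustrative only, not
+; part of this file; assumes BN_ULONG is 64-bit).  The return value is the
+; borrow out of the top word:
+;
+;   BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;   {
+;       BN_ULONG borrow = 0;
+;       for (int i = 0; i < n; i++) {
+;           BN_ULONG t = a[i] - b[i] - borrow;
+;           borrow = (a[i] < b[i]) || (a[i] == b[i] && borrow);
+;           r[i] = t;
+;       }
+;       return borrow;
+;   }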
+
+t1 .reg %r22
+t2 .reg %r21
+sub_tmp1 .reg %r20
+sub_tmp2 .reg %r19
+
+
+bn_sub_words
+ .proc
+ .callinfo
+ .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ CMPIB,>= 0,n,bn_sub_words_exit
+ COPY %r0,%ret0 ; return 0 by default
+
+ ;
+	; If there are 2 or more words, do the loop
+ ;
+ CMPIB,= 1,n,bn_sub_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_sub_words_unroll2
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+ STD sub_tmp1,0(r_ptr)
+
+ LDD 8(a_ptr),t1
+ LDD 8(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+ STD sub_tmp1,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_sub_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
+
+bn_sub_words_single_top
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+
+ STD sub_tmp1,0(r_ptr)
+
+bn_sub_words_exit
+ .EXIT
+ BVE (%rp)
+ NOP
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;------------------------------------------------------------------------------
+;
+; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
+;
+; arg0 = h
+; arg1 = l
+; arg2 = d
+;
+; This is mainly just modified assembly from the compiler, thus the
+; lack of variable names.
+;
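+; For reference (illustrative only, not part of this file), the routine
+; divides the 128-bit value h:l by d and returns the 64-bit quotient; the
+; quotient is assumed to fit in 64 bits, otherwise the code below prints
+; the "Division would overflow" message and aborts (d == 0 returns all
+; ones).  With a compiler that provides unsigned __int128 this is simply:
+;
+;   BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+;   {
+;       unsigned __int128 n = ((unsigned __int128)h << 64) | l;
+;       return (BN_ULONG)(n / d);
+;   }
+;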
+;------------------------------------------------------------------------------
+bn_div_words
+ .proc
+ .callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .IMPORT BN_num_bits_word,CODE,NO_RELOCATION
+ .IMPORT __iob,DATA
+ .IMPORT fprintf,CODE,NO_RELOCATION
+ .IMPORT abort,CODE,NO_RELOCATION
+ .IMPORT $$div2U,MILLICODE
+ .entry
+ STD %r2,-16(%r30)
+ STD,MA %r3,352(%r30)
+ STD %r4,-344(%r30)
+ STD %r5,-336(%r30)
+ STD %r6,-328(%r30)
+ STD %r7,-320(%r30)
+ STD %r8,-312(%r30)
+ STD %r9,-304(%r30)
+ STD %r10,-296(%r30)
+
+ STD %r27,-288(%r30) ; save gp
+
+ COPY %r24,%r3 ; save d
+ COPY %r26,%r4 ; save h (high 64-bits)
+ LDO -1(%r0),%ret0 ; return -1 by default
+
+ CMPB,*= %r0,%arg2,$D3 ; if (d == 0)
+ COPY %r25,%r5 ; save l (low 64-bits)
+
+ LDO -48(%r30),%r29 ; create ap
+ .CALL ;in=26,29;out=28;
+ B,L BN_num_bits_word,%r2
+ COPY %r3,%r26
+ LDD -288(%r30),%r27 ; restore gp
+ LDI 64,%r21
+
+ CMPB,= %r21,%ret0,$00000012 ;if (i == 64) (forward)
+ COPY %ret0,%r24 ; i
+ MTSARCM %r24
+ DEPDI,Z -1,%sar,1,%r29
+ CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
+
+$00000012
+ SUBI 64,%r24,%r31 ; i = 64 - i;
+ CMPCLR,*<< %r4,%r3,%r0 ; if (h >= d)
+ SUB %r4,%r3,%r4 ; h -= d
+ CMPB,= %r31,%r0,$0000001A ; if (i)
+ COPY %r0,%r10 ; ret = 0
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r3,%sar,64,%r3 ; d <<= i;
+	SUBI	64,%r31,%r19	; 64 - i; redundant
+ MTSAR %r19 ; (64 -i) to shift
+ SHRPD %r4,%r5,%sar,%r4 ; l>> (64-i)
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r5,%sar,64,%r5 ; l <<= i;
+
+$0000001A
+ DEPDI,Z -1,31,32,%r19
+	EXTRD,U	%r3,31,32,%r6	; dh=(d&0xffffffff00000000)>>32
+	EXTRD,U	%r3,63,32,%r8	; dl = d&0xffffffff
+ LDO 2(%r0),%r9
+ STD %r3,-280(%r30) ; "d" to stack
+
+$0000001C
+ DEPDI,Z -1,63,32,%r29 ;
+ EXTRD,U %r4,31,32,%r31 ; h >> 32
+ CMPB,*=,N %r31,%r6,$D2 ; if ((h>>32) != dh)(forward) div
+ COPY %r4,%r26
+ EXTRD,U %r4,31,32,%r25
+ COPY %r6,%r24
+ .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
+ B,L $$div2U,%r2
+ EXTRD,U %r6,31,32,%r23
+ DEPD %r28,31,32,%r29
+$D2
+ STD %r29,-272(%r30) ; q
+ AND %r5,%r19,%r24 ; t & 0xffffffff00000000;
+ EXTRD,U %r24,31,32,%r24 ; ???
+ FLDD -272(%r30),%fr7 ; q
+ FLDD -280(%r30),%fr8 ; d
+ XMPYU %fr8L,%fr7L,%fr10
+ FSTD %fr10,-256(%r30)
+ XMPYU %fr8L,%fr7R,%fr22
+ FSTD %fr22,-264(%r30)
+ XMPYU %fr8R,%fr7L,%fr11
+ XMPYU %fr8R,%fr7R,%fr23
+ FSTD %fr11,-232(%r30)
+ FSTD %fr23,-240(%r30)
+ LDD -256(%r30),%r28
+ DEPD,Z %r28,31,32,%r2
+ LDD -264(%r30),%r20
+ ADD,L %r20,%r2,%r31
+ LDD -232(%r30),%r22
+ DEPD,Z %r22,31,32,%r22
+ LDD -240(%r30),%r21
+ B $00000024 ; enter loop
+ ADD,L %r21,%r22,%r23
+
+$0000002A
+ LDO -1(%r29),%r29
+ SUB %r23,%r8,%r23
+$00000024
+ SUB %r4,%r31,%r25
+ AND %r25,%r19,%r26
+ CMPB,*<>,N %r0,%r26,$00000046 ; (forward)
+ DEPD,Z %r25,31,32,%r20
+ OR %r20,%r24,%r21
+ CMPB,*<<,N %r21,%r23,$0000002A ;(backward)
+ SUB %r31,%r6,%r31
+;-------------Break path---------------------
+
+$00000046
+ DEPD,Z %r23,31,32,%r25 ;tl
+ EXTRD,U %r23,31,32,%r26 ;t
+	AND	%r25,%r19,%r24		;tl = (tl<<32)&0xffffffff00000000L
+ ADD,L %r31,%r26,%r31 ;th += t;
+ CMPCLR,*>>= %r5,%r24,%r0 ;if (l<tl)
+ LDO 1(%r31),%r31 ; th++;
+	CMPB,*<<=,N	%r31,%r4,$00000036	;if (h < th) (forward)
+ LDO -1(%r29),%r29 ;q--;
+ ADD,L %r4,%r3,%r4 ;h += d;
+$00000036
+ ADDIB,=,N -1,%r9,$D1 ;if (--count == 0) break (forward)
+ SUB %r5,%r24,%r28 ; l -= tl;
+ SUB %r4,%r31,%r24 ; h -= th;
+ SHRPD %r24,%r28,32,%r4 ; h = ((h<<32)|(l>>32));
+ DEPD,Z %r29,31,32,%r10 ; ret = q<<32
+ b $0000001C
+ DEPD,Z %r28,31,32,%r5 ; l = l << 32
+
+$D1
+ OR %r10,%r29,%r28 ; ret |= q
+$D3
+ LDD -368(%r30),%r2
+$D0
+ LDD -296(%r30),%r10
+ LDD -304(%r30),%r9
+ LDD -312(%r30),%r8
+ LDD -320(%r30),%r7
+ LDD -328(%r30),%r6
+ LDD -336(%r30),%r5
+ LDD -344(%r30),%r4
+ BVE (%r2)
+ .EXIT
+ LDD,MB -352(%r30),%r3
+
+bn_div_err_case
+ MFIA %r6
+ ADDIL L'bn_div_words-bn_div_err_case,%r6,%r1
+ LDO R'bn_div_words-bn_div_err_case(%r1),%r6
+ ADDIL LT'__iob,%r27,%r1
+ LDD RT'__iob(%r1),%r26
+ ADDIL L'C$4-bn_div_words,%r6,%r1
+ LDO R'C$4-bn_div_words(%r1),%r25
+ LDO 64(%r26),%r26
+ .CALL ;in=24,25,26,29;out=28;
+ B,L fprintf,%r2
+ LDO -48(%r30),%r29
+ LDD -288(%r30),%r27
+ .CALL ;in=29;
+ B,L abort,%r2
+ LDO -48(%r30),%r29
+ LDD -288(%r30),%r27
+ B $D0
+ LDD -368(%r30),%r2
+ .PROCEND ;in=24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+; Registers to hold 64-bit values to manipulate. The "L" part
+; of the register corresponds to the upper 32-bits, while the "R"
+; part corresponds to the lower 32-bits
+;
+; Note that when using b6 and b7, the code must save them first
+; because they are callee-save registers
+;
+;
+; Floating point registers used to hold the values being
+; manipulated. These don't collide with ftemp1-6 and
+; are all caller-save registers
+;
+a0 .reg %fr22
+a0L .reg %fr22L
+a0R .reg %fr22R
+
+a1 .reg %fr23
+a1L .reg %fr23L
+a1R .reg %fr23R
+
+a2 .reg %fr24
+a2L .reg %fr24L
+a2R .reg %fr24R
+
+a3 .reg %fr25
+a3L .reg %fr25L
+a3R .reg %fr25R
+
+a4 .reg %fr26
+a4L .reg %fr26L
+a4R .reg %fr26R
+
+a5 .reg %fr27
+a5L .reg %fr27L
+a5R .reg %fr27R
+
+a6 .reg %fr28
+a6L .reg %fr28L
+a6R .reg %fr28R
+
+a7 .reg %fr29
+a7L .reg %fr29L
+a7R .reg %fr29R
+
+b0 .reg %fr30
+b0L .reg %fr30L
+b0R .reg %fr30R
+
+b1 .reg %fr31
+b1L .reg %fr31L
+b1R .reg %fr31R
+
+;
+; Temporary floating point variables, these are all caller save
+; registers
+;
+ftemp1 .reg %fr4
+ftemp2 .reg %fr5
+ftemp3 .reg %fr6
+ftemp4 .reg %fr7
+
+;
+; The B set of registers when used.
+;
+
+b2 .reg %fr8
+b2L .reg %fr8L
+b2R .reg %fr8R
+
+b3 .reg %fr9
+b3L .reg %fr9L
+b3R .reg %fr9R
+
+b4 .reg %fr10
+b4L .reg %fr10L
+b4R .reg %fr10R
+
+b5 .reg %fr11
+b5L .reg %fr11L
+b5R .reg %fr11R
+
+b6 .reg %fr12
+b6L .reg %fr12L
+b6R .reg %fr12R
+
+b7 .reg %fr13
+b7L .reg %fr13L
+b7R .reg %fr13R
+
+c1 .reg %r21 ; only reg
+temp1 .reg %r20 ; only reg
+temp2 .reg %r19 ; only reg
+temp3 .reg %r31 ; only reg
+
+m1 .reg %r28
+c2 .reg %r23
+high_one .reg %r1
+ht .reg %r6
+lt .reg %r5
+m .reg %r4
+c3 .reg %r3
+
+SQR_ADD_C .macro A0L,A0R,C1,C2,C3
+ XMPYU A0L,A0R,ftemp1 ; m
+ FSTD ftemp1,-24(%sp) ; store m
+
+ XMPYU A0R,A0R,ftemp2 ; lt
+ FSTD ftemp2,-16(%sp) ; store lt
+
+ XMPYU A0L,A0L,ftemp3 ; ht
+ FSTD ftemp3,-8(%sp) ; store ht
+
+ LDD -24(%sp),m ; load m
+ AND m,high_mask,temp2 ; m & Mask
+ DEPD,Z m,30,31,temp3 ; m << 32+1
+ LDD -16(%sp),lt ; lt
+
+ LDD -8(%sp),ht ; ht
+ EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
+ ADD temp3,lt,lt ; lt = lt+m
+ ADD,L ht,temp1,ht ; ht += temp1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C2,ht,C2 ; c2=c2+ht
+ ADD,DC C3,%r0,C3 ; c3++
+.endm
+
+SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
+ XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,A1L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD ht,ht,ht ; ht=ht+ht;
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+
+ ADD lt,lt,lt ; lt=lt+lt;
+ ADD,DC ht,%r0,ht ; add in carry (ht++)
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
+ LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
+;
+;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
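+; Computes the full 16-word square: writing the operand as
+;     A = sum_{i=0..7} a[i]*2^(64*i),
+; the routine stores A^2 = sum_{k=0..15} r[k]*2^(64*k).  Informally, each
+; SQR_ADD_C accumulates one a[i]^2 term and each SQR_ADD_C2 one doubled
+; cross term 2*a[i]*a[j] (i != j) into the running column sum held in
+; c1 (low word), c2 (middle word) and c3 (carry word); see the macros
+; above (illustrative description only).
+;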
+
+bn_sqr_comba8
+ .PROC
+ .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .ENTRY
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
+ SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
+ STD c2,56(r_ptr) ; r[7] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
+ SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
+ STD c3,64(r_ptr) ; r[8] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
+ STD c1,72(r_ptr) ; r[9] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a5L,a5R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
+ SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
+ STD c2,80(r_ptr) ; r[10] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
+ STD c3,88(r_ptr) ; r[11] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a6L,a6R,c1,c2,c3
+ SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
+ STD c1,96(r_ptr) ; r[12] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
+ STD c2,104(r_ptr) ; r[13] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a7L,a7R,c3,c1,c2
+ STD c3, 112(r_ptr) ; r[14] = c3
+ STD c1, 120(r_ptr) ; r[15] = c1
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ STD c2,56(r_ptr) ; r[7] = c2;
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;---------------------------------------------------------------------------
+
+MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
+ XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,B0L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+	ADD,DC	ht,%r0,ht	; ht++ (add in carry)
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
+
+;
+;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
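+;
+; For reference, a plain schoolbook C sketch of the result (illustrative
+; only, not part of this file and not comba-scheduled; assumes BN_ULONG is
+; 64-bit and unsigned __int128 is available).  The 4-word variant further
+; below is identical in structure:
+;
+;   void mul_comba8_ref(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8])
+;   {
+;       for (int k = 0; k < 16; k++) r[k] = 0;
+;       for (int i = 0; i < 8; i++) {
+;           BN_ULONG c = 0;                      /* carry into next column */
+;           for (int j = 0; j < 8; j++) {
+;               unsigned __int128 t = (unsigned __int128)a[i] * b[j] + r[i + j] + c;
+;               r[i + j] = (BN_ULONG)t;
+;               c        = (BN_ULONG)(t >> 64);
+;           }
+;           r[i + 8] = c;
+;       }
+;   }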
+
+bn_mul_comba8
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+ FLDD 32(b_ptr),b4
+ FLDD 40(b_ptr),b5
+ FLDD 48(b_ptr),b6
+ FLDD 56(b_ptr),b7
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
+ STD c1,48(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
+ STD c2,56(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
+ STD c3,64(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
+ STD c1,72(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
+ STD c2,80(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
+ STD c3,88(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
+ STD c1,96(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
+ STD c2,104(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
+ STD c3,112(r_ptr)
+ STD c1,120(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ STD c1,48(r_ptr)
+ STD c2,56(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+ .SPACE $PRIVATE$,SORT=16
+ .IMPORT $global$,DATA
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+ .SUBSPA $LIT$,ACCESS=0x2c
+C$4
+ .ALIGN 8
+ .STRINGZ "Division would overflow (%d)\n"
+ .END
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/parisc-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/parisc-mont.pl
new file mode 100644
index 00000000..c02ef6f0
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/parisc-mont.pl
@@ -0,0 +1,995 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# On PA-7100LC this module performs ~90-50% better, less for longer
+# keys, than code generated by gcc 3.2 for PA-RISC 1.1. The latter means
+# that the compiler utilized the xmpyu instruction to perform 32x32=64-bit
+# multiplication, which in turn means that the "baseline" performance was
+# optimal with respect to instruction set capabilities. Fair comparison
+# with the vendor compiler is problematic, because OpenSSL doesn't define
+# BN_LLONG [presumably] for historical reasons, which drives the compiler
+# toward 4 times 16x16=32-bit multiplications [plus complementary
+# shifts and additions] instead. This means that you should observe a
+# several-fold improvement over code generated by the vendor compiler
+# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
+# improvement coefficient was never collected on PA-7100LC, or any
+# other 1.1 CPU, because I don't have access to such a machine with the
+# vendor compiler. But to give you a taste, the PA-RISC 1.1 code path
+# reportedly outperformed code generated by cc +DA1.1 +O3 by a factor
+# of ~5x on PA-8600.
+#
+# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
+# reportedly ~2x faster than vendor-compiler-generated code [according
+# to a comment in pa-risc2[W].s]. Here comes a catch. The execution core
+# of this implementation is actually a 32-bit one, in the sense that it
+# operates on 32-bit values. But pa-risc2[W].s operates on arrays of
+# 64-bit BN_LONGs... How do they interoperate then? No problem. This
+# module picks halves of 64-bit values in reverse order and pretends
+# they were 32-bit BN_LONGs. But can a 32-bit core compete with "pure"
+# 64-bit code such as pa-risc2[W].s then? Well, the thing is that
+# 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
+# i.e. there is no "wider" multiplication like on most other 64-bit
+# platforms. This means that even being effectively 32-bit, this
+# implementation performs the "64-bit" computational task in the same
+# number of arithmetic operations, most notably multiplications. It
+# requires more memory references, most notably to tp[num], but this
+# doesn't seem to exhaust memory port capacity. And indeed, the dedicated
+# PA-RISC 2.0 code path provides virtually the same performance as
+# pa-risc2[W].s: it's ~10% better for the shortest key length and ~10%
+# worse for the longest one.
+#
+# In case it wasn't clear: the module has two distinct code paths,
+# a PA-RISC 1.1 one and a PA-RISC 2.0 one. The latter features carry-free
+# 64-bit additions and 64-bit integer loads, not to mention specific
+# instruction scheduling. In a 64-bit build naturally only the 2.0 code
+# path is assembled. In a 32-bit application context both code paths are
+# assembled, a PA-RISC 2.0 CPU is detected at run-time and the proper
+# path is taken automatically. Also, in a 32-bit build the module imposes
+# a couple of limitations: vector lengths have to be even and vector
+# addresses have to be 64-bit aligned. Normally neither is a problem:
+# most common key lengths are even and vectors are commonly malloc-ed,
+# which ensures alignment.
+#
+# Special thanks to polarhome.com for providing an HP-UX account on a
+# PA-RISC 1.1 machine, and to a correspondent who chose to remain
+# anonymous for testing the code on a PA-RISC 2.0 machine.
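+#
+# For orientation, one outer iteration of the loops below implements the
+# usual word-serial Montgomery step (illustrative description only; w is
+# the radix the core operates on and b_i the current bp[] word):
+#
+#	m  = ((tp[0] + ap[0]*b_i) * n0) mod 2^w
+#	tp = (tp + ap*b_i + np*m) / 2^w
+#
+# The choice of m makes the division exact, and the subtract/copy epilogue
+# performs the final conditional subtraction of np.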
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+
+$flavour = shift;
+$output = shift;
+
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+ $LEVEL ="2.0W";
+ $SIZE_T =8;
+ $FRAME_MARKER =80;
+ $SAVED_RP =16;
+ $PUSH ="std";
+ $PUSHMA ="std,ma";
+ $POP ="ldd";
+ $POPMB ="ldd,mb";
+ $BN_SZ =$SIZE_T;
+} else {
+ $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0";
+ $SIZE_T =4;
+ $FRAME_MARKER =48;
+ $SAVED_RP =20;
+ $PUSH ="stw";
+ $PUSHMA ="stwm";
+ $POP ="ldw";
+ $POPMB ="ldwm";
+ $BN_SZ =$SIZE_T;
+ if (open CONF,"<${dir}../../opensslconf.h") {
+ while(<CONF>) {
+ if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
+ $BN_SZ=8;
+ $LEVEL="2.0";
+ last;
+ }
+ }
+ close CONF;
+ }
+}
+
+$FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker
+ # [+ argument transfer]
+$LOCALS=$FRAME-$FRAME_MARKER;
+$FRAME+=32; # local variables
+
+$tp="%r31";
+$ti1="%r29";
+$ti0="%r28";
+
+$rp="%r26";
+$ap="%r25";
+$bp="%r24";
+$np="%r23";
+$n0="%r22"; # passed through stack in 32-bit
+$num="%r21"; # passed through stack in 32-bit
+$idx="%r20";
+$arrsz="%r19";
+
+$nm1="%r7";
+$nm0="%r6";
+$ab1="%r5";
+$ab0="%r4";
+
+$fp="%r3";
+$hi1="%r2";
+$hi0="%r1";
+
+$xfer=$n0;	# accommodates [-16..15] offset in fld[dw]s
+
+$fm0="%fr4"; $fti=$fm0;
+$fbi="%fr5L";
+$fn0="%fr5R";
+$fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
+$fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
+
+$code=<<___;
+ .LEVEL $LEVEL
+ .SPACE \$TEXT\$
+ .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+ .EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
+ .ALIGN 64
+bn_mul_mont
+ .PROC
+ .CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
+ .ENTRY
+ $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
+ $PUSHMA %r3,$FRAME(%sp)
+ $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
+ $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
+ $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
+ $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
+ $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
+ $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
+ $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
+ ldo -$FRAME(%sp),$fp
+___
+$code.=<<___ if ($SIZE_T==4);
+ ldw `-$FRAME_MARKER-4`($fp),$n0
+ ldw `-$FRAME_MARKER-8`($fp),$num
+ nop
+ nop ; alignment
+___
+$code.=<<___ if ($BN_SZ==4);
+ comiclr,<= 6,$num,%r0 ; are vectors long enough?
+ b L\$abort
+ ldi 0,%r28 ; signal "unhandled"
+ add,ev %r0,$num,$num ; is $num even?
+ b L\$abort
+ nop
+ or $ap,$np,$ti1
+ extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned?
+ b L\$abort
+ nop
+ nop ; alignment
+ nop
+
+ fldws 0($n0),${fn0}
+ fldws,ma 4($bp),${fbi} ; bp[0]
+___
+$code.=<<___ if ($BN_SZ==8);
+ comib,> 3,$num,L\$abort ; are vectors long enough?
+ ldi 0,%r28 ; signal "unhandled"
+ addl $num,$num,$num ; I operate on 32-bit values
+
+ fldws 4($n0),${fn0} ; only low part of n0
+ fldws 4($bp),${fbi} ; bp[0] in flipped word order
+___
+$code.=<<___;
+ fldds 0($ap),${fai} ; ap[0,1]
+ fldds 0($np),${fni} ; np[0,1]
+
+ sh2addl $num,%r0,$arrsz
+ ldi 31,$hi0
+ ldo 36($arrsz),$hi1 ; space for tp[num+1]
+ andcm $hi1,$hi0,$hi1 ; align
+ addl $hi1,%sp,%sp
+ $PUSH $fp,-$SIZE_T(%sp)
+
+ ldo `$LOCALS+16`($fp),$xfer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0]
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ addl $arrsz,$ap,$ap ; point at the end
+ addl $arrsz,$np,$np
+ subi 0,$arrsz,$idx ; j=0
+ ldo 8($idx),$idx ; j++++
+
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ fstds ${fab1},0($xfer)
+ fstds ${fnm1},8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+___
+$code.=<<___ if ($BN_SZ==4);
+ mtctl $hi0,%cr11 ; $hi0 still holds 31
+ extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0
+ b L\$parisc11
+ nop
+___
+$code.=<<___; # PA-RISC 2.0 code-path
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $ab0,63,32,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ ldo 8($idx),$idx ; j++++
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ extrd,u $nm0,31,32,$hi1
+
+L\$1st
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ stw $nm1,-4($tp) ; tp[j-1]
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ addl $ab1,$nm1,$nm1
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[1]
+___
+$code.=<<___ if ($BN_SZ==8);
+ fldws 0($bp),${fbi} ; bp[1] in flipped word order
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ addl $hi1,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2]
+ flddx $idx($np),${fni} ; np[2]
+ ldo 8($idx),$idx ; j++++
+ ldd -16($xfer),$ab0 ; 33-bit value
+ ldd -8($xfer),$nm0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ extrd,u $ab0,31,32,$ti0 ; carry bit
+ extrd,u $ab0,63,32,$ab0
+ fstds ${fab1},0($xfer)
+ addl $ti0,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ extrd,u $nm0,31,32,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+L\$inner
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldw 4($tp),$ti0 ; tp[j]
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ti0,$ti0
+ addl $ti0,$ab0,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $nm1,31,32,$hi1
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldw 4($tp),$ti0 ; tp[j]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ addl $ti0,$ab0,$ab0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,31,32,$hi0
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[i]
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldi 12,$ti0 ; bp[i] in flipped word order
+ addl,ev %r0,$num,$num
+ ldi -4,$ti0
+ addl $ti0,$bp,$bp
+ fldws 0($bp),${fbi}
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0]
+ addl $hi0,$ab1,$ab1
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ addl $hi1,$hi0,$hi0
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone
+ addl $hi0,$ab1,$ab1
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ addl $hi1,$hi0,$hi0
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+___
+$code.=<<___ if ($BN_SZ==4);
+ ldws,ma 4($tp),$ti0
+ extru,= $rp,31,3,%r0 ; is rp 64-bit aligned?
+ b L\$sub_pa11
+ addl $tp,$arrsz,$tp
+L\$sub
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldd,ma 8($tp),$ti0
+L\$sub
+ ldd $idx($np),$hi0
+ shrpd $ti0,$ti0,32,$ti0 ; flip word order
+ std $ti0,-8($tp) ; save flipped value
+ sub,db $ti0,$hi0,$hi1
+ ldd,ma 8($tp),$ti0
+ addib,<> 8,$idx,L\$sub
+ std,ma $hi1,8($rp)
+
+ extrd,u $ti0,31,32,$ti0 ; carry in flipped word order
+ sub,db $ti0,%r0,$hi1
+ ldo -8($tp),$tp
+___
+$code.=<<___;
+ and $tp,$hi1,$ap
+ andcm $rp,$hi1,$bp
+ or $ap,$bp,$np
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy
+ ldd $idx($np),$hi0
+ std,ma %r0,8($tp)
+ addib,<> 8,$idx,.-8 ; L\$copy
+ std,ma $hi0,8($rp)
+___
+
+if ($BN_SZ==4) { # PA-RISC 1.1 code-path
+$ablo=$ab0;
+$abhi=$ab1;
+$nmlo0=$nm0;
+$nmhi0=$nm1;
+$nmlo1="%r9";
+$nmhi1="%r8";
+
+$code.=<<___;
+ b L\$done
+ nop
+
+ .ALIGN 8
+L\$parisc11
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -12($xfer),$ablo
+ ldw -16($xfer),$hi0
+ ldw -4($xfer),$nmlo0
+ ldw -8($xfer),$nmhi0
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+ ldo 8($idx),$idx ; j++++
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ addc %r0,$nmhi0,$hi1
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+ nop
+
+L\$1st_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 12($xfer),$nmlo1
+ addc %r0,$abhi,$hi0
+ ldw 8($xfer),$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fab1},0($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ fstds ${fnm1},8($xfer)
+ add $hi1,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$hi1
+ ldw -16($xfer),$abhi
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ ldw -4($xfer),$nmlo0
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -8($xfer),$nmhi0
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ fstds ${fab0},-16($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ fstds ${fnm0},-8($xfer)
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ ldw 8($xfer),$nmhi1
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ add $hi0,$ablo,$ablo
+ fstds ${fab1},0($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -8($xfer),$nmhi0
+ addc %r0,$nmhi1,$hi1
+ ldw -4($xfer),$nmlo0
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[1]
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer_pa11
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+ ldw -16($xfer),$abhi ; carry bit actually
+ ldo 8($idx),$idx ; j++++
+ ldw -12($xfer),$ablo
+ ldw -8($xfer),$nmhi0
+ ldw -4($xfer),$nmlo0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ fstds ${fab1},0($xfer)
+ addl $abhi,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ addc %r0,$nmhi0,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+
+L\$inner_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ ldw 12($xfer),$nmlo1
+ add $ti1,$ablo,$ablo
+ ldw 8($xfer),$nmhi1
+ addc %r0,$abhi,$hi0
+ fstds ${fab1},0($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fnm1},8($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ ldw 8($tp),$ti1 ; tp[j]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -4($xfer),$nmlo0
+ add $hi0,$ablo,$ablo
+ ldw -8($xfer),$nmhi0
+ addc %r0,$abhi,$abhi
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ add $ti0,$ablo,$ablo
+ fstds ${fab0},-16($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm0},-8($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldw 8($xfer),$nmhi1
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ fstds ${fab1},0($xfer)
+ add $ti1,$ablo,$ablo
+ fstds ${fnm1},8($xfer)
+ addc %r0,$abhi,$hi0
+ ldw -16($xfer),$abhi
+ add $ablo,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$nmhi1
+ ldw -8($xfer),$nmhi0
+ add $hi1,$nmlo1,$nmlo1
+ ldw -4($xfer),$nmlo0
+ addc %r0,$nmhi1,$hi1
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$abhi
+ add $ti0,$ablo,$ablo
+ ldw 8($tp),$ti1 ; tp[j]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone_pa11; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[i]
+ flddx $idx($ap),${fai} ; ap[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer_pa11
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone_pa11
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32+4`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+ ldw -4($tp),$ti0
+ addl $tp,$arrsz,$tp
+L\$sub_pa11
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub_pa11
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+ and $tp,$hi1,$ap
+ andcm $rp,$hi1,$bp
+ or $ap,$bp,$np
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy_pa11
+ ldwx $idx($np),$hi0
+ stws,ma %r0,4($tp)
+ addib,<> 4,$idx,L\$copy_pa11
+ stws,ma $hi0,4($rp)
+
+ nop ; alignment
+L\$done
+___
+}
+
+$code.=<<___;
+ ldi 1,%r28 ; signal "handled"
+ ldo $FRAME($fp),%sp ; destroy tp[num+1]
+
+ $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
+ $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
+ $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
+ $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
+ $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
+ $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
+ $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
+ $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
+L\$abort
+ bv (%r2)
+ .EXIT
+ $POPMB -$FRAME(%sp),%r3
+ .PROCEND
+ .STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# Explicitly encode PA-RISC 2.0 instructions used in this module, so
+# that it can be compiled with .LEVEL 1.0. It should be noted that I
+# wouldn't have to do this if the GNU assembler understood the .ALLOW 2.0
+# directive...
+
+my $ldd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "ldd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4
+ { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5
+ { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
+ $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $std = sub {
+ my ($mod,$args) = @_;
+ my $orig = "std$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6
+ { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
+ $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $extrd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "extrd$mod\t$args";
+
+ # I only have ",u" completer, it's implicitly encoded...
+ if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15
+ { my $opcode=(0x36<<26)|($1<<21)|($4<<16);
+ my $len=32-$3;
+ $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos
+ $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12
+ { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
+ my $len=32-$2;
+ $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len
+ $opcode |= (1<<13) if ($mod =~ /,\**=/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $shrpd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "shrpd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14
+ { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
+ my $cpos=63-$3;
+ $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $sub = sub {
+ my ($mod,$args) = @_;
+ my $orig = "sub$mod\t$args";
+
+ if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
+ my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
+ $opcode|=(1<<10); # e1
+ $opcode|=(1<<8); # e2
+ $opcode|=(1<<5); # d
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
+ }
+ else { "\t".$orig; }
+};
+
+sub assemble {
+ my ($mnemonic,$mod,$args)=@_;
+ my $opcode = eval("\$$mnemonic");
+
+ ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ # flip word order in 64-bit mode...
+ s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
+ # assemble 2.0 instructions in 32-bit mode...
+ s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
+
+ s/\bbv\b/bve/gm if ($SIZE_T==8);
+
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc-mont.pl
new file mode 100644
index 00000000..f9b6992c
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc-mont.pl
@@ -0,0 +1,334 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# April 2006
+
+# "Teaser" Montgomery multiplication module for PowerPC. It's possible
+# to gain a bit more by modulo-scheduling the outer loop; a dedicated
+# squaring procedure should then give a further 20%, and the code can be
+# adapted for a 32-bit application running on a 64-bit CPU. As for the
+# latter, it won't be able to achieve "native" 64-bit performance,
+# because in a 32-bit application context every addc instruction has to
+# be expanded into addc, two right shifts by 32 and finally adde, etc.
+# So far the RSA *sign* performance improvement over pre-bn_mul_mont asm
+# for a 64-bit application running on PPC970/G5 is:
+#
+# 512-bit +65%
+# 1024-bit +35%
+# 2048-bit +18%
+# 4096-bit +4%
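+#
+# For orientation only (not part of this module, and using hypothetical
+# names such as mont_mul_ref), the word-level Montgomery multiplication
+# that the assembly below implements looks roughly like this in portable
+# C, assuming 64-bit words and a compiler with unsigned __int128:
+#
+#     #include <stdint.h>
+#
+#     /* rp = ap*bp*2^(-64*num) mod np; n0 = -np[0]^-1 mod 2^64;
+#      * tp[] is num+1 words of scratch.  Illustrative sketch only. */
+#     static void mont_mul_ref(uint64_t *rp, const uint64_t *ap,
+#                              const uint64_t *bp, const uint64_t *np,
+#                              uint64_t n0, int num, uint64_t *tp)
+#     {
+#         for (int i = 0; i <= num; i++) tp[i] = 0;
+#         for (int i = 0; i < num; i++) {
+#             unsigned __int128 acc = 0;
+#             for (int j = 0; j < num; j++) {          /* tp += ap*bp[i] */
+#                 acc += (unsigned __int128)ap[j] * bp[i] + tp[j];
+#                 tp[j] = (uint64_t)acc;  acc >>= 64;
+#             }
+#             acc += tp[num];
+#             tp[num] = (uint64_t)acc;
+#             uint64_t ovf = (uint64_t)(acc >> 64);    /* upmost overflow bit */
+#
+#             uint64_t m = tp[0] * n0;                 /* Montgomery factor */
+#             acc = (unsigned __int128)np[0] * m + tp[0];
+#             acc >>= 64;                              /* low word is zero */
+#             for (int j = 1; j < num; j++) {          /* tp = (tp+np*m)/2^64 */
+#                 acc += (unsigned __int128)np[j] * m + tp[j];
+#                 tp[j - 1] = (uint64_t)acc;  acc >>= 64;
+#             }
+#             acc += tp[num];
+#             tp[num - 1] = (uint64_t)acc;
+#             tp[num] = ovf + (uint64_t)(acc >> 64);
+#         }
+#         /* subtract np once if tp >= np, then copy the result to rp */
+#         uint64_t borrow = 0;
+#         for (int j = 0; j < num; j++) {
+#             unsigned __int128 d = (unsigned __int128)tp[j] - np[j] - borrow;
+#             rp[j] = (uint64_t)d;  borrow = (uint64_t)(d >> 64) & 1;
+#         }
+#         if (!tp[num] && borrow)                      /* tp < np: keep tp */
+#             for (int j = 0; j < num; j++) rp[j] = tp[j];
+#     }
+#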
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $SIZE_T=4;
+ $RZONE= 224;
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $LDX= "lwzx"; # load indexed
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $STX= "stwx"; # store indexed
+ $STUX= "stwux"; # store indexed and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UCMP= "cmplw"; # unsigned compare
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $PUSH= $ST;
+ $POP= $LD;
+} elsif ($flavour =~ /64/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $SIZE_T=8;
+ $RZONE= 288;
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load
+ $LDU= "ldu"; # load and update
+ $LDX= "ldx"; # load indexed
+ $ST= "std"; # store
+ $STU= "stdu"; # store and update
+ $STX= "stdx"; # store indexed
+ $STUX= "stdux"; # store indexed and update
+ $UMULL= "mulld"; # unsigned multiply low
+ $UMULH= "mulhdu"; # unsigned multiply high
+ $UCMP= "cmpld"; # unsigned compare
+ $SHRI= "srdi"; # unsigned shift right by immediate
+ $PUSH= $ST;
+ $POP= $LD;
+} else { die "nonsense $flavour"; }
+
+$FRAME=8*$SIZE_T+$RZONE;
+$LOCALS=8*$SIZE_T;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$sp="r1";
+$toc="r2";
+$rp="r3"; $ovf="r3";
+$ap="r4";
+$bp="r5";
+$np="r6";
+$n0="r7";
+$num="r8";
+$rp="r9"; # $rp is reassigned
+$aj="r10";
+$nj="r11";
+$tj="r12";
+# non-volatile registers
+$i="r20";
+$j="r21";
+$tp="r22";
+$m0="r23";
+$m1="r24";
+$lo0="r25";
+$hi0="r26";
+$lo1="r27";
+$hi1="r28";
+$alo="r29";
+$ahi="r30";
+$nlo="r31";
+#
+$nhi="r0";
+
+$code=<<___;
+.machine "any"
+.text
+
+.globl .bn_mul_mont_int
+.align 4
+.bn_mul_mont_int:
+ cmpwi $num,4
+ mr $rp,r3 ; $rp is reassigned
+ li r3,0
+ bltlr
+___
+$code.=<<___ if ($BNSZ==4);
+ cmpwi $num,32 ; longer key performance is not better
+ bgelr
+___
+$code.=<<___;
+ slwi $num,$num,`log($BNSZ)/log(2)`
+ li $tj,-4096
+ addi $ovf,$num,$FRAME
+ subf $ovf,$ovf,$sp ; $sp-$ovf
+ and $ovf,$ovf,$tj ; minimize TLB usage
+ subf $ovf,$sp,$ovf ; $ovf-$sp
+ mr $tj,$sp
+ srwi $num,$num,`log($BNSZ)/log(2)`
+ $STUX $sp,$sp,$ovf
+
+ $PUSH r20,`-12*$SIZE_T`($tj)
+ $PUSH r21,`-11*$SIZE_T`($tj)
+ $PUSH r22,`-10*$SIZE_T`($tj)
+ $PUSH r23,`-9*$SIZE_T`($tj)
+ $PUSH r24,`-8*$SIZE_T`($tj)
+ $PUSH r25,`-7*$SIZE_T`($tj)
+ $PUSH r26,`-6*$SIZE_T`($tj)
+ $PUSH r27,`-5*$SIZE_T`($tj)
+ $PUSH r28,`-4*$SIZE_T`($tj)
+ $PUSH r29,`-3*$SIZE_T`($tj)
+ $PUSH r30,`-2*$SIZE_T`($tj)
+ $PUSH r31,`-1*$SIZE_T`($tj)
+
+ $LD $n0,0($n0) ; pull n0[0] value
+ addi $num,$num,-2 ; adjust $num for counter register
+
+ $LD $m0,0($bp) ; m0=bp[0]
+ $LD $aj,0($ap) ; ap[0]
+ addi $tp,$sp,$LOCALS
+ $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0]
+ $UMULH $hi0,$aj,$m0
+
+ $LD $aj,$BNSZ($ap) ; ap[1]
+ $LD $nj,0($np) ; np[0]
+
+ $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0
+
+ $UMULL $alo,$aj,$m0 ; ap[1]*bp[0]
+ $UMULH $ahi,$aj,$m0
+
+ $UMULL $lo1,$nj,$m1 ; np[0]*m1
+ $UMULH $hi1,$nj,$m1
+ $LD $nj,$BNSZ($np) ; np[1]
+ addc $lo1,$lo1,$lo0
+ addze $hi1,$hi1
+
+ $UMULL $nlo,$nj,$m1 ; np[1]*m1
+ $UMULH $nhi,$nj,$m1
+
+ mtctr $num
+ li $j,`2*$BNSZ`
+.align 4
+L1st:
+ $LDX $aj,$ap,$j ; ap[j]
+ addc $lo0,$alo,$hi0
+ $LDX $nj,$np,$j ; np[j]
+ addze $hi0,$ahi
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[0]
+ addc $lo1,$nlo,$hi1
+ $UMULH $ahi,$aj,$m0
+ addze $hi1,$nhi
+ $UMULL $nlo,$nj,$m1 ; np[j]*m1
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
+ $UMULH $nhi,$nj,$m1
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ addi $j,$j,$BNSZ ; j++
+ addi $tp,$tp,$BNSZ ; tp++
+ bdnz- L1st
+;L1st
+ addc $lo0,$alo,$hi0
+ addze $hi0,$ahi
+
+ addc $lo1,$nlo,$hi1
+ addze $hi1,$nhi
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ li $ovf,0
+ addc $hi1,$hi1,$hi0
+ addze $ovf,$ovf ; upmost overflow bit
+ $ST $hi1,$BNSZ($tp)
+
+ li $i,$BNSZ
+.align 4
+Louter:
+ $LDX $m0,$bp,$i ; m0=bp[i]
+ $LD $aj,0($ap) ; ap[0]
+ addi $tp,$sp,$LOCALS
+ $LD $tj,$LOCALS($sp); tp[0]
+ $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i]
+ $UMULH $hi0,$aj,$m0
+ $LD $aj,$BNSZ($ap) ; ap[1]
+ $LD $nj,0($np) ; np[0]
+ addc $lo0,$lo0,$tj ; ap[0]*bp[i]+tp[0]
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
+ addze $hi0,$hi0
+ $UMULL $m1,$lo0,$n0 ; tp[0]*n0
+ $UMULH $ahi,$aj,$m0
+ $UMULL $lo1,$nj,$m1 ; np[0]*m1
+ $UMULH $hi1,$nj,$m1
+ $LD $nj,$BNSZ($np) ; np[1]
+ addc $lo1,$lo1,$lo0
+ $UMULL $nlo,$nj,$m1 ; np[1]*m1
+ addze $hi1,$hi1
+ $UMULH $nhi,$nj,$m1
+
+ mtctr $num
+ li $j,`2*$BNSZ`
+.align 4
+Linner:
+ $LDX $aj,$ap,$j ; ap[j]
+ addc $lo0,$alo,$hi0
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addze $hi0,$ahi
+ $LDX $nj,$np,$j ; np[j]
+ addc $lo1,$nlo,$hi1
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
+ addze $hi1,$nhi
+ $UMULH $ahi,$aj,$m0
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ $UMULL $nlo,$nj,$m1 ; np[j]*m1
+ addze $hi0,$hi0
+ $UMULH $nhi,$nj,$m1
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addi $j,$j,$BNSZ ; j++
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+ addi $tp,$tp,$BNSZ ; tp++
+ bdnz- Linner
+;Linner
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addc $lo0,$alo,$hi0
+ addze $hi0,$ahi
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ addze $hi0,$hi0
+
+ addc $lo1,$nlo,$hi1
+ addze $hi1,$nhi
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ addic $ovf,$ovf,-1 ; move upmost overflow to XER[CA]
+ li $ovf,0
+ adde $hi1,$hi1,$hi0
+ addze $ovf,$ovf
+ $ST $hi1,$BNSZ($tp)
+;
+ slwi $tj,$num,`log($BNSZ)/log(2)`
+ $UCMP $i,$tj
+ addi $i,$i,$BNSZ
+ ble- Louter
+
+ addi $num,$num,2 ; restore $num
+ subfc $j,$j,$j ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,$LOCALS
+ mtctr $num
+
+.align 4
+Lsub: $LDX $tj,$tp,$j
+ $LDX $nj,$np,$j
+ subfe $aj,$nj,$tj ; tp[j]-np[j]
+ $STX $aj,$rp,$j
+ addi $j,$j,$BNSZ
+ bdnz- Lsub
+
+ li $j,0
+ mtctr $num
+ subfe $ovf,$j,$ovf ; handle upmost overflow bit
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
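+; Illustrative note (not from this module): $ovf now holds an all-ones or
+; all-zero mask, so the and/andc/or triplet above is a branchless select.
+; In C terms (hypothetical names), with mask = borrow ? ~(uintptr_t)0 : 0:
+;	src = (BN_ULONG *)(((uintptr_t)tp &  mask) |
+;	                   ((uintptr_t)rp & ~mask));
+; i.e. copy from tp when the subtraction borrowed, and from rp, which
+; already holds tp-np, otherwise.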
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ $LDX $tj,$ap,$j
+ $STX $tj,$rp,$j
+ $STX $j,$tp,$j ; zap at once
+ addi $j,$j,$BNSZ
+ bdnz- Lcopy
+
+ $POP $tj,0($sp)
+ li r3,1
+ $POP r20,`-12*$SIZE_T`($tj)
+ $POP r21,`-11*$SIZE_T`($tj)
+ $POP r22,`-10*$SIZE_T`($tj)
+ $POP r23,`-9*$SIZE_T`($tj)
+ $POP r24,`-8*$SIZE_T`($tj)
+ $POP r25,`-7*$SIZE_T`($tj)
+ $POP r26,`-6*$SIZE_T`($tj)
+ $POP r27,`-5*$SIZE_T`($tj)
+ $POP r28,`-4*$SIZE_T`($tj)
+ $POP r29,`-3*$SIZE_T`($tj)
+ $POP r30,`-2*$SIZE_T`($tj)
+ $POP r31,`-1*$SIZE_T`($tj)
+ mr $sp,$tj
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,12,6,0
+ .long 0
+
+.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc.pl
new file mode 100644
index 00000000..1249ce22
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc.pl
@@ -0,0 +1,1998 @@
+#!/usr/bin/env perl
+#
+# Implemented as a Perl wrapper because we want to support several
+# different architectures with a single file. We pick the target based
+# on the file name we are asked to generate.
+#
+# It should be noted, though, that this Perl code is nothing like
+# <openssl>/crypto/perlasm/x86*. In this case Perl is used pretty much
+# as a pre-processor to cover platform differences in name decoration,
+# linker tables, 32-/64-bit instruction sets...
+#
+# As you might know, there are several PowerPC ABIs in use. Most notably,
+# Linux and AIX use different 32-bit ABIs. The good news is that these
+# ABIs are similar enough to implement leaf(!) functions that are ABI
+# neutral, and that's what you find here: ABI-neutral leaf functions.
+# In case you wonder what that is...
+#
+# AIX performance
+#
+# MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
+#
+# The following is the performance of 32-bit compiler
+# generated code:
+#
+# OpenSSL 0.9.6c 21 dec 2001
+# built on: Tue Jun 11 11:06:51 EDT 2002
+# options:bn(64,32) ...
+#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
+#rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
+#rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
+#rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
+#dsa 512 bits 0.0087s 0.0106s 114.3 94.5
+#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
+#rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
+#rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
+#rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
+#dsa 512 bits 0.0052s 0.0062s 191.6 162.0
+#dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
+#
+# Number of operations increases by almost 75%
+#
+# Here are performance numbers for 64-bit compiler
+# generated code:
+#
+# OpenSSL 0.9.6g [engine] 9 Aug 2002
+# built on: Fri Apr 18 16:59:20 EDT 2003
+# options:bn(64,64) ...
+# compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
+#rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
+#rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
+#rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
+#dsa 512 bits 0.0026s 0.0032s 382.5 313.7
+#dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
+#rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
+#rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
+#rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
+#dsa 512 bits 0.0016s 0.0020s 610.7 507.1
+#dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
+#
+# Again, performance increases by about 75%
+#
+# Mac OS X, Apple G5 1.8GHz (Note this is 32 bit code)
+# OpenSSL 0.9.7c 30 Sep 2003
+#
+# Original code.
+#
+#rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
+#rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
+#rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
+#rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
+#dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
+#dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
+#dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
+#rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
+#rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
+#rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
+#dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
+#dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
+#dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
+#
+# Performance increase of ~60%
+#
+# If you have comments or suggestions to improve code send
+# me a note at schari@us.ibm.com
+#
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc\"";
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UDIV= "divwu"; # unsigned divide
+ $UCMPI= "cmplwi"; # unsigned compare with immediate
+ $UCMP= "cmplw"; # unsigned compare
+ $CNTLZ= "cntlzw"; # count leading zeros
+ $SHL= "slw"; # shift left
+ $SHR= "srw"; # unsigned shift right
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $SHLI= "slwi"; # shift left by immediate
+ $CLRU= "clrlwi"; # clear upper bits
+ $INSR= "insrwi"; # insert right
+ $ROTL= "rotlwi"; # rotate left by immediate
+ $TR= "tw"; # conditional trap
+} elsif ($flavour =~ /64/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc64\"";
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load
+ $LDU= "ldu"; # load and update
+ $ST= "std"; # store
+ $STU= "stdu"; # store and update
+ $UMULL= "mulld"; # unsigned multiply low
+ $UMULH= "mulhdu"; # unsigned multiply high
+ $UDIV= "divdu"; # unsigned divide
+ $UCMPI= "cmpldi"; # unsigned compare with immediate
+ $UCMP= "cmpld"; # unsigned compare
+ $CNTLZ= "cntlzd"; # count leading zeros
+ $SHL= "sld"; # shift left
+ $SHR= "srd"; # unsigned shift right
+ $SHRI= "srdi"; # unsigned shift right by immediate
+ $SHLI= "sldi"; # shift left by immediate
+ $CLRU= "clrldi"; # clear upper bits
+ $INSR= "insrdi"; # insert right
+ $ROTL= "rotldi"; # rotate left by immediate
+ $TR= "td"; # conditional trap
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$data=<<EOF;
+#--------------------------------------------------------------------
+#
+#
+#
+#
+# File: ppc32.s
+#
+# Created by: Suresh Chari
+# IBM Thomas J. Watson Research Library
+# Hawthorne, NY
+#
+#
+# Description: Optimized assembly routines for OpenSSL crypto
+#	on the 32-bit PowerPC platform.
+#
+#
+# Version History
+#
+# 2. Fixed bn_add,bn_sub and bn_div_words, added comments,
+# cleaned up code. Also made a single version which can
+# be used for both the AIX and Linux compilers. See NOTE
+# below.
+# 12/05/03 Suresh Chari
+# (with lots of help from) Andy Polyakov
+##
+# 1. Initial version 10/20/02 Suresh Chari
+#
+#
+# The following file works for the xlc,cc
+# and gcc compilers.
+#
+# NOTE: To get the file to link correctly with the gcc compiler
+# you have to change the names of the routines and remove
+# the first .(dot) character. This should automatically
+# be done in the build process.
+#
+# Hand optimized assembly code for the following routines
+#
+# bn_sqr_comba4
+# bn_sqr_comba8
+# bn_mul_comba4
+# bn_mul_comba8
+# bn_sub_words
+# bn_add_words
+# bn_div_words
+# bn_sqr_words
+# bn_mul_words
+# bn_mul_add_words
+#
+# NOTE: It is possible to optimize this code more for
+# specific PowerPC or Power architectures. On the Northstar
+# architecture the optimizations in this file do
+# NOT provide much improvement.
+#
+# If you have comments or suggestions to improve code send
+# me a note at schari\@us.ibm.com
+#
+#--------------------------------------------------------------------------
+#
+# Defines to be used in the assembly code.
+#
+#.set r0,0 # we use it as storage for value of 0
+#.set SP,1 # preserved
+#.set RTOC,2 # preserved
+#.set r3,3 # 1st argument/return value
+#.set r4,4 # 2nd argument/volatile register
+#.set r5,5 # 3rd argument/volatile register
+#.set r6,6 # ...
+#.set r7,7
+#.set r8,8
+#.set r9,9
+#.set r10,10
+#.set r11,11
+#.set r12,12
+#.set r13,13 # not used, nor any other "below" it...
+
+# Declare function names to be global
+# NOTE: For gcc these names MUST be changed to remove
+# the first . i.e. for example change ".bn_sqr_comba4"
+# to "bn_sqr_comba4". This should be automatically done
+# in the build.
+
+ .globl .bn_sqr_comba4
+ .globl .bn_sqr_comba8
+ .globl .bn_mul_comba4
+ .globl .bn_mul_comba8
+ .globl .bn_sub_words
+ .globl .bn_add_words
+ .globl .bn_div_words
+ .globl .bn_sqr_words
+ .globl .bn_mul_words
+ .globl .bn_mul_add_words
+
+# .text section
+
+ .machine "any"
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_comba4" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_sqr_comba4:
+#
+# Optimized version of bn_sqr_comba4.
+#
+# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+# r3 contains r
+# r4 contains a
+#
+# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
+#
+# r5,r6 are the two BN_ULONGs being multiplied.
+# r7,r8 are the results of the 32x32 giving 64 bit multiply.
+# r9,r10, r11 are the equivalents of c1,c2, c3.
+# Here's the assembly
+#
+#
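+#
+# For reference only (not part of this file): the sqr_add_c/sqr_add_c2
+# steps named in the comments below each fold one product into the
+# three-word accumulator (c1,c2,c3). In C, assuming 64-bit words and
+# unsigned __int128 (hypothetical helper name):
+#
+#     #include <stdint.h>
+#
+#     /* c1,c2,c3: running column accumulator, least significant first */
+#     #define sqr_add_c_ref(a, i, c1, c2, c3) do {                      \
+#         unsigned __int128 t = (unsigned __int128)(a)[i] * (a)[i];     \
+#         t += (c1); (c1) = (uint64_t)t;                                \
+#         t >>= 64;  t += (c2); (c2) = (uint64_t)t;                     \
+#         (c3) += (uint64_t)(t >> 64);                                  \
+#     } while (0)
+#
+#     /* sqr_add_c2 accumulates the cross product a[i]*a[j] (i != j)
+#      * twice, doubling it within the same carry chain. */
+#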
+ xor r0,r0,r0 # set r0 = 0. Used in the addze
+ # instructions below
+
+ #sqr_add_c(a,0,c1,c2,c3)
+ $LD r5,`0*$BNSZ`(r4)
+ $UMULL r9,r5,r5
+ $UMULH r10,r5,r5 #in first iteration. No need
+ #to add since c1=c2=c3=0.
+ # Note c3(r11) is NOT set to 0
+ # but will be.
+
+ $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
+ # sqr_add_c2(a,1,0,c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
+ adde r8,r8,r8
+ addze r9,r0 # catch carry if any.
+ # r9= r0(=0) and carry
+
+ addc r10,r7,r10 # now add to temp result.
+ addze r11,r8 # r8 added to r11 which is 0
+ addze r9,r9
+
+ $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
+ #sqr_add_c(a,1,c3,c1,c2)
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,2,0,c3,c1,c2)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`2*$BNSZ`(r3) #r[2]=c3
+ #sqr_add_c2(a,3,0,c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r11,r0
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,2,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`3*$BNSZ`(r3) #r[3]=c1
+ #sqr_add_c(a,2,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,3,1,c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`4*$BNSZ`(r3) #r[4]=c2
+ #sqr_add_c2(a,3,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r10,r0
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`5*$BNSZ`(r3) #r[5] = c3
+ #sqr_add_c(a,3,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+
+ $ST r9,`6*$BNSZ`(r3) #r[6]=c1
+ $ST r10,`7*$BNSZ`(r3) #r[7]=c2
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_comba8" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_sqr_comba8:
+#
+# This is an optimized version of the bn_sqr_comba8 routine.
+# Tightly uses the adde instruction
+#
+#
+# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+# r3 contains r
+# r4 contains a
+#
+# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
+#
+# r5,r6 are the two BN_ULONGs being multiplied.
+# r7,r8 are the results of the 32x32 giving 64 bit multiply.
+# r9,r10, r11 are the equivalents of c1,c2, c3.
+#
+# A possible optimization of loading all 8 longs of a into registers
+# doesn't provide any speedup.
+#
+
+	xor	r0,r0,r0		#set r0 = 0. Used in addze
+					#instructions below.
+
+ #sqr_add_c(a,0,c1,c2,c3);
+ $LD r5,`0*$BNSZ`(r4)
+ $UMULL r9,r5,r5 #1st iteration: no carries.
+ $UMULH r10,r5,r5
+ $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
+ #sqr_add_c2(a,1,0,c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10 #add the two register number
+ adde r11,r8,r0 # (r8,r7) to the three register
+ addze r9,r0 # number (r9,r11,r10).NOTE:r0=0
+
+ addc r10,r7,r10 #add the two register number
+ adde r11,r8,r11 # (r8,r7) to the three register
+ addze r9,r9 # number (r9,r11,r10).
+
+ $ST r10,`1*$BNSZ`(r3) # r[1]=c2
+
+ #sqr_add_c(a,1,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,2,0,c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ $ST r11,`2*$BNSZ`(r3) #r[2]=c3
+ #sqr_add_c2(a,3,0,c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,2,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
+ #sqr_add_c(a,2,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,3,1,c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,4,0,c2,c3,c1);
+ $LD r5,`0*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
+ #sqr_add_c2(a,5,0,c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,4,1,c3,c1,c2);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,3,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
+ #sqr_add_c(a,3,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ #sqr_add_c2(a,4,2,c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,5,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,6,0,c1,c2,c3);
+ $LD r5,`0*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
+ #sqr_add_c2(a,7,0,c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,6,1,c2,c3,c1);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,5,2,c2,c3,c1);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,4,3,c2,c3,c1);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
+ #sqr_add_c(a,4,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,5,3,c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,6,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,7,1,c3,c1,c2);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
+ #sqr_add_c2(a,7,2,c1,c2,c3);
+ $LD r5,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,6,3,c1,c2,c3);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,5,4,c1,c2,c3);
+ $LD r5,`4*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
+ #sqr_add_c(a,5,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,6,4,c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,7,3,c2,c3,c1);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
+ #sqr_add_c2(a,7,4,c3,c1,c2);
+ $LD r5,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,6,5,c3,c1,c2);
+ $LD r5,`5*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
+ #sqr_add_c(a,6,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ #sqr_add_c2(a,7,5,c1,c2,c3)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
+
+ #sqr_add_c2(a,7,6,c2,c3,c1)
+ $LD r5,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
+ #sqr_add_c(a,7,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
+ $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
+
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_comba4" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_comba4:
+#
+# This is an optimized version of the bn_mul_comba4 routine.
+#
+# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+# r3 contains r
+# r4 contains a
+# r5 contains b
+# r6, r7 are the 2 BN_ULONGs being multiplied.
+# r8, r9 are the results of the 32x32 giving 64 multiply.
+# r10, r11, r12 are the equivalents of c1, c2, and c3.
+#
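+#
+# For reference only (not part of this file, hypothetical name): what
+# bn_mul_comba4 computes is the plain 4x4-word product accumulated
+# column by column, e.g. in C with 64-bit words and unsigned __int128:
+#
+#     #include <stdint.h>
+#
+#     static void mul_comba4_ref(uint64_t r[8],
+#                                const uint64_t a[4], const uint64_t b[4])
+#     {
+#         unsigned __int128 acc = 0;   /* low 128 bits of the column sum */
+#         uint64_t top = 0;            /* carries out of the accumulator */
+#         for (int k = 0; k < 7; k++) {               /* columns i+j == k */
+#             for (int i = 0; i < 4; i++) {
+#                 int j = k - i;
+#                 if (j < 0 || j > 3) continue;
+#                 unsigned __int128 t = (unsigned __int128)a[i] * b[j];
+#                 acc += t;
+#                 top += (acc < t);    /* detect 128-bit wrap-around */
+#             }
+#             r[k] = (uint64_t)acc;    /* emit one result word */
+#             acc  = (acc >> 64) | ((unsigned __int128)top << 64);
+#             top  = 0;
+#         }
+#         r[7] = (uint64_t)acc;        /* final carry word */
+#     }
+#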
+ xor r0,r0,r0 #r0=0. Used in addze below.
+ #mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r10,r6,r7
+ $UMULH r11,r6,r7
+ $ST r10,`0*$BNSZ`(r3) #r[0]=c1
+ #mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r0
+ addze r10,r0
+ #mul_add_c(a[1],b[0],c2,c3,c1);
+ $LD r6, `1*$BNSZ`(r4)
+ $LD r7, `0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ $ST r11,`1*$BNSZ`(r3) #r[1]=c2
+ #mul_add_c(a[2],b[0],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r0
+ #mul_add_c(a[1],b[1],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ #mul_add_c(a[0],b[2],c3,c1,c2);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ $ST r12,`2*$BNSZ`(r3) #r[2]=c3
+ #mul_add_c(a[0],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r0
+ #mul_add_c(a[1],b[2],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ #mul_add_c(a[2],b[1],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ #mul_add_c(a[3],b[0],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ $ST r10,`3*$BNSZ`(r3) #r[3]=c1
+ #mul_add_c(a[3],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r0
+ #mul_add_c(a[2],b[2],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ #mul_add_c(a[1],b[3],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ $ST r11,`4*$BNSZ`(r3) #r[4]=c2
+ #mul_add_c(a[2],b[3],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r0
+ #mul_add_c(a[3],b[2],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ $ST r12,`5*$BNSZ`(r3) #r[5]=c3
+ #mul_add_c(a[3],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+
+ $ST r10,`6*$BNSZ`(r3) #r[6]=c1
+ $ST r11,`7*$BNSZ`(r3) #r[7]=c2
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_comba8" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_comba8:
+#
+# Optimized version of the bn_mul_comba8 routine.
+#
+# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+# r3 contains r
+# r4 contains a
+# r5 contains b
+# r6, r7 are the 2 BN_ULONGs being multiplied.
+# r8, r9 are the results of the 32x32 giving 64 multiply.
+# r10, r11, r12 are the equivalents of c1, c2, and c3.
+#
+ xor r0,r0,r0 #r0=0. Used in addze below.
+
+ #mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4) #a[0]
+ $LD r7,`0*$BNSZ`(r5) #b[0]
+ $UMULL r10,r6,r7
+ $UMULH r11,r6,r7
+ $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
+ #mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+	addze	r12,r9		# since we didn't set r12 to zero before.
+ addze r10,r0
+ #mul_add_c(a[1],b[0],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
+ #mul_add_c(a[2],b[0],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[1],b[1],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[0],b[2],c3,c1,c2);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
+ #mul_add_c(a[0],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[1],b[2],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+
+ #mul_add_c(a[2],b[1],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[3],b[0],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
+ #mul_add_c(a[4],b[0],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[3],b[1],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[2],b[2],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[1],b[3],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[0],b[4],c2,c3,c1);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
+ #mul_add_c(a[0],b[5],c3,c1,c2);
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[1],b[4],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[2],b[3],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[3],b[2],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[4],b[1],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[5],b[0],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
+ #mul_add_c(a[6],b[0],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[5],b[1],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[4],b[2],c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[3],b[3],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[2],b[4],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[1],b[5],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[0],b[6],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
+ #mul_add_c(a[0],b[7],c2,c3,c1);
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[1],b[6],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[2],b[5],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[3],b[4],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[4],b[3],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[5],b[2],c2,c3,c1);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[6],b[1],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[7],b[0],c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
+ #mul_add_c(a[7],b[1],c3,c1,c2);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[6],b[2],c3,c1,c2);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[5],b[3],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[4],b[4],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[3],b[5],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[2],b[6],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[1],b[7],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
+ #mul_add_c(a[2],b[7],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[3],b[6],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[4],b[5],c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[5],b[4],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[6],b[3],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[7],b[2],c1,c2,c3);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
+ #mul_add_c(a[7],b[3],c2,c3,c1);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[6],b[4],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[5],b[5],c2,c3,c1);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[4],b[6],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[3],b[7],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
+ #mul_add_c(a[4],b[7],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[5],b[6],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[6],b[5],c3,c1,c2);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[7],b[4],c3,c1,c2);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
+ #mul_add_c(a[7],b[5],c1,c2,c3);
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[6],b[6],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[5],b[7],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
+ #mul_add_c(a[6],b[7],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[7],b[6],c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
+ #mul_add_c(a[7],b[7],c3,c1,c2);
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
+ $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sub_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+#
+.align 4
+.bn_sub_words:
+#
+# Handcoded version of bn_sub_words
+#
+#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = b
+# r6 = n
+#
+# Note: No loop unrolling done since this is not a performance
+# critical loop.
+
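+#
+# For reference only (not part of this file, hypothetical name): a plain
+# C equivalent of the borrow-propagating loop below, with 64-bit words:
+#
+#     #include <stdint.h>
+#
+#     /* r = a - b over n words; returns 1 if a < b (borrow out), else 0 */
+#     static uint64_t sub_words_ref(uint64_t *r, const uint64_t *a,
+#                                   const uint64_t *b, int n)
+#     {
+#         uint64_t borrow = 0;
+#         for (int i = 0; i < n; i++) {
+#             uint64_t t   = a[i] - b[i];
+#             uint64_t out = t - borrow;
+#             borrow = (a[i] < b[i]) | (t < borrow);   /* new borrow */
+#             r[i] = out;
+#         }
+#         return borrow;
+#     }
+#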
+ xor r0,r0,r0 #set r0 = 0
+#
+# check for r6 = 0 AND set carry bit.
+#
+ subfc. r7,r0,r6 # If r6 is 0 then result is 0.
+ # if r6 > 0 then result !=0
+ # In either case carry bit is set.
+ beq Lppcasm_sub_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ addi r5,r5,-$BNSZ
+ mtctr r6
+Lppcasm_sub_mainloop:
+ $LDU r7,$BNSZ(r4)
+ $LDU r8,$BNSZ(r5)
+ subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8)
+ # if carry = 1 this is r7-r8. Else it
+ # is r7-r8 -1 as we need.
+ $STU r6,$BNSZ(r3)
+ bdnz- Lppcasm_sub_mainloop
+Lppcasm_sub_adios:
+ subfze r3,r0 # if carry bit is set then r3 = 0 else -1
+ andi. r3,r3,1 # keep only last bit.
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_add_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_add_words:
+#
+# Handcoded version of bn_add_words
+#
+#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = b
+# r6 = n
+#
+# Note: No loop unrolling done since this is not a performance
+# critical loop.
+
+ xor r0,r0,r0
+#
+# check for r6 = 0. Is this needed?
+#
+ addic. r6,r6,0 #test r6 and clear carry bit.
+ beq Lppcasm_add_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ addi r5,r5,-$BNSZ
+ mtctr r6
+Lppcasm_add_mainloop:
+ $LDU r7,$BNSZ(r4)
+ $LDU r8,$BNSZ(r5)
+ adde r8,r7,r8
+ $STU r8,$BNSZ(r3)
+ bdnz- Lppcasm_add_mainloop
+Lppcasm_add_adios:
+ addze r3,r0 #return carry bit.
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_div_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_div_words:
+#
+# This is a cleaned-up version of code generated by
+# the AIX compiler. The only optimization is to use
+# the PPC instruction to count leading zeros instead
+# of a call to num_bits_word. Since this was compiled
+# only at -O2, it could probably be squeezed further.
+#
+# r3 = h
+# r4 = l
+# r5 = d
+
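+#
+# For reference only (not part of this file, hypothetical name): the
+# contract here is to divide the two-word value h:l by d and return the
+# one-word quotient, assuming the quotient fits in a single word (h < d);
+# with 64-bit words and unsigned __int128 that is simply:
+#
+#     #include <stdint.h>
+#
+#     static uint64_t div_words_ref(uint64_t h, uint64_t l, uint64_t d)
+#     {
+#         if (d == 0)
+#             return (uint64_t)-1;     /* matches the d==0 early out below */
+#         unsigned __int128 n = ((unsigned __int128)h << 64) | l;
+#         return (uint64_t)(n / d);
+#     }
+#
+# The assembly below has no 2-word/1-word divide instruction to lean on,
+# so it builds the quotient from two half-word digits instead.
+#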
+ $UCMPI 0,r5,0 # compare r5 and 0
+ bne Lppcasm_div1 # proceed if d!=0
+ li r3,-1 # d=0 return -1
+ blr
+Lppcasm_div1:
+ xor r0,r0,r0 #r0=0
+ li r8,$BITS
+ $CNTLZ. r7,r5 #r7 = num leading 0s in d.
+ beq Lppcasm_div2 #proceed if no leading zeros
+ subf r8,r7,r8 #r8 = BN_num_bits_word(d)
+ $SHR. r9,r3,r8 #are there any bits above r8'th?
+ $TR 16,r9,r0 #if there're, signal to dump core...
+Lppcasm_div2:
+ $UCMP 0,r3,r5 #h>=d?
+ blt Lppcasm_div3 #goto Lppcasm_div3 if not
+ subf r3,r5,r3 #h-=d ;
+Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
+ cmpi 0,0,r7,0 # is (i == 0)?
+ beq Lppcasm_div4
+ $SHL r3,r3,r7 # h = (h<< i)
+ $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
+ $SHL r5,r5,r7 # d<<=i
+ or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
+ $SHL r4,r4,r7 # l <<=i
+Lppcasm_div4:
+ $SHRI r9,r5,`$BITS/2` # r9 = dh
+ # dl will be computed when needed
+ # as it saves registers.
+ li r6,2 #r6=2
+ mtctr r6 #counter will be in count.
+Lppcasm_divouterloop:
+ $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
+ $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
+ # compute here for innerloop.
+ $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
+ bne Lppcasm_div5 # goto Lppcasm_div5 if not
+
+ li r8,-1
+ $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
+ b Lppcasm_div6
+Lppcasm_div5:
+ $UDIV r8,r3,r9 #q = h/dh
+Lppcasm_div6:
+ $UMULL r12,r9,r8 #th = q*dh
+ $CLRU r10,r5,`$BITS/2` #r10=dl
+ $UMULL r6,r8,r10 #tl = q*dl
+
+Lppcasm_divinnerloop:
+ subf r10,r12,r3 #t = h -th
+ $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
+ addic. r7,r7,0 #test if r7 == 0. used below.
+ # now want to compute
+ # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
+ # the following 2 instructions do that
+ $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
+ or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
+ $UCMP cr1,r6,r7 # compare (tl <= r7)
+ bne Lppcasm_divinnerexit
+ ble cr1,Lppcasm_divinnerexit
+ addi r8,r8,-1 #q--
+ subf r12,r9,r12 #th -=dh
+ $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
+ subf r6,r10,r6 #tl -=dl
+ b Lppcasm_divinnerloop
+Lppcasm_divinnerexit:
+ $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
+ $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
+ $UCMP cr1,r4,r11 # compare l and tl
+ add r12,r12,r10 # th+=t
+ bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
+ addi r12,r12,1 # th++
+Lppcasm_div7:
+ subf r11,r11,r4 #r11=l-tl
+ $UCMP cr1,r3,r12 #compare h and th
+ bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
+ addi r8,r8,-1 # q--
+ add r3,r5,r3 # h+=d
+Lppcasm_div8:
+ subf r12,r12,r3 #r12 = h-th
+ $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
+ # want to compute
+ # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
+ # the following 2 instructions will do this.
+ $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
+ $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
+ bdz Lppcasm_div9 #if (count==0) break ;
+ $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
+ b Lppcasm_divouterloop
+Lppcasm_div9:
+ or r3,r8,r0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+.align 4
+.bn_sqr_words:
+#
+# Optimized version of bn_sqr_words
+#
+# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = n
+#
+# r6 = a[i].
+# r7,r8 = product.
+#
+# No unrolling done here. Not performance critical.
+
+ addic. r5,r5,0 #test r5.
+ beq Lppcasm_sqr_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ mtctr r5
+Lppcasm_sqr_mainloop:
+ #sqr(r[0],r[1],a[0]);
+ $LDU r6,$BNSZ(r4)
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ $STU r7,$BNSZ(r3)
+ $STU r8,$BNSZ(r3)
+ bdnz- Lppcasm_sqr_mainloop
+Lppcasm_sqr_adios:
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_words:
+#
+# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#
+# r3 = rp
+# r4 = ap
+# r5 = num
+# r6 = w
+ xor r0,r0,r0
+ xor r12,r12,r12 # used for carry
+ rlwinm. r7,r5,30,2,31 # num >> 2
+ beq Lppcasm_mw_REM
+ mtctr r7
+Lppcasm_mw_LOOP:
+ #mul(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ #addze r10,r10 #carry is NOT ignored.
+ #will be taken care of
+ #in second spin below
+ #using adde.
+ $ST r9,`0*$BNSZ`(r3)
+ #mul(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ #addze r12,r12
+ $ST r11,`1*$BNSZ`(r3)
+ #mul(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ adde r9,r9,r12
+ #addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+ #mul_add(rp[3],ap[3],w,c1);
+ $LD r8,`3*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ addze r12,r12 #this spin we collect carry into
+ #r12
+ $ST r11,`3*$BNSZ`(r3)
+
+ addi r3,r3,`4*$BNSZ`
+ addi r4,r4,`4*$BNSZ`
+ bdnz- Lppcasm_mw_LOOP
+
+Lppcasm_mw_REM:
+ andi. r5,r5,0x3
+ beq Lppcasm_mw_OVER
+ #mul(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`0*$BNSZ`(r3)
+ addi r12,r10,0
+
+ addi r5,r5,-1
+ cmpli 0,0,r5,0
+ beq Lppcasm_mw_OVER
+
+
+ #mul(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`1*$BNSZ`(r3)
+ addi r12,r10,0
+
+ addi r5,r5,-1
+ cmpli 0,0,r5,0
+ beq Lppcasm_mw_OVER
+
+ #mul_add(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+ addi r12,r10,0
+
+Lppcasm_mw_OVER:
+ addi r3,r12,0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_add_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_add_words:
+#
+# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#
+# r3 = rp
+# r4 = ap
+# r5 = num
+# r6 = w
+#
+# empirical evidence suggests that the unrolled version performs best!
+#
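+#
+# For reference only (not part of this file, hypothetical name): the
+# generic operation the unrolled code below performs, four words per
+# spin, is rp[] += ap[]*w with a propagated carry word; in C with 64-bit
+# words and unsigned __int128:
+#
+#     #include <stdint.h>
+#
+#     static uint64_t mul_add_words_ref(uint64_t *rp, const uint64_t *ap,
+#                                       int num, uint64_t w)
+#     {
+#         unsigned __int128 carry = 0;
+#         for (int i = 0; i < num; i++) {
+#             carry += (unsigned __int128)ap[i] * w + rp[i];
+#             rp[i]  = (uint64_t)carry;    /* low word stays in rp */
+#             carry >>= 64;                /* high word rides along */
+#         }
+#         return (uint64_t)carry;          /* final carry is returned */
+#     }
+#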
+ xor r0,r0,r0 #r0 = 0
+ xor r12,r12,r12 #r12 = 0 . used for carry
+ rlwinm. r7,r5,30,2,31 # num >> 2
+ beq Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover
+ mtctr r7
+Lppcasm_maw_mainloop:
+ #mul_add(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $LD r11,`0*$BNSZ`(r3)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12 #r12 is carry.
+ addze r10,r10
+ addc r9,r9,r11
+ #addze r10,r10
+ #the above instruction addze
+ #is NOT needed. Carry will NOT
+ #be ignored. It's not affected
+ #by multiply and will be collected
+ #in the next spin
+ $ST r9,`0*$BNSZ`(r3)
+
+ #mul_add(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $LD r9,`1*$BNSZ`(r3)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10 #r10 is carry.
+ addze r12,r12
+ addc r11,r11,r9
+ #addze r12,r12
+ $ST r11,`1*$BNSZ`(r3)
+
+ #mul_add(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $LD r11,`2*$BNSZ`(r3)
+ $UMULH r10,r6,r8
+ adde r9,r9,r12
+ addze r10,r10
+ addc r9,r9,r11
+ #addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+
+ #mul_add(rp[3],ap[3],w,c1);
+ $LD r8,`3*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $LD r9,`3*$BNSZ`(r3)
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ addze r12,r12
+ addc r11,r11,r9
+ addze r12,r12
+ $ST r11,`3*$BNSZ`(r3)
+ addi r3,r3,`4*$BNSZ`
+ addi r4,r4,`4*$BNSZ`
+ bdnz- Lppcasm_maw_mainloop
+
+Lppcasm_maw_leftover:
+ andi. r5,r5,0x3
+ beq Lppcasm_maw_adios
+ addi r3,r3,-$BNSZ
+ addi r4,r4,-$BNSZ
+ #mul_add(rp[0],ap[0],w,c1);
+ mtctr r5
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+ bdz Lppcasm_maw_adios
+ #mul_add(rp[1],ap[1],w,c1);
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+ bdz Lppcasm_maw_adios
+ #mul_add(rp[2],ap[2],w,c1);
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+Lppcasm_maw_adios:
+ addi r3,r12,0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+ .align 4
+EOF
+$data =~ s/\`([^\`]*)\`/eval $1/gem;
+print $data;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc64-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc64-mont.pl
new file mode 100644
index 00000000..a14e769a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/ppc64-mont.pl
@@ -0,0 +1,1088 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# December 2007
+
+# The reason for undertaking this effort is basically the following.
+# Even though the Power 6 CPU operates at an incredible 4.7GHz clock
+# frequency, its PKI performance was observed to be less than
+# impressive, essentially as fast as a 1.8GHz PPC970, or 2.6 times(!)
+# slower than one would hope. Well, it's not surprising that IBM had to
+# make some sacrifices to boost the clock frequency that much, but no
+# overall improvement? Having observed how much difference switching to
+# the FPU made on UltraSPARC, playing the same stunt on Power 6 appeared
+# appropriate... Unfortunately the resulting performance improvement is
+# not as impressive, ~30%, and in absolute terms is still very far from
+# what one would expect from a 4.7GHz CPU. There is a chance that I'm
+# doing something wrong, but in the absence of assembler-level
+# micro-profiling data or at least a decent platform guide I can't
+# tell... Or better results might be achieved with VMX... Anyway, this
+# module provides *worse* performance on other PowerPC implementations:
+# ~15-40% slower on PPC970 depending on key length and ~40% slower on
+# Power 5 for all key lengths. As it's obviously inappropriate as a
+# "best all-round" alternative, it has to be complemented with run-time
+# CPU family detection. Oh! It should also be noted that, unlike on
+# other PowerPC implementations, the IALU ppc-mont.pl module performs
+# *suboptimally* on >=1024-bit key lengths on Power 6. It should also be
+# noted that *everything* said so far applies to 64-bit builds! As far
+# as a 32-bit application executed on a 64-bit CPU goes, this module is
+# likely to become the preferred choice, because it's easy to adapt for
+# that case and *is* faster than 32-bit ppc-mont.pl on *all* processors.
+
+# February 2008
+
+# Micro-profiling assisted optimization results in ~15% improvement
+# over original ppc64-mont.pl version, or overall ~50% improvement
+# over ppc.pl module on Power 6. If compared to ppc-mont.pl on same
+# Power 6 CPU, this module is 5-150% faster depending on key length,
+# [hereafter] more for longer keys. But if compared to ppc-mont.pl
+# on 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive
+# in absolute terms, but it's apparently the way Power 6 is...
+
+# December 2009
+
+# Adapted for a 32-bit build, this module delivers a 25-120% (yes, more
+# than *twice* for longer keys) performance improvement over 32-bit
+# ppc-mont.pl on a 1.8GHz PPC970. However! This implementation still uses
+# 64-bit integer operations, and the trouble is that most PPC
+# operating systems don't preserve the upper halves of general purpose
+# registers upon 32-bit signal delivery. They do preserve them upon
+# context switch, but not upon signalling:-( This means that asynchronous
+# signals have to be blocked upon entry to this subroutine. Signal
+# masking (and of course complementary unmasking) has quite an impact
+# on performance, naturally larger for shorter keys. It's so severe
+# that 512-bit key performance can be as low as 1/3 of the expected one.
+# This is why, on these OSes, this routine is engaged only for longer
+# key operations; see crypto/ppccap.c for further details. MacOS X
+# is an exception and doesn't require signal masking, and that's where
+# the improvement coefficients above were collected. For the others, the
+# alternative would be to break the dependence on the upper halves of
+# GPRs by sticking to 32-bit integer operations...
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $SIZE_T=4;
+ $RZONE= 224;
+ $fname= "bn_mul_mont_fpu64";
+
+ $STUX= "stwux"; # store indexed and update
+ $PUSH= "stw";
+ $POP= "lwz";
+} elsif ($flavour =~ /64/) {
+ $SIZE_T=8;
+ $RZONE= 288;
+ $fname= "bn_mul_mont_fpu64";
+
+ # same as above, but 64-bit mnemonics...
+ $STUX= "stdux"; # store indexed and update
+ $PUSH= "std";
+ $POP= "ld";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=64; # padded frame header
+$TRANSFER=16*8;
+
+$carry="r0";
+$sp="r1";
+$toc="r2";
+$rp="r3"; $ovf="r3";
+$ap="r4";
+$bp="r5";
+$np="r6";
+$n0="r7";
+$num="r8";
+$rp="r9"; # $rp is reassigned
+$tp="r10";
+$j="r11";
+$i="r12";
+# non-volatile registers
+$nap_d="r22"; # interleaved ap and np in double format
+$a0="r23"; # ap[0]
+$t0="r24"; # temporary registers
+$t1="r25";
+$t2="r26";
+$t3="r27";
+$t4="r28";
+$t5="r29";
+$t6="r30";
+$t7="r31";
+
+# PPC offers enough register bank capacity to unroll inner loops twice
+#
+# ..A3A2A1A0
+# dcba
+# -----------
+# A0a
+# A0b
+# A0c
+# A0d
+# A1a
+# A1b
+# A1c
+# A1d
+# A2a
+# A2b
+# A2c
+# A2d
+# A3a
+# A3b
+# A3c
+# A3d
+# ..a
+# ..b
+#
+$ba="f0"; $bb="f1"; $bc="f2"; $bd="f3";
+$na="f4"; $nb="f5"; $nc="f6"; $nd="f7";
+$dota="f8"; $dotb="f9";
+$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13";
+$N0="f20"; $N1="f21"; $N2="f22"; $N3="f23";
+$T0a="f24"; $T0b="f25";
+$T1a="f26"; $T1b="f27";
+$T2a="f28"; $T2b="f29";
+$T3a="f30"; $T3b="f31";
+
+# sp----------->+-------------------------------+
+# | saved sp |
+# +-------------------------------+
+# . .
+# +64 +-------------------------------+
+# | 16 gpr<->fpr transfer zone |
+# . .
+# . .
+# +16*8 +-------------------------------+
+# | __int64 tmp[-1] |
+# +-------------------------------+
+# | __int64 tmp[num] |
+# . .
+# . .
+# . .
+# +(num+1)*8 +-------------------------------+
+# | padding to 64 byte boundary |
+# . .
+# +X +-------------------------------+
+# | double nap_d[4*num] |
+# . .
+# . .
+# . .
+# +-------------------------------+
+# . .
+# -12*size_t +-------------------------------+
+# | 10 saved gpr, r22-r31 |
+# . .
+# . .
+# -12*8 +-------------------------------+
+# | 12 saved fpr, f20-f31 |
+# . .
+# . .
+# +-------------------------------+
+
+$code=<<___;
+.machine "any"
+.text
+
+.globl .$fname
+.align 5
+.$fname:
+ cmpwi $num,`3*8/$SIZE_T`
+ mr $rp,r3 ; $rp is reassigned
+ li r3,0 ; possible "not handled" return code
+ bltlr-
+ andi. r0,$num,`16/$SIZE_T-1` ; $num has to be "even"
+ bnelr-
+
+ slwi $num,$num,`log($SIZE_T)/log(2)` ; num*=sizeof(BN_LONG)
+ li $i,-4096
+ slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num
+ add $tp,$tp,$num ; place for tp[num+1]
+ addi $tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE`
+ subf $tp,$tp,$sp ; $sp-$tp
+ and $tp,$tp,$i ; minimize TLB usage
+ subf $tp,$sp,$tp ; $tp-$sp
+ mr $i,$sp
+ $STUX $sp,$sp,$tp ; alloca
+
+ $PUSH r22,`-12*8-10*$SIZE_T`($i)
+ $PUSH r23,`-12*8-9*$SIZE_T`($i)
+ $PUSH r24,`-12*8-8*$SIZE_T`($i)
+ $PUSH r25,`-12*8-7*$SIZE_T`($i)
+ $PUSH r26,`-12*8-6*$SIZE_T`($i)
+ $PUSH r27,`-12*8-5*$SIZE_T`($i)
+ $PUSH r28,`-12*8-4*$SIZE_T`($i)
+ $PUSH r29,`-12*8-3*$SIZE_T`($i)
+ $PUSH r30,`-12*8-2*$SIZE_T`($i)
+ $PUSH r31,`-12*8-1*$SIZE_T`($i)
+ stfd f20,`-12*8`($i)
+ stfd f21,`-11*8`($i)
+ stfd f22,`-10*8`($i)
+ stfd f23,`-9*8`($i)
+ stfd f24,`-8*8`($i)
+ stfd f25,`-7*8`($i)
+ stfd f26,`-6*8`($i)
+ stfd f27,`-5*8`($i)
+ stfd f28,`-4*8`($i)
+ stfd f29,`-3*8`($i)
+ stfd f30,`-2*8`($i)
+ stfd f31,`-1*8`($i)
+___
+$code.=<<___ if ($SIZE_T==8);
+ ld $a0,0($ap) ; pull ap[0] value
+ ld $n0,0($n0) ; pull n0[0] value
+ ld $t3,0($bp) ; bp[0]
+___
+$code.=<<___ if ($SIZE_T==4);
+ mr $t1,$n0
+ lwz $a0,0($ap) ; pull ap[0,1] value
+ lwz $t0,4($ap)
+ lwz $n0,0($t1) ; pull n0[0,1] value
+ lwz $t1,4($t1)
+ lwz $t3,0($bp) ; bp[0,1]
+ lwz $t2,4($bp)
+ insrdi $a0,$t0,32,0
+ insrdi $n0,$t1,32,0
+ insrdi $t3,$t2,32,0
+___
+$code.=<<___;
+ addi $tp,$sp,`$FRAME+$TRANSFER+8+64`
+ li $i,-64
+ add $nap_d,$tp,$num
+ and $nap_d,$nap_d,$i ; align to 64 bytes
+
+ mulld $t7,$a0,$t3 ; ap[0]*bp[0]
+ ; nap_d is off by 1, because it's used with stfdu/lfdu
+ addi $nap_d,$nap_d,-8
+ srwi $j,$num,`3+1` ; counter register, num/2
+ mulld $t7,$t7,$n0 ; tp[0]*n0
+ addi $j,$j,-1
+ addi $tp,$sp,`$FRAME+$TRANSFER-8`
+ li $carry,0
+ mtctr $j
+
+ ; transfer bp[0] to FPU as 4x16-bit values
+ extrdi $t0,$t3,16,48
+ extrdi $t1,$t3,16,32
+ extrdi $t2,$t3,16,16
+ extrdi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp)
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+ ; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
+ extrdi $t4,$t7,16,48
+ extrdi $t5,$t7,16,32
+ extrdi $t6,$t7,16,16
+ extrdi $t7,$t7,16,0
+ std $t4,`$FRAME+32`($sp)
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+___
+$code.=<<___ if ($SIZE_T==8);
+ lwz $t0,4($ap) ; load a[j] as 32-bit word pair
+ lwz $t1,0($ap)
+ lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
+ lwz $t3,8($ap)
+ lwz $t4,4($np) ; load n[j] as 32-bit word pair
+ lwz $t5,0($np)
+ lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
+ lwz $t7,8($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t1,4($ap)
+ lwz $t2,8($ap)
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
+ lfd $ba,`$FRAME+0`($sp)
+ lfd $bb,`$FRAME+8`($sp)
+ lfd $bc,`$FRAME+16`($sp)
+ lfd $bd,`$FRAME+24`($sp)
+ lfd $na,`$FRAME+32`($sp)
+ lfd $nb,`$FRAME+40`($sp)
+ lfd $nc,`$FRAME+48`($sp)
+ lfd $nd,`$FRAME+56`($sp)
+ std $t0,`$FRAME+64`($sp)
+ std $t1,`$FRAME+72`($sp)
+ std $t2,`$FRAME+80`($sp)
+ std $t3,`$FRAME+88`($sp)
+ std $t4,`$FRAME+96`($sp)
+ std $t5,`$FRAME+104`($sp)
+ std $t6,`$FRAME+112`($sp)
+ std $t7,`$FRAME+120`($sp)
+ fcfid $ba,$ba
+ fcfid $bb,$bb
+ fcfid $bc,$bc
+ fcfid $bd,$bd
+ fcfid $na,$na
+ fcfid $nb,$nb
+ fcfid $nc,$nc
+ fcfid $nd,$nd
+
+ lfd $A0,`$FRAME+64`($sp)
+ lfd $A1,`$FRAME+72`($sp)
+ lfd $A2,`$FRAME+80`($sp)
+ lfd $A3,`$FRAME+88`($sp)
+ lfd $N0,`$FRAME+96`($sp)
+ lfd $N1,`$FRAME+104`($sp)
+ lfd $N2,`$FRAME+112`($sp)
+ lfd $N3,`$FRAME+120`($sp)
+ fcfid $A0,$A0
+ fcfid $A1,$A1
+ fcfid $A2,$A2
+ fcfid $A3,$A3
+ fcfid $N0,$N0
+ fcfid $N1,$N1
+ fcfid $N2,$N2
+ fcfid $N3,$N3
+ addi $ap,$ap,16
+ addi $np,$np,16
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ stfd $A0,8($nap_d) ; save a[j] in double format
+ stfd $A1,16($nap_d)
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ stfd $A2,24($nap_d) ; save a[j+1] in double format
+ stfd $A3,32($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ stfd $N0,40($nap_d) ; save n[j] in double format
+ stfd $N1,48($nap_d)
+ fmul $T0a,$A0,$ba
+ fmul $T0b,$A0,$bb
+ stfd $N2,56($nap_d) ; save n[j+1] in double format
+ stfdu $N3,64($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+
+.align 5
+L1st:
+___
+$code.=<<___ if ($SIZE_T==8);
+ lwz $t0,4($ap) ; load a[j] as 32-bit word pair
+ lwz $t1,0($ap)
+ lwz $t2,12($ap) ; load a[j+1] as 32-bit word pair
+ lwz $t3,8($ap)
+ lwz $t4,4($np) ; load n[j] as 32-bit word pair
+ lwz $t5,0($np)
+ lwz $t6,12($np) ; load n[j+1] as 32-bit word pair
+ lwz $t7,8($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t1,4($ap)
+ lwz $t2,8($ap)
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
+ std $t0,`$FRAME+64`($sp)
+ std $t1,`$FRAME+72`($sp)
+ std $t2,`$FRAME+80`($sp)
+ std $t3,`$FRAME+88`($sp)
+ std $t4,`$FRAME+96`($sp)
+ std $t5,`$FRAME+104`($sp)
+ std $t6,`$FRAME+112`($sp)
+ std $t7,`$FRAME+120`($sp)
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ lfd $A0,`$FRAME+64`($sp)
+ lfd $A1,`$FRAME+72`($sp)
+ lfd $A2,`$FRAME+80`($sp)
+ lfd $A3,`$FRAME+88`($sp)
+ lfd $N0,`$FRAME+96`($sp)
+ lfd $N1,`$FRAME+104`($sp)
+ lfd $N2,`$FRAME+112`($sp)
+ lfd $N3,`$FRAME+120`($sp)
+ fcfid $A0,$A0
+ fcfid $A1,$A1
+ fcfid $A2,$A2
+ fcfid $A3,$A3
+ fcfid $N0,$N0
+ fcfid $N1,$N1
+ fcfid $N2,$N2
+ fcfid $N3,$N3
+ addi $ap,$ap,16
+ addi $np,$np,16
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ stfd $A0,8($nap_d) ; save a[j] in double format
+ stfd $A1,16($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmadd $T0a,$A0,$ba,$dota
+ fmadd $T0b,$A0,$bb,$dotb
+ stfd $A2,24($nap_d) ; save a[j+1] in double format
+ stfd $A3,32($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ stfd $N0,40($nap_d) ; save n[j] in double format
+ stfd $N1,48($nap_d)
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ add $t0,$t0,$carry ; can not overflow
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+ stfd $N2,56($nap_d) ; save n[j+1] in double format
+ stfdu $N3,64($nap_d)
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ insrdi $t0,$t1,16,32
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ add $t2,$t2,$carry
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ srdi $carry,$t2,16
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ srdi $carry,$t3,16
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ add $t4,$t4,$carry
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ srdi $carry,$t4,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ add $t6,$t6,$carry
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ srdi $carry,$t6,16
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ insrdi $t4,$t6,16,16
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+ std $t0,8($tp) ; tp[j-1]
+ stdu $t4,16($tp) ; tp[j]
+ bdnz- L1st
+
+ fctid $dota,$dota
+ fctid $dotb,$dotb
+
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ add $t0,$t0,$carry ; can not overflow
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ insrdi $t0,$t1,16,32
+ add $t2,$t2,$carry
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+ srdi $carry,$t4,16
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ ld $t6,`$FRAME+64`($sp)
+ ld $t7,`$FRAME+72`($sp)
+
+ std $t0,8($tp) ; tp[j-1]
+ stdu $t4,16($tp) ; tp[j]
+
+ add $t6,$t6,$carry ; can not overflow
+ srdi $carry,$t6,16
+ add $t7,$t7,$carry
+ insrdi $t6,$t7,48,0
+ srdi $ovf,$t7,48
+ std $t6,8($tp) ; tp[num-1]
+
+ slwi $t7,$num,2
+ subf $nap_d,$t7,$nap_d ; rewind pointer
+
+ li $i,8 ; i=1
+.align 5
+Louter:
+___
+$code.=<<___ if ($SIZE_T==8);
+ ldx $t3,$bp,$i ; bp[i]
+___
+$code.=<<___ if ($SIZE_T==4);
+ add $t0,$bp,$i
+ lwz $t3,0($t0) ; bp[i,i+1]
+ lwz $t0,4($t0)
+ insrdi $t3,$t0,32,0
+___
+$code.=<<___;
+ ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0]
+ mulld $t7,$a0,$t3 ; ap[0]*bp[i]
+
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ add $t7,$t7,$t6 ; ap[0]*bp[i]+tp[0]
+ li $carry,0
+ mulld $t7,$t7,$n0 ; tp[0]*n0
+ mtctr $j
+
+ ; transfer bp[i] to FPU as 4x16-bit values
+ extrdi $t0,$t3,16,48
+ extrdi $t1,$t3,16,32
+ extrdi $t2,$t3,16,16
+ extrdi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp)
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+ ; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
+ extrdi $t4,$t7,16,48
+ extrdi $t5,$t7,16,32
+ extrdi $t6,$t7,16,16
+ extrdi $t7,$t7,16,0
+ std $t4,`$FRAME+32`($sp)
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+ lfd $N0,40($nap_d) ; load n[j] in double format
+ lfd $N1,48($nap_d)
+ lfd $N2,56($nap_d) ; load n[j+1] in double format
+ lfdu $N3,64($nap_d)
+
+ lfd $ba,`$FRAME+0`($sp)
+ lfd $bb,`$FRAME+8`($sp)
+ lfd $bc,`$FRAME+16`($sp)
+ lfd $bd,`$FRAME+24`($sp)
+ lfd $na,`$FRAME+32`($sp)
+ lfd $nb,`$FRAME+40`($sp)
+ lfd $nc,`$FRAME+48`($sp)
+ lfd $nd,`$FRAME+56`($sp)
+
+ fcfid $ba,$ba
+ fcfid $bb,$bb
+ fcfid $bc,$bc
+ fcfid $bd,$bd
+ fcfid $na,$na
+ fcfid $nb,$nb
+ fcfid $nc,$nc
+ fcfid $nd,$nd
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmul $T0a,$A0,$ba
+ fmul $T0b,$A0,$bb
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+
+.align 5
+Linner:
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ lfd $N0,40($nap_d) ; load n[j] in double format
+ lfd $N1,48($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmadd $T0a,$A0,$ba,$dota
+ fmadd $T0b,$A0,$bb,$dotb
+ lfd $N2,56($nap_d) ; load n[j+1] in double format
+ lfdu $N3,64($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ add $t0,$t0,$carry ; can not overflow
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ insrdi $t0,$t1,16,32
+ ld $t1,8($tp) ; tp[j]
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ add $t2,$t2,$carry
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ add $t3,$t3,$carry
+ ldu $t2,16($tp) ; tp[j+1]
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ srdi $carry,$t4,16
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ add $t5,$t5,$carry
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ add $t7,$t7,$carry
+ addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+ addze $carry,$carry
+ std $t3,-16($tp) ; tp[j-1]
+ std $t5,-8($tp) ; tp[j]
+ bdnz- Linner
+
+ fctid $dota,$dota
+ fctid $dotb,$dotb
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ add $t0,$t0,$carry ; can not overflow
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ insrdi $t0,$t1,16,32
+ add $t2,$t2,$carry
+ ld $t1,8($tp) ; tp[j]
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ ldu $t2,16($tp) ; tp[j+1]
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+ srdi $carry,$t4,16
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ ld $t6,`$FRAME+64`($sp)
+ ld $t7,`$FRAME+72`($sp)
+
+ addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
+ adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
+ addze $carry,$carry
+
+ std $t3,-16($tp) ; tp[j-1]
+ std $t5,-8($tp) ; tp[j]
+
+	add	$carry,$carry,$ovf	; consume upmost overflow
+ add $t6,$t6,$carry ; can not overflow
+ srdi $carry,$t6,16
+ add $t7,$t7,$carry
+ insrdi $t6,$t7,48,0
+ srdi $ovf,$t7,48
+ std $t6,0($tp) ; tp[num-1]
+
+ slwi $t7,$num,2
+ addi $i,$i,8
+ subf $nap_d,$t7,$nap_d ; rewind pointer
+ cmpw $i,$num
+ blt- Louter
+___
+
+$code.=<<___ if ($SIZE_T==8);
+ subf $np,$num,$np ; rewind np
+ addi $j,$j,1 ; restore counter
+ subfc $i,$i,$i ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,`$FRAME+$TRANSFER+8`
+ addi $t4,$sp,`$FRAME+$TRANSFER+16`
+ addi $t5,$np,8
+ addi $t6,$rp,8
+ mtctr $j
+
+.align 4
+Lsub: ldx $t0,$tp,$i
+ ldx $t1,$np,$i
+ ldx $t2,$t4,$i
+ ldx $t3,$t5,$i
+ subfe $t0,$t1,$t0 ; tp[j]-np[j]
+ subfe $t2,$t3,$t2 ; tp[j+1]-np[j+1]
+ stdx $t0,$rp,$i
+ stdx $t2,$t6,$i
+ addi $i,$i,16
+ bdnz- Lsub
+
+ li $i,0
+ subfe $ovf,$i,$ovf ; handle upmost overflow bit
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+ addi $t7,$ap,8
+ mtctr $j
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ ldx $t0,$ap,$i
+ ldx $t1,$t7,$i
+ std $i,8($nap_d) ; zap nap_d
+ std $i,16($nap_d)
+ std $i,24($nap_d)
+ std $i,32($nap_d)
+ std $i,40($nap_d)
+ std $i,48($nap_d)
+ std $i,56($nap_d)
+ stdu $i,64($nap_d)
+ stdx $t0,$rp,$i
+ stdx $t1,$t6,$i
+ stdx $i,$tp,$i ; zap tp at once
+ stdx $i,$t4,$i
+ addi $i,$i,16
+ bdnz- Lcopy
+___
+$code.=<<___ if ($SIZE_T==4);
+ subf $np,$num,$np ; rewind np
+ addi $j,$j,1 ; restore counter
+ subfc $i,$i,$i ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ addi $np,$np,-4
+ addi $rp,$rp,-4
+ addi $ap,$sp,`$FRAME+$TRANSFER+4`
+ mtctr $j
+
+.align 4
+Lsub: ld $t0,8($tp) ; load tp[j..j+3] in 64-bit word order
+ ldu $t2,16($tp)
+ lwz $t4,4($np) ; load np[j..j+3] in 32-bit word order
+ lwz $t5,8($np)
+ lwz $t6,12($np)
+ lwzu $t7,16($np)
+ extrdi $t1,$t0,32,0
+ extrdi $t3,$t2,32,0
+ subfe $t4,$t4,$t0 ; tp[j]-np[j]
+ stw $t0,4($ap) ; save tp[j..j+3] in 32-bit word order
+ subfe $t5,$t5,$t1 ; tp[j+1]-np[j+1]
+ stw $t1,8($ap)
+ subfe $t6,$t6,$t2 ; tp[j+2]-np[j+2]
+ stw $t2,12($ap)
+ subfe $t7,$t7,$t3 ; tp[j+3]-np[j+3]
+ stwu $t3,16($ap)
+ stw $t4,4($rp)
+ stw $t5,8($rp)
+ stw $t6,12($rp)
+ stwu $t7,16($rp)
+ bdnz- Lsub
+
+ li $i,0
+ subfe $ovf,$i,$ovf ; handle upmost overflow bit
+ addi $tp,$sp,`$FRAME+$TRANSFER+4`
+ subf $rp,$num,$rp ; rewind rp
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ mtctr $j
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ lwz $t0,4($ap)
+ lwz $t1,8($ap)
+ lwz $t2,12($ap)
+ lwzu $t3,16($ap)
+ std $i,8($nap_d) ; zap nap_d
+ std $i,16($nap_d)
+ std $i,24($nap_d)
+ std $i,32($nap_d)
+ std $i,40($nap_d)
+ std $i,48($nap_d)
+ std $i,56($nap_d)
+ stdu $i,64($nap_d)
+ stw $t0,4($rp)
+ stw $t1,8($rp)
+ stw $t2,12($rp)
+ stwu $t3,16($rp)
+ std $i,8($tp) ; zap tp at once
+ stdu $i,16($tp)
+ bdnz- Lcopy
+___
+
+$code.=<<___;
+ $POP $i,0($sp)
+ li r3,1 ; signal "handled"
+ $POP r22,`-12*8-10*$SIZE_T`($i)
+ $POP r23,`-12*8-9*$SIZE_T`($i)
+ $POP r24,`-12*8-8*$SIZE_T`($i)
+ $POP r25,`-12*8-7*$SIZE_T`($i)
+ $POP r26,`-12*8-6*$SIZE_T`($i)
+ $POP r27,`-12*8-5*$SIZE_T`($i)
+ $POP r28,`-12*8-4*$SIZE_T`($i)
+ $POP r29,`-12*8-3*$SIZE_T`($i)
+ $POP r30,`-12*8-2*$SIZE_T`($i)
+ $POP r31,`-12*8-1*$SIZE_T`($i)
+ lfd f20,`-12*8`($i)
+ lfd f21,`-11*8`($i)
+ lfd f22,`-10*8`($i)
+ lfd f23,`-9*8`($i)
+ lfd f24,`-8*8`($i)
+ lfd f25,`-7*8`($i)
+ lfd f26,`-6*8`($i)
+ lfd f27,`-5*8`($i)
+ lfd f28,`-4*8`($i)
+ lfd f29,`-3*8`($i)
+ lfd f30,`-2*8`($i)
+ lfd f31,`-1*8`($i)
+ mr $sp,$i
+ blr
+ .long 0
+ .byte 0,12,4,0,0x8c,10,6,0
+ .long 0
+
+.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
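The FPU path in ppc64-mont.pl above stands or falls with one observation: 16-bit limbs keep every partial-product column exact in double precision. A standalone Perl sketch of that limb split and the integer carry propagation (made-up 32-bit inputs, 64-bit perl assumed, not part of the module):

    #!/usr/bin/env perl
    # Split operands into 16-bit limbs, form schoolbook partial products in
    # floating point, then carry-propagate in integer arithmetic.  A 16x16-bit
    # product is at most 32 bits, so a short column sum stays far below the
    # 53-bit mantissa of an IEEE double; the fcfid/fmadd/fctid round trip in
    # the assembler therefore loses nothing.
    use strict; use warnings;

    my ($a, $b) = (0x89abcdef, 0x76543210);          # 32-bit inputs for brevity
    my @a = map { ($a >> (16*$_)) & 0xffff } 0..1;   # 16-bit limbs of a
    my @b = map { ($b >> (16*$_)) & 0xffff } 0..1;   # 16-bit limbs of b

    my @col = (0) x 4;                               # column sums, kept as doubles
    for my $i (0..1) {
        for my $j (0..1) {
            $col[$i+$j] += 1.0 * $a[$i] * $b[$j];    # exact in double precision
        }
    }

    my ($carry, $r) = (0, 0);
    for my $k (0..3) {                               # integer carry propagation
        my $t = int($col[$k]) + $carry;
        $r    += ($t % 65536) << (16*$k);
        $carry = int($t / 65536);
    }
    printf "limb product %x, direct product %x\n", $r, $a * $b;

Four such limbs per 64-bit word is exactly why the code above transfers bp[i] and the (ap[0]*bp[i]+tp[0])*n0 value to the FPU as 4x16-bit values before the fcfid conversions.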
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-gf2m.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-gf2m.pl
new file mode 100644
index 00000000..9d18d40e
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-gf2m.pl
@@ -0,0 +1,221 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's kind of low-hanging mechanical port from C for
+# the time being... gcc 4.3 appeared to generate poor code, therefore
+# the effort. And indeed, the module delivers 55%-90%(*) improvement
+# on heaviest ECDSA verify and ECDH benchmarks for 163- and 571-bit
+# key lengths on z990, 30%-55%(*) - on z10, and 70%-110%(*) - on z196.
+# This is for 64-bit build. In 32-bit "highgprs" case improvement is
+# even higher, for example on z990 it was measured 80%-150%. ECDSA
+# sign is a modest 9%-12% faster. Keep in mind that these coefficients
+# are not the ones for bn_GF2m_mul_2x2 itself, as not all CPU time is
+# burnt in it...
+#
+# (*) gcc 4.1 was observed to deliver better results than gcc 4.3,
+# so that improvement coefficients can vary from one specific
+# setup to another.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
+$rp="%r2";
+$a1="%r3";
+$a0="%r4";
+$b1="%r5";
+$b0="%r6";
+
+$ra="%r14";
+$sp="%r15";
+
+@T=("%r0","%r1");
+@i=("%r12","%r13");
+
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(6..11));
+($lo,$hi,$b)=map("%r$_",(3..5)); $a=$lo; $mask=$a8;
+
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@function
+.align 16
+_mul_1x1:
+ lgr $a1,$a
+ sllg $a2,$a,1
+ sllg $a4,$a,2
+ sllg $a8,$a,3
+
+ srag $lo,$a1,63 # broadcast 63rd bit
+ nihh $a1,0x1fff
+ srag @i[0],$a2,63 # broadcast 62nd bit
+ nihh $a2,0x3fff
+ srag @i[1],$a4,63 # broadcast 61st bit
+ nihh $a4,0x7fff
+ ngr $lo,$b
+ ngr @i[0],$b
+ ngr @i[1],$b
+
+ lghi @T[0],0
+ lgr $a12,$a1
+ stg @T[0],`$stdframe+0*8`($sp) # tab[0]=0
+ xgr $a12,$a2
+ stg $a1,`$stdframe+1*8`($sp) # tab[1]=a1
+ lgr $a48,$a4
+ stg $a2,`$stdframe+2*8`($sp) # tab[2]=a2
+ xgr $a48,$a8
+ stg $a12,`$stdframe+3*8`($sp) # tab[3]=a1^a2
+ xgr $a1,$a4
+
+ stg $a4,`$stdframe+4*8`($sp) # tab[4]=a4
+ xgr $a2,$a4
+ stg $a1,`$stdframe+5*8`($sp) # tab[5]=a1^a4
+ xgr $a12,$a4
+ stg $a2,`$stdframe+6*8`($sp) # tab[6]=a2^a4
+ xgr $a1,$a48
+ stg $a12,`$stdframe+7*8`($sp) # tab[7]=a1^a2^a4
+ xgr $a2,$a48
+
+ stg $a8,`$stdframe+8*8`($sp) # tab[8]=a8
+ xgr $a12,$a48
+ stg $a1,`$stdframe+9*8`($sp) # tab[9]=a1^a8
+ xgr $a1,$a4
+ stg $a2,`$stdframe+10*8`($sp) # tab[10]=a2^a8
+ xgr $a2,$a4
+ stg $a12,`$stdframe+11*8`($sp) # tab[11]=a1^a2^a8
+
+ xgr $a12,$a4
+ stg $a48,`$stdframe+12*8`($sp) # tab[12]=a4^a8
+ srlg $hi,$lo,1
+ stg $a1,`$stdframe+13*8`($sp) # tab[13]=a1^a4^a8
+ sllg $lo,$lo,63
+ stg $a2,`$stdframe+14*8`($sp) # tab[14]=a2^a4^a8
+ srlg @T[0],@i[0],2
+ stg $a12,`$stdframe+15*8`($sp) # tab[15]=a1^a2^a4^a8
+
+ lghi $mask,`0xf<<3`
+ sllg $a1,@i[0],62
+ sllg @i[0],$b,3
+ srlg @T[1],@i[1],3
+ ngr @i[0],$mask
+ sllg $a2,@i[1],61
+ srlg @i[1],$b,4-3
+ xgr $hi,@T[0]
+ ngr @i[1],$mask
+ xgr $lo,$a1
+ xgr $hi,@T[1]
+ xgr $lo,$a2
+
+ xg $lo,$stdframe(@i[0],$sp)
+ srlg @i[0],$b,8-3
+ ngr @i[0],$mask
+___
+for($n=1;$n<14;$n++) {
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ srlg @i[1],$b,`($n+2)*4`-3
+ sllg @T[0],@T[1],`$n*4`
+ ngr @i[1],$mask
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+___
+ push(@i,shift(@i)); push(@T,shift(@T));
+}
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ sllg @T[0],@T[1],`$n*4`
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+
+ lg @T[0],$stdframe(@i[0],$sp)
+ sllg @T[1],@T[0],`($n+1)*4`
+ srlg @T[0],@T[0],`64-($n+1)*4`
+ xgr $lo,@T[1]
+ xgr $hi,@T[0]
+
+ br $ra
+.size _mul_1x1,.-_mul_1x1
+
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@function
+.align 16
+bn_GF2m_mul_2x2:
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
+
+ lghi %r1,-$stdframe-128
+ la %r0,0($sp)
+ la $sp,0(%r1,$sp) # alloca
+ st${g} %r0,0($sp) # back chain
+___
+if ($SIZE_T==8) {
+my @r=map("%r$_",(6..9));
+$code.=<<___;
+ bras $ra,_mul_1x1 # a1·b1
+ stmg $lo,$hi,16($rp)
+
+ lg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # a0·b0
+ stmg $lo,$hi,0($rp)
+
+ lg $a,`$stdframe+128+3*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+5*$SIZE_T`($sp)
+ xg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ xg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # (a0+a1)·(b0+b1)
+ lmg @r[0],@r[3],0($rp)
+
+ xgr $lo,$hi
+ xgr $hi,@r[1]
+ xgr $lo,@r[0]
+ xgr $hi,@r[2]
+ xgr $lo,@r[3]
+ xgr $hi,@r[3]
+ xgr $lo,$hi
+ stg $hi,16($rp)
+ stg $lo,8($rp)
+___
+} else {
+$code.=<<___;
+ sllg %r3,%r3,32
+ sllg %r5,%r5,32
+ or %r3,%r4
+ or %r5,%r6
+ bras $ra,_mul_1x1
+ rllg $lo,$lo,32
+ rllg $hi,$hi,32
+ stmg $lo,$hi,0($rp)
+___
+}
+$code.=<<___;
+ lm${g} %r6,%r15,`$stdframe+128+6*$SIZE_T`($sp)
+ br $ra
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.string "GF(2^m) Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
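The heart of s390x-gf2m.pl is _mul_1x1: a carry-less multiplication done with a 16-entry table of GF(2) multiples and 4-bit windows of b. A small Perl sketch of that idea (32-bit inputs so the 64-bit product fits a native integer; the real code additionally masks off and re-injects a's top three bits, which the sketch skips):

    #!/usr/bin/env perl
    # 4-bit window GF(2) multiplication: precompute tab[k] = k(x)*a(x) for the
    # sixteen degree<4 polynomials k, then XOR shifted table entries selected
    # by successive nibbles of b into the result.
    use strict; use warnings;

    sub gf2_mul_1x1 {
        my ($a, $b) = @_;                        # 32-bit polynomials over GF(2)
        my @tab = (0) x 16;
        for my $k (1 .. 15) {
            my $t = 0;
            for my $bit (0 .. 3) {
                $t ^= $a << $bit if $k & (1 << $bit);
            }
            $tab[$k] = $t;                       # tab[k] = k(x) * a(x)
        }
        my $r = 0;
        for (my $s = 0; $s < 32; $s += 4) {      # walk b a nibble at a time
            $r ^= $tab[($b >> $s) & 0xf] << $s;
        }
        return $r;                               # 64-bit carry-less product
    }

    printf "%x\n", gf2_mul_1x1(0xdeadbeef, 0x0badf00d);

bn_GF2m_mul_2x2 itself then combines three such 1x1 products, a1·b1, a0·b0 and (a0^a1)·(b0^b1), in the usual Karatsuba fashion, which is what the three bras _mul_1x1 calls in the 64-bit path above do.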
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-mont.pl
new file mode 100644
index 00000000..9fd64e81
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x-mont.pl
@@ -0,0 +1,277 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# April 2007.
+#
+# Performance improvement over vanilla C code varies from 85% to 45%
+# depending on key length and benchmark. Unfortunately in this context
+# these are not very impressive results [for code that utilizes "wide"
+# 64x64=128-bit multiplication, which is not commonly available to C
+# programmers], at least hand-coded bn_asm.c replacement is known to
+# provide 30-40% better results for longest keys. Well, on a second
+# thought it's not very surprising, because z-CPUs are single-issue
+# and _strictly_ in-order execution, while bn_mul_mont is more or less
+# dependent on CPU ability to pipe-line instructions and have several
+# of them "in-flight" at the same time. I mean while other methods,
+# for example Karatsuba, aim to minimize the amount of multiplications at
+# the cost of an increase in other operations, bn_mul_mont aims to neatly
+# "overlap" multiplications and the other operations [and on most
+# platforms even minimize the amount of the other operations, in
+# particular references to memory]. But it's possible to improve this
+# module performance by implementing dedicated squaring code-path and
+# possibly by unrolling loops...
+
+# January 2009.
+#
+# Reschedule to minimize/avoid Address Generation Interlock hazard,
+# make inner loops counter-based.
+
+# November 2010.
+#
+# Adapt for -m31 build. If kernel supports what's called "highgprs"
+# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit
+# instructions and achieve "64-bit" performance even in 31-bit legacy
+# application context. The feature is not specific to any particular
+# processor, as long as it's a "z-CPU". The latter implies that the code
+# remains z/Architecture specific. Compatibility with 32-bit BN_ULONG
+# is achieved by swapping words after 64-bit loads, follow _dswap-s.
+# On z990 it was measured to perform 2.6-2.2 times better than
+# compiler-generated code, less for longer keys...
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
+$mn0="%r0";
+$num="%r1";
+
+# int bn_mul_mont(
+$rp="%r2"; # BN_ULONG *rp,
+$ap="%r3"; # const BN_ULONG *ap,
+$bp="%r4"; # const BN_ULONG *bp,
+$np="%r5"; # const BN_ULONG *np,
+$n0="%r6"; # const BN_ULONG *n0,
+#$num="160(%r15)" # int num);
+
+$bi="%r2"; # zaps rp
+$j="%r7";
+
+$ahi="%r8";
+$alo="%r9";
+$nhi="%r10";
+$nlo="%r11";
+$AHI="%r12";
+$NHI="%r13";
+$count="%r14";
+$sp="%r15";
+
+$code.=<<___;
+.text
+.globl bn_mul_mont
+.type bn_mul_mont,\@function
+bn_mul_mont:
+ lgf $num,`$stdframe+$SIZE_T-4`($sp) # pull $num
+ sla $num,`log($SIZE_T)/log(2)` # $num to enumerate bytes
+ la $bp,0($num,$bp)
+
+ st${g} %r2,2*$SIZE_T($sp)
+
+ cghi $num,16 #
+ lghi %r2,0 #
+ blr %r14 # if($num<16) return 0;
+___
+$code.=<<___ if ($flavour =~ /3[12]/);
+ tmll $num,4
+ bnzr %r14 # if ($num&1) return 0;
+___
+$code.=<<___ if ($flavour !~ /3[12]/);
+ cghi $num,96 #
+ bhr %r14 # if($num>96) return 0;
+___
+$code.=<<___;
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
+
+ lghi $rp,-$stdframe-8 # leave room for carry bit
+ lcgr $j,$num # -$num
+ lgr %r0,$sp
+ la $rp,0($rp,$sp)
+ la $sp,0($j,$rp) # alloca
+ st${g} %r0,0($sp) # back chain
+
+ sra $num,3 # restore $num
+ la $bp,0($j,$bp) # restore $bp
+ ahi $num,-1 # adjust $num for inner loop
+ lg $n0,0($n0) # pull n0
+ _dswap $n0
+
+ lg $bi,0($bp)
+ _dswap $bi
+ lg $alo,0($ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[0]*bp[0]
+ lgr $AHI,$ahi
+
+ lgr $mn0,$alo # "tp[0]"*n0
+ msgr $mn0,$n0
+
+ lg $nlo,0($np) #
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[0]*m1
+ algr $nlo,$alo # +="tp[0]"
+ lghi $NHI,0
+ alcgr $NHI,$nhi
+
+ la $j,8(%r0) # j=1
+ lr $count,$num
+
+.align 16
+.L1st:
+ lg $alo,0($j,$ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[j]*bp[0]
+ algr $alo,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$ahi
+
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[j]*m1
+ algr $nlo,$NHI
+ lghi $NHI,0
+ alcgr $nhi,$NHI # +="tp[j]"
+ algr $nlo,$alo
+ alcgr $NHI,$nhi
+
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
+ la $j,8($j) # j++
+ brct $count,.L1st
+
+ algr $NHI,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$AHI # upmost overflow bit
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
+ la $bp,8($bp) # bp++
+
+.Louter:
+ lg $bi,0($bp) # bp[i]
+ _dswap $bi
+ lg $alo,0($ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[0]*bp[i]
+ alg $alo,$stdframe($sp) # +=tp[0]
+ lghi $AHI,0
+ alcgr $AHI,$ahi
+
+ lgr $mn0,$alo
+ msgr $mn0,$n0 # tp[0]*n0
+
+ lg $nlo,0($np) # np[0]
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[0]*m1
+ algr $nlo,$alo # +="tp[0]"
+ lghi $NHI,0
+ alcgr $NHI,$nhi
+
+ la $j,8(%r0) # j=1
+ lr $count,$num
+
+.align 16
+.Linner:
+ lg $alo,0($j,$ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[j]*bp[i]
+ algr $alo,$AHI
+ lghi $AHI,0
+ alcgr $ahi,$AHI
+ alg $alo,$stdframe($j,$sp)# +=tp[j]
+ alcgr $AHI,$ahi
+
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[j]*m1
+ algr $nlo,$NHI
+ lghi $NHI,0
+ alcgr $nhi,$NHI
+ algr $nlo,$alo # +="tp[j]"
+ alcgr $NHI,$nhi
+
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
+ la $j,8($j) # j++
+ brct $count,.Linner
+
+ algr $NHI,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$AHI
+ alg $NHI,$stdframe($j,$sp)# accumulate previous upmost overflow bit
+ lghi $ahi,0
+ alcgr $AHI,$ahi # new upmost overflow bit
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
+
+ la $bp,8($bp) # bp++
+ cl${g} $bp,`$stdframe+8+4*$SIZE_T`($j,$sp) # compare to &bp[num]
+ jne .Louter
+
+ l${g} $rp,`$stdframe+8+2*$SIZE_T`($j,$sp) # reincarnate rp
+ la $ap,$stdframe($sp)
+ ahi $num,1 # restore $num, incidentally clears "borrow"
+
+ la $j,0(%r0)
+ lr $count,$num
+.Lsub: lg $alo,0($j,$ap)
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ slbgr $alo,$nlo
+ stg $alo,0($j,$rp)
+ la $j,8($j)
+ brct $count,.Lsub
+ lghi $ahi,0
+ slbgr $AHI,$ahi # handle upmost carry
+
+ ngr $ap,$AHI
+ lghi $np,-1
+ xgr $np,$AHI
+ ngr $np,$rp
+ ogr $ap,$np # ap=borrow?tp:rp
+
+ la $j,0(%r0)
+ lgr $count,$num
+.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh
+ _dswap $alo
+ stg $j,$stdframe($j,$sp) # zap tp
+ stg $alo,0($j,$rp)
+ la $j,8($j)
+ brct $count,.Lcopy
+
+ la %r1,`$stdframe+8+6*$SIZE_T`($j,$sp)
+ lm${g} %r6,%r15,0(%r1)
+ lghi %r2,1 # signal "processed"
+ br %r14
+.size bn_mul_mont,.-bn_mul_mont
+.string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ s/_dswap\s+(%r[0-9]+)/sprintf("rllg\t%s,%s,32",$1,$1) if($SIZE_T==4)/e;
+ print $_,"\n";
+}
+close STDOUT;
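What the loops above compute can be written down compactly. A word-level Perl sketch of the Montgomery multiplication bn_mul_mont performs (helper name and 16-bit word size are illustrative; the assembler does the same with 64-bit words, mlgr for the products and alcgr for the carry chains):

    #!/usr/bin/env perl
    # rp = ap * bp * R^-1 mod np, with R = 2^(16*num) and word arrays in
    # little-endian order.  Each outer iteration adds bp[i]*ap plus a multiple
    # of np chosen so the lowest word cancels, then shifts one word right.
    use strict; use warnings;

    sub mul_mont {
        my ($ap, $bp, $np, $n0, $num) = @_;      # n0 = -np[0]^-1 mod 2^16
        my @tp = (0) x ($num + 1);
        for my $i (0 .. $num - 1) {
            my $m = (($tp[0] + $ap->[0] * $bp->[$i]) * $n0) & 0xffff;
            my $carry = 0;
            for my $j (0 .. $num - 1) {
                my $t = $tp[$j] + $ap->[$j] * $bp->[$i] + $np->[$j] * $m + $carry;
                $tp[$j] = $t & 0xffff;           # low word stays in place
                $carry  = $t >> 16;              # the rest carries on
            }
            $tp[$num] += $carry;                 # upmost word
            shift @tp; push @tp, 0;              # tp[0] is 0 by choice of $m
        }
        # the final conditional subtraction of np (the .Lsub/.Lcopy part) is omitted
        return [ @tp[0 .. $num - 1] ];
    }

    # toy driver: find n0 = -np[0]^-1 mod 2^16 by brute force
    my @np = (0x9f2b, 0x0042);                   # a small odd modulus
    my ($n0) = grep { (($_ * $np[0]) & 0xffff) == 0xffff } 1 .. 0xffff;
    my $res = mul_mont([0x1234, 0], [0x5678, 0], \@np, $n0, 2);
    printf "a*b*R^-1 mod n (unreduced): %04x %04x\n", @$res;

Multiplying the result once more, by R^2 mod n, cancels the R^-1 factor, which is essentially how the BN library turns this primitive into an ordinary modular multiplication.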
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x.S
new file mode 100755
index 00000000..43fcb79b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/s390x.S
@@ -0,0 +1,678 @@
+.ident "s390x.S, version 1.1"
+// ====================================================================
+// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+// project.
+//
+// Rights for redistribution and usage in source and binary forms are
+// granted according to the OpenSSL license. Warranty of any kind is
+// disclaimed.
+// ====================================================================
+
+.text
+
+#define zero %r0
+
+// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
+.globl bn_mul_add_words
+.type bn_mul_add_words,@function
+.align 4
+bn_mul_add_words:
+ lghi zero,0 // zero = 0
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0;
+ ltgfr %r4,%r4
+ bler %r14 // if (len<=0) return 0;
+
+ stmg %r6,%r10,48(%r15)
+ lghi %r10,3
+ lghi %r8,0 // carry = 0
+ nr %r10,%r4 // len%4
+ sra %r4,2 // cnt=len/4
+ jz .Loop1_madd // carry is incidentally cleared if branch taken
+ algr zero,zero // clear carry
+
+.Loop4_madd:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r2,%r1) // +=rp[i]
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lg %r9,8(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ alcgr %r8,zero
+ alg %r9,8(%r2,%r1)
+ stg %r9,8(%r2,%r1)
+
+ lg %r7,16(%r2,%r3)
+ mlgr %r6,%r5
+ alcgr %r7,%r8
+ alcgr %r6,zero
+ alg %r7,16(%r2,%r1)
+ stg %r7,16(%r2,%r1)
+
+ lg %r9,24(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ alcgr %r8,zero
+ alg %r9,24(%r2,%r1)
+ stg %r9,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r4,.Loop4_madd
+
+ la %r10,1(%r10) // see if len%4 is zero ...
+ brct %r10,.Loop1_madd // without touching condition code:-)
+
+.Lend_madd:
+ alcgr %r8,zero // collect carry bit
+ lgr %r2,%r8
+ lmg %r6,%r10,48(%r15)
+ br %r14
+
+.Loop1_madd:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r2,%r1) // +=rp[i]
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lgr %r8,%r6
+ la %r2,8(%r2) // i++
+ brct %r10,.Loop1_madd
+
+ j .Lend_madd
+.size bn_mul_add_words,.-bn_mul_add_words
+
+// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
+.globl bn_mul_words
+.type bn_mul_words,@function
+.align 4
+bn_mul_words:
+ lghi zero,0 // zero = 0
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0;
+ ltgfr %r4,%r4
+ bler %r14 // if (len<=0) return 0;
+
+ stmg %r6,%r10,48(%r15)
+ lghi %r10,3
+ lghi %r8,0 // carry = 0
+ nr %r10,%r4 // len%4
+ sra %r4,2 // cnt=len/4
+ jz .Loop1_mul // carry is incidentally cleared if branch taken
+ algr zero,zero // clear carry
+
+.Loop4_mul:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lg %r9,8(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ stg %r9,8(%r2,%r1)
+
+ lg %r7,16(%r2,%r3)
+ mlgr %r6,%r5
+ alcgr %r7,%r8
+ stg %r7,16(%r2,%r1)
+
+ lg %r9,24(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ stg %r9,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r4,.Loop4_mul
+
+ la %r10,1(%r10) // see if len%4 is zero ...
+ brct %r10,.Loop1_mul // without touching condition code:-)
+
+.Lend_mul:
+ alcgr %r8,zero // collect carry bit
+ lgr %r2,%r8
+ lmg %r6,%r10,48(%r15)
+ br %r14
+
+.Loop1_mul:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lgr %r8,%r6
+ la %r2,8(%r2) // i++
+ brct %r10,.Loop1_mul
+
+ j .Lend_mul
+.size bn_mul_words,.-bn_mul_words
+
+// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r2,int r4)
+.globl bn_sqr_words
+.type bn_sqr_words,@function
+.align 4
+bn_sqr_words:
+ ltgfr %r4,%r4
+ bler %r14
+
+ stmg %r6,%r7,48(%r15)
+ srag %r1,%r4,2 // cnt=len/4
+ jz .Loop1_sqr
+
+.Loop4_sqr:
+ lg %r7,0(%r3)
+ mlgr %r6,%r7
+ stg %r7,0(%r2)
+ stg %r6,8(%r2)
+
+ lg %r7,8(%r3)
+ mlgr %r6,%r7
+ stg %r7,16(%r2)
+ stg %r6,24(%r2)
+
+ lg %r7,16(%r3)
+ mlgr %r6,%r7
+ stg %r7,32(%r2)
+ stg %r6,40(%r2)
+
+ lg %r7,24(%r3)
+ mlgr %r6,%r7
+ stg %r7,48(%r2)
+ stg %r6,56(%r2)
+
+ la %r3,32(%r3)
+ la %r2,64(%r2)
+ brct %r1,.Loop4_sqr
+
+ lghi %r1,3
+ nr %r4,%r1 // cnt=len%4
+ jz .Lend_sqr
+
+.Loop1_sqr:
+ lg %r7,0(%r3)
+ mlgr %r6,%r7
+ stg %r7,0(%r2)
+ stg %r6,8(%r2)
+
+ la %r3,8(%r3)
+ la %r2,16(%r2)
+ brct %r4,.Loop1_sqr
+
+.Lend_sqr:
+ lmg %r6,%r7,48(%r15)
+ br %r14
+.size bn_sqr_words,.-bn_sqr_words
+
+// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d);
+.globl bn_div_words
+.type bn_div_words,@function
+.align 4
+bn_div_words:
+ dlgr %r2,%r4
+ lgr %r2,%r3
+ br %r14
+.size bn_div_words,.-bn_div_words
+
+// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
+.globl bn_add_words
+.type bn_add_words,@function
+.align 4
+bn_add_words:
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0
+ ltgfr %r5,%r5
+ bler %r14 // if (len<=0) return 0;
+
+ stg %r6,48(%r15)
+ lghi %r6,3
+ nr %r6,%r5 // len%4
+ sra %r5,2 // len/4, use sra because it sets condition code
+ jz .Loop1_add // carry is incidentally cleared if branch taken
+ algr %r2,%r2 // clear carry
+
+.Loop4_add:
+ lg %r0,0(%r2,%r3)
+ alcg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+ lg %r0,8(%r2,%r3)
+ alcg %r0,8(%r2,%r4)
+ stg %r0,8(%r2,%r1)
+ lg %r0,16(%r2,%r3)
+ alcg %r0,16(%r2,%r4)
+ stg %r0,16(%r2,%r1)
+ lg %r0,24(%r2,%r3)
+ alcg %r0,24(%r2,%r4)
+ stg %r0,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r5,.Loop4_add
+
+ la %r6,1(%r6) // see if len%4 is zero ...
+ brct %r6,.Loop1_add // without touching condition code:-)
+
+.Lexit_add:
+ lghi %r2,0
+ alcgr %r2,%r2
+ lg %r6,48(%r15)
+ br %r14
+
+.Loop1_add:
+ lg %r0,0(%r2,%r3)
+ alcg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+
+ la %r2,8(%r2) // i++
+ brct %r6,.Loop1_add
+
+ j .Lexit_add
+.size bn_add_words,.-bn_add_words
+
+// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
+.globl bn_sub_words
+.type bn_sub_words,@function
+.align 4
+bn_sub_words:
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0
+ ltgfr %r5,%r5
+ bler %r14 // if (len<=0) return 0;
+
+ stg %r6,48(%r15)
+ lghi %r6,3
+ nr %r6,%r5 // len%4
+ sra %r5,2 // len/4, use sra because it sets condition code
+ jnz .Loop4_sub // borrow is incidentally cleared if branch taken
+ slgr %r2,%r2 // clear borrow
+
+.Loop1_sub:
+ lg %r0,0(%r2,%r3)
+ slbg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+
+ la %r2,8(%r2) // i++
+ brct %r6,.Loop1_sub
+ j .Lexit_sub
+
+.Loop4_sub:
+ lg %r0,0(%r2,%r3)
+ slbg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+ lg %r0,8(%r2,%r3)
+ slbg %r0,8(%r2,%r4)
+ stg %r0,8(%r2,%r1)
+ lg %r0,16(%r2,%r3)
+ slbg %r0,16(%r2,%r4)
+ stg %r0,16(%r2,%r1)
+ lg %r0,24(%r2,%r3)
+ slbg %r0,24(%r2,%r4)
+ stg %r0,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r5,.Loop4_sub
+
+ la %r6,1(%r6) // see if len%4 is zero ...
+ brct %r6,.Loop1_sub // without touching condition code:-)
+
+.Lexit_sub:
+ lghi %r2,0
+ slbgr %r2,%r2
+ lcgr %r2,%r2
+ lg %r6,48(%r15)
+ br %r14
+.size bn_sub_words,.-bn_sub_words
+
+#define c1 %r1
+#define c2 %r5
+#define c3 %r8
+
+#define mul_add_c(ai,bi,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlg %r6,bi*8(%r4); \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
+
+// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
+.globl bn_mul_comba8
+.type bn_mul_comba8,@function
+.align 4
+bn_mul_comba8:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ mul_add_c(0,0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,1,c2,c3,c1);
+ mul_add_c(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,0,c3,c1,c2);
+ mul_add_c(1,1,c3,c1,c2);
+ mul_add_c(0,2,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ mul_add_c(0,3,c1,c2,c3);
+ mul_add_c(1,2,c1,c2,c3);
+ mul_add_c(2,1,c1,c2,c3);
+ mul_add_c(3,0,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ mul_add_c(4,0,c2,c3,c1);
+ mul_add_c(3,1,c2,c3,c1);
+ mul_add_c(2,2,c2,c3,c1);
+ mul_add_c(1,3,c2,c3,c1);
+ mul_add_c(0,4,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ mul_add_c(0,5,c3,c1,c2);
+ mul_add_c(1,4,c3,c1,c2);
+ mul_add_c(2,3,c3,c1,c2);
+ mul_add_c(3,2,c3,c1,c2);
+ mul_add_c(4,1,c3,c1,c2);
+ mul_add_c(5,0,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ mul_add_c(6,0,c1,c2,c3);
+ mul_add_c(5,1,c1,c2,c3);
+ mul_add_c(4,2,c1,c2,c3);
+ mul_add_c(3,3,c1,c2,c3);
+ mul_add_c(2,4,c1,c2,c3);
+ mul_add_c(1,5,c1,c2,c3);
+ mul_add_c(0,6,c1,c2,c3);
+ stg c1,6*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,7,c2,c3,c1);
+ mul_add_c(1,6,c2,c3,c1);
+ mul_add_c(2,5,c2,c3,c1);
+ mul_add_c(3,4,c2,c3,c1);
+ mul_add_c(4,3,c2,c3,c1);
+ mul_add_c(5,2,c2,c3,c1);
+ mul_add_c(6,1,c2,c3,c1);
+ mul_add_c(7,0,c2,c3,c1);
+ stg c2,7*8(%r2)
+ lghi c2,0
+
+ mul_add_c(7,1,c3,c1,c2);
+ mul_add_c(6,2,c3,c1,c2);
+ mul_add_c(5,3,c3,c1,c2);
+ mul_add_c(4,4,c3,c1,c2);
+ mul_add_c(3,5,c3,c1,c2);
+ mul_add_c(2,6,c3,c1,c2);
+ mul_add_c(1,7,c3,c1,c2);
+ stg c3,8*8(%r2)
+ lghi c3,0
+
+ mul_add_c(2,7,c1,c2,c3);
+ mul_add_c(3,6,c1,c2,c3);
+ mul_add_c(4,5,c1,c2,c3);
+ mul_add_c(5,4,c1,c2,c3);
+ mul_add_c(6,3,c1,c2,c3);
+ mul_add_c(7,2,c1,c2,c3);
+ stg c1,9*8(%r2)
+ lghi c1,0
+
+ mul_add_c(7,3,c2,c3,c1);
+ mul_add_c(6,4,c2,c3,c1);
+ mul_add_c(5,5,c2,c3,c1);
+ mul_add_c(4,6,c2,c3,c1);
+ mul_add_c(3,7,c2,c3,c1);
+ stg c2,10*8(%r2)
+ lghi c2,0
+
+ mul_add_c(4,7,c3,c1,c2);
+ mul_add_c(5,6,c3,c1,c2);
+ mul_add_c(6,5,c3,c1,c2);
+ mul_add_c(7,4,c3,c1,c2);
+ stg c3,11*8(%r2)
+ lghi c3,0
+
+ mul_add_c(7,5,c1,c2,c3);
+ mul_add_c(6,6,c1,c2,c3);
+ mul_add_c(5,7,c1,c2,c3);
+ stg c1,12*8(%r2)
+ lghi c1,0
+
+
+ mul_add_c(6,7,c2,c3,c1);
+ mul_add_c(7,6,c2,c3,c1);
+ stg c2,13*8(%r2)
+ lghi c2,0
+
+ mul_add_c(7,7,c3,c1,c2);
+ stg c3,14*8(%r2)
+ stg c1,15*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_mul_comba8,.-bn_mul_comba8
+
+// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
+.globl bn_mul_comba4
+.type bn_mul_comba4,@function
+.align 4
+bn_mul_comba4:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ mul_add_c(0,0,c1,c2,c3);
+	stg	c1,0*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,1,c2,c3,c1);
+ mul_add_c(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,0,c3,c1,c2);
+ mul_add_c(1,1,c3,c1,c2);
+ mul_add_c(0,2,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ mul_add_c(0,3,c1,c2,c3);
+ mul_add_c(1,2,c1,c2,c3);
+ mul_add_c(2,1,c1,c2,c3);
+ mul_add_c(3,0,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ mul_add_c(3,1,c2,c3,c1);
+ mul_add_c(2,2,c2,c3,c1);
+ mul_add_c(1,3,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,3,c3,c1,c2);
+ mul_add_c(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ mul_add_c(3,3,c1,c2,c3);
+ stg c1,6*8(%r2)
+ stg c2,7*8(%r2)
+
+	lmg	%r6,%r8,48(%r15)
+ br %r14
+.size bn_mul_comba4,.-bn_mul_comba4
+
+#define sqr_add_c(ai,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlgr %r6,%r7; \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
+
+#define sqr_add_c2(ai,aj,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlg %r6,aj*8(%r3); \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero; \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
+
+// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3);
+.globl bn_sqr_comba8
+.type bn_sqr_comba8,@function
+.align 4
+bn_sqr_comba8:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ sqr_add_c(0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(1,c3,c1,c2);
+ sqr_add_c2(2,0,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(3,0,c1,c2,c3);
+ sqr_add_c2(2,1,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(2,c2,c3,c1);
+ sqr_add_c2(3,1,c2,c3,c1);
+ sqr_add_c2(4,0,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(5,0,c3,c1,c2);
+ sqr_add_c2(4,1,c3,c1,c2);
+ sqr_add_c2(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(3,c1,c2,c3);
+ sqr_add_c2(4,2,c1,c2,c3);
+ sqr_add_c2(5,1,c1,c2,c3);
+ sqr_add_c2(6,0,c1,c2,c3);
+ stg c1,6*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(7,0,c2,c3,c1);
+ sqr_add_c2(6,1,c2,c3,c1);
+ sqr_add_c2(5,2,c2,c3,c1);
+ sqr_add_c2(4,3,c2,c3,c1);
+ stg c2,7*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(4,c3,c1,c2);
+ sqr_add_c2(5,3,c3,c1,c2);
+ sqr_add_c2(6,2,c3,c1,c2);
+ sqr_add_c2(7,1,c3,c1,c2);
+ stg c3,8*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(7,2,c1,c2,c3);
+ sqr_add_c2(6,3,c1,c2,c3);
+ sqr_add_c2(5,4,c1,c2,c3);
+ stg c1,9*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(5,c2,c3,c1);
+ sqr_add_c2(6,4,c2,c3,c1);
+ sqr_add_c2(7,3,c2,c3,c1);
+ stg c2,10*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(7,4,c3,c1,c2);
+ sqr_add_c2(6,5,c3,c1,c2);
+ stg c3,11*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(6,c1,c2,c3);
+ sqr_add_c2(7,5,c1,c2,c3);
+ stg c1,12*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(7,6,c2,c3,c1);
+ stg c2,13*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(7,c3,c1,c2);
+ stg c3,14*8(%r2)
+ stg c1,15*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_sqr_comba8,.-bn_sqr_comba8
+
+// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3);
+.globl bn_sqr_comba4
+.type bn_sqr_comba4,@function
+.align 4
+bn_sqr_comba4:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ sqr_add_c(0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(1,c3,c1,c2);
+ sqr_add_c2(2,0,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(3,0,c1,c2,c3);
+ sqr_add_c2(2,1,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(2,c2,c3,c1);
+ sqr_add_c2(3,1,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(3,c1,c2,c3);
+ stg c1,6*8(%r2)
+ stg c2,7*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_sqr_comba4,.-bn_sqr_comba4
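The comba routines above all follow one pattern: walk the result columns in order and keep each column's running sum in the register triple c1/c2/c3, rotating their roles after every store. A Perl sketch of that column-wise multiplication with 16-bit words (function and variable names are illustrative, not the module's code):

    #!/usr/bin/env perl
    # Column-wise ("comba") multiplication: result word k is the carry-saved
    # sum of all partial products a[i]*b[j] with i+j == k.  c1 holds the
    # current column, c2 and c3 the carries into the next two columns.
    use strict; use warnings;

    sub mul_comba {
        my ($a, $b, $n) = @_;                  # $a, $b: refs to $n 16-bit words
        my @r = (0) x (2 * $n);
        my ($c1, $c2, $c3) = (0, 0, 0);
        for my $k (0 .. 2 * $n - 2) {
            for my $i (0 .. $n - 1) {
                my $j = $k - $i;
                next if $j < 0 || $j >= $n;
                my $t = $a->[$i] * $b->[$j];
                $c1 += $t & 0xffff;
                $c2 += ($t >> 16) + ($c1 >> 16);
                $c3 += $c2 >> 16;
                $c1 &= 0xffff;  $c2 &= 0xffff;
            }
            $r[$k] = $c1;
            ($c1, $c2, $c3) = ($c2, $c3, 0);   # rotate roles, like stg/lghi above
        }
        $r[2 * $n - 1] = $c1;
        return \@r;
    }

    my $r = mul_comba([0xffff, 0xffff], [0xffff, 0xffff], 2);
    printf "%04x %04x %04x %04x\n", @$r;       # 0xffffffff squared, least significant word first

The squaring variants make the same column walk but use sqr_add_c2 to add each off-diagonal product a[i]*a[j] twice, so only half of the i,j pairs have to be visited.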
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8.S
new file mode 100644
index 00000000..88c5dc48
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8.S
@@ -0,0 +1,1458 @@
+.ident "sparcv8.s, Version 1.4"
+.ident "SPARC v8 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in SuperSPARC ISA replacement for crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * See bn_asm.sparc.v8plus.S for more details.
+ */
+
+/*
+ * Revision history.
+ *
+ * 1.1 - new loop unrolling model(*);
+ * 1.2 - made gas friendly;
+ * 1.3 - fixed problem with /usr/ccs/lib/cpp;
+ * 1.4 - some retunes;
+ *
+ * (*) see bn_asm.sparc.v8plus.S for details
+ */
+
+.section ".text",#alloc,#execinstr
+.file "bn_asm.sparc.v8.S"
+
+.align 32
+
+.global bn_mul_add_words
+/*
+ * BN_ULONG bn_mul_add_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_add_words:
+ cmp %o2,0
+ bg,a .L_bn_mul_add_words_proceed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_mul_add_words_proceed:
+ andcc %o2,-4,%g0
+ bz .L_bn_mul_add_words_tail
+ clr %o5
+
+.L_bn_mul_add_words_loop:
+ ld [%o0],%o4
+ ld [%o1+4],%g3
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0]
+ addx %g1,0,%o5
+
+ ld [%o0+4],%o4
+ ld [%o1+8],%g2
+ umul %o3,%g3,%g3
+ dec 4,%o2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g3,%o4
+ st %o4,[%o0+4]
+ addx %g1,0,%o5
+
+ ld [%o0+8],%o4
+ ld [%o1+12],%g3
+ umul %o3,%g2,%g2
+ inc 16,%o1
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0+8]
+ addx %g1,0,%o5
+
+ ld [%o0+12],%o4
+ umul %o3,%g3,%g3
+ inc 16,%o0
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g3,%o4
+ st %o4,[%o0-4]
+ addx %g1,0,%o5
+ andcc %o2,-4,%g0
+ bnz,a .L_bn_mul_add_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ bnz,a .L_bn_mul_add_words_tail
+ ld [%o1],%g2
+.L_bn_mul_add_words_return:
+ retl
+ mov %o5,%o0
+ nop
+
+.L_bn_mul_add_words_tail:
+ ld [%o0],%o4
+ umul %o3,%g2,%g2
+ addcc %o4,%o5,%o4
+ rd %y,%g1
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_add_words_return
+ st %o4,[%o0]
+
+ ld [%o1+4],%g2
+ ld [%o0+4],%o4
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_add_words_return
+ st %o4,[%o0+4]
+
+ ld [%o1+8],%g2
+ ld [%o0+8],%o4
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0+8]
+ retl
+ addx %g1,0,%o0
+
+.type bn_mul_add_words,#function
+.size bn_mul_add_words,(.-bn_mul_add_words)
+
+.align 32
+
+.global bn_mul_words
+/*
+ * BN_ULONG bn_mul_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_words:
+ cmp %o2,0
+	bg,a	.L_bn_mul_words_proceed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_mul_words_proceed:
+ andcc %o2,-4,%g0
+ bz .L_bn_mul_words_tail
+ clr %o5
+
+.L_bn_mul_words_loop:
+ ld [%o1+4],%g3
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ st %g2,[%o0]
+
+ ld [%o1+8],%g2
+ umul %o3,%g3,%g3
+ addcc %g3,%o5,%g3
+ rd %y,%g1
+ dec 4,%o2
+ addx %g1,0,%o5
+ st %g3,[%o0+4]
+
+ ld [%o1+12],%g3
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ inc 16,%o1
+ st %g2,[%o0+8]
+ addx %g1,0,%o5
+
+ umul %o3,%g3,%g3
+ addcc %g3,%o5,%g3
+ rd %y,%g1
+ inc 16,%o0
+ addx %g1,0,%o5
+ st %g3,[%o0-4]
+ andcc %o2,-4,%g0
+ nop
+ bnz,a .L_bn_mul_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ bnz,a .L_bn_mul_words_tail
+ ld [%o1],%g2
+.L_bn_mul_words_return:
+ retl
+ mov %o5,%o0
+ nop
+
+.L_bn_mul_words_tail:
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_words_return
+ st %g2,[%o0]
+ nop
+
+ ld [%o1+4],%g2
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_words_return
+ st %g2,[%o0+4]
+
+ ld [%o1+8],%g2
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ st %g2,[%o0+8]
+ retl
+ addx %g1,0,%o0
+
+.type bn_mul_words,#function
+.size bn_mul_words,(.-bn_mul_words)
+
+.align 32
+.global bn_sqr_words
+/*
+ * void bn_sqr_words(r,a,n)
+ * BN_ULONG *r,*a;
+ * int n;
+ */
+bn_sqr_words:
+ cmp %o2,0
+	bg,a	.L_bn_sqr_words_proceed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_sqr_words_proceed:
+ andcc %o2,-4,%g0
+ bz .L_bn_sqr_words_tail
+ clr %o5
+
+.L_bn_sqr_words_loop:
+ ld [%o1+4],%g3
+ umul %g2,%g2,%o4
+ st %o4,[%o0]
+ rd %y,%o5
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%g2
+ umul %g3,%g3,%o4
+ dec 4,%o2
+ st %o4,[%o0+8]
+ rd %y,%o5
+ st %o5,[%o0+12]
+ nop
+
+ ld [%o1+12],%g3
+ umul %g2,%g2,%o4
+ st %o4,[%o0+16]
+ rd %y,%o5
+ inc 16,%o1
+ st %o5,[%o0+20]
+
+ umul %g3,%g3,%o4
+ inc 32,%o0
+ st %o4,[%o0-8]
+ rd %y,%o5
+ st %o5,[%o0-4]
+ andcc %o2,-4,%g2
+ bnz,a .L_bn_sqr_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ nop
+ bnz,a .L_bn_sqr_words_tail
+ ld [%o1],%g2
+.L_bn_sqr_words_return:
+ retl
+ clr %o0
+
+.L_bn_sqr_words_tail:
+ umul %g2,%g2,%o4
+ st %o4,[%o0]
+ deccc %o2
+ rd %y,%o5
+ bz .L_bn_sqr_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+4],%g2
+ umul %g2,%g2,%o4
+ st %o4,[%o0+8]
+ deccc %o2
+ rd %y,%o5
+ nop
+ bz .L_bn_sqr_words_return
+ st %o5,[%o0+12]
+
+ ld [%o1+8],%g2
+ umul %g2,%g2,%o4
+ st %o4,[%o0+16]
+ rd %y,%o5
+ st %o5,[%o0+20]
+ retl
+ clr %o0
+
+.type bn_sqr_words,#function
+.size bn_sqr_words,(.-bn_sqr_words)
+
+.align 32
+
+.global bn_div_words
+/*
+ * BN_ULONG bn_div_words(h,l,d)
+ * BN_ULONG h,l,d;
+ */
+bn_div_words:
+ wr %o0,%y
+ udiv %o1,%o2,%o0
+ retl
+ nop
+
+.type bn_div_words,#function
+.size bn_div_words,(.-bn_div_words)
+
+.align 32
+
+.global bn_add_words
+/*
+ * BN_ULONG bn_add_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_add_words:
+ cmp %o3,0
+ bg,a .L_bn_add_words_proceed
+ ld [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_add_words_proceed:
+ andcc %o3,-4,%g0
+ bz .L_bn_add_words_tail
+ clr %g1
+ ba .L_bn_add_words_warn_loop
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_add_words_loop:
+ ld [%o1],%o4
+.L_bn_add_words_warn_loop:
+ ld [%o2],%o5
+ ld [%o1+4],%g3
+ ld [%o2+4],%g4
+ dec 4,%o3
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0]
+
+ ld [%o1+8],%o4
+ ld [%o2+8],%o5
+ inc 16,%o1
+ addxcc %g3,%g4,%g3
+ st %g3,[%o0+4]
+
+ ld [%o1-4],%g3
+ ld [%o2+12],%g4
+ inc 16,%o2
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0+8]
+
+ inc 16,%o0
+ addxcc %g3,%g4,%g3
+ st %g3,[%o0-4]
+ addx %g0,0,%g1
+ andcc %o3,-4,%g0
+ bnz,a .L_bn_add_words_loop
+ addcc %g1,-1,%g0
+
+ tst %o3
+ bnz,a .L_bn_add_words_tail
+ ld [%o1],%o4
+.L_bn_add_words_return:
+ retl
+ mov %g1,%o0
+
+.L_bn_add_words_tail:
+ addcc %g1,-1,%g0
+ ld [%o2],%o5
+ addxcc %o5,%o4,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_add_words_return
+ st %o5,[%o0]
+
+ ld [%o1+4],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+4],%o5
+ addxcc %o5,%o4,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_add_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+8],%o5
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0+8]
+ retl
+ addx %g0,0,%o0
+
+.type bn_add_words,#function
+.size bn_add_words,(.-bn_add_words)
+
+.align 32
+
+.global bn_sub_words
+/*
+ * BN_ULONG bn_sub_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_sub_words:
+ cmp %o3,0
+ bg,a .L_bn_sub_words_proceed
+ ld [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_sub_words_proceed:
+ andcc %o3,-4,%g0
+ bz .L_bn_sub_words_tail
+ clr %g1
+ ba .L_bn_sub_words_warm_loop
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_sub_words_loop:
+ ld [%o1],%o4
+.L_bn_sub_words_warm_loop:
+ ld [%o2],%o5
+ ld [%o1+4],%g3
+ ld [%o2+4],%g4
+ dec 4,%o3
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0]
+
+ ld [%o1+8],%o4
+ ld [%o2+8],%o5
+ inc 16,%o1
+ subxcc %g3,%g4,%g4
+ st %g4,[%o0+4]
+
+ ld [%o1-4],%g3
+ ld [%o2+12],%g4
+ inc 16,%o2
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0+8]
+
+ inc 16,%o0
+ subxcc %g3,%g4,%g4
+ st %g4,[%o0-4]
+ addx %g0,0,%g1
+ andcc %o3,-4,%g0
+ bnz,a .L_bn_sub_words_loop
+ addcc %g1,-1,%g0
+
+ tst %o3
+ nop
+ bnz,a .L_bn_sub_words_tail
+ ld [%o1],%o4
+.L_bn_sub_words_return:
+ retl
+ mov %g1,%o0
+
+.L_bn_sub_words_tail:
+ addcc %g1,-1,%g0
+ ld [%o2],%o5
+ subxcc %o4,%o5,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_sub_words_return
+ st %o5,[%o0]
+ nop
+
+ ld [%o1+4],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+4],%o5
+ subxcc %o4,%o5,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_sub_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+8],%o5
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0+8]
+ retl
+ addx %g0,0,%o0
+
+.type bn_sub_words,#function
+.size bn_sub_words,(.-bn_sub_words)
+
+#define FRAME_SIZE -96
+
+/*
+ * Here is the register usage map for *all* routines below.
+ */
+#define t_1 %o0
+#define t_2 %o1
+#define c_1 %o2
+#define c_2 %o3
+#define c_3 %o4
+
+#define ap(I) [%i1+4*I]
+#define bp(I) [%i2+4*I]
+#define rp(I) [%i0+4*I]
+
+#define a_0 %l0
+#define a_1 %l1
+#define a_2 %l2
+#define a_3 %l3
+#define a_4 %l4
+#define a_5 %l5
+#define a_6 %l6
+#define a_7 %l7
+
+#define b_0 %i3
+#define b_1 %i4
+#define b_2 %i5
+#define b_3 %o5
+#define b_4 %g1
+#define b_5 %g2
+#define b_6 %g3
+#define b_7 %g4
+
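The register map above feeds the Comba routines that follow, which are long straight-line sequences of one basic step: multiply a 32-bit word pair and fold the 64-bit product into a three-word running column sum. A rough C sketch of that step with hypothetical names (this is not the actual OpenSSL mul_add_c macro):

    typedef unsigned int u32;
    typedef unsigned long long u64;

    /* Fold a[i]*b[j] into the column accumulator (c1 low, c2 high, c3 overflow). */
    static void mul_add_c_ref(u32 a, u32 b, u32 *c1, u32 *c2, u32 *c3)
    {
        u64 t = (u64)a * b + *c1;    /* umul + addcc in the assembly     */
        *c1 = (u32)t;                /* low word of the column           */
        u32 hi = (u32)(t >> 32);     /* rd %y in the assembly            */
        *c2 += hi;                   /* addxcc into the next column      */
        if (*c2 < hi)                /* carry out ripples into c3 (addx) */
            (*c3)++;
    }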
+.align 32
+.global bn_mul_comba8
+/*
+ * void bn_mul_comba8(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba8:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld bp(0),b_0
+ umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
+ ld bp(1),b_1
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
+ ld ap(1),a_1
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ ld ap(2),a_2
+ umul a_1,b_0,t_1 !mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(1) !r[1]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ ld bp(2),b_2
+ umul a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ ld bp(3),b_3
+ addx c_2,%g0,c_2 !=
+ umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ ld ap(3),a_3
+ umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ ld ap(4),a_4
+ umul a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ ld bp(4),b_4
+ umul a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ ld bp(5),b_5
+ umul a_0,b_4,t_1 !=!mul_add_c(a[0],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_2,b_3,t_1 !=!mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ ld ap(5),a_5
+ umul a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ ld ap(6),a_6
+ addx c_2,%g0,c_2 !=
+ umul a_5,b_0,t_1 !mul_add_c(a[5],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(5) !r[5]=c3;
+
+ umul a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,b_2,t_1 !mul_add_c(a[4],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_2,b_4,t_1 !mul_add_c(a[2],b[4],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ ld bp(6),b_6
+ addx c_3,%g0,c_3 !=
+ umul a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ ld bp(7),b_7
+ umul a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ st c_1,rp(6) !r[6]=c1;
+ addx c_3,%g0,c_3 !=
+
+ umul a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ umul a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_3,b_4,t_1 !=!mul_add_c(a[3],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ ld ap(7),a_7
+ umul a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_7,b_0,t_1 !mul_add_c(a[7],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ st c_2,rp(7) !r[7]=c2;
+
+ umul a_7,b_1,t_1 !mul_add_c(a[7],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_6,b_2,t_1 !=!mul_add_c(a[6],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ umul a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_2,b_6,t_1 !=!mul_add_c(a[2],b[6],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !
+ addx c_2,%g0,c_2
+ st c_3,rp(8) !r[8]=c3;
+
+ umul a_2,b_7,t_1 !mul_add_c(a[2],b[7],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_3,b_6,t_1 !=!mul_add_c(a[3],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ umul a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_7,b_2,t_1 !=!mul_add_c(a[7],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(9) !r[9]=c1;
+
+ umul a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_5,b_5,t_1 !=!mul_add_c(a[5],b[5],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(10) !r[10]=c2;
+
+ umul a_4,b_7,t_1 !=!mul_add_c(a[4],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ umul a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ umul a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(11) !r[11]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ umul a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ st c_1,rp(12) !r[12]=c1;
+ addx c_3,%g0,c_3 !=
+
+ umul a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ umul a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(13) !r[13]=c2;
+
+ umul a_7,b_7,t_1 !=!mul_add_c(a[7],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ nop !=
+ st c_3,rp(14) !r[14]=c3;
+ st c_1,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba8,#function
+.size bn_mul_comba8,(.-bn_mul_comba8)
+
+.align 32
+
+.global bn_mul_comba4
+/*
+ * void bn_mul_comba4(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba4:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld bp(0),b_0
+ umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
+ ld bp(1),b_1
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
+ ld ap(1),a_1
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1
+ ld ap(2),a_2
+ umul a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(1) !r[1]=c2;
+
+ umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ ld bp(2),b_2
+ umul a_1,b_1,t_1 !=!mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld bp(3),b_3
+ umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,b_3,t_1 !=!mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3 !=
+ umul a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ ld ap(3),a_3
+ umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_1,b_3,t_1 !=!mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(5) !r[5]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ st c_1,rp(6) !r[6]=c1;
+ st c_2,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba4,#function
+.size bn_mul_comba4,(.-bn_mul_comba4)
+
+.align 32
+
+.global bn_sqr_comba8
+bn_sqr_comba8:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld ap(1),a_1
+ umul a_0,a_0,c_1 !=!sqr_add_c(a,0,c1,c2,c3);
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ ld ap(2),a_2
+ umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1 !=
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(1) !r[1]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld ap(3),a_3
+ umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,a_3,t_1 !=!sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3 !=
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ ld ap(4),a_4
+ addx c_3,%g0,c_3 !=
+ umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ ld ap(5),a_5
+ umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(4) !r[4]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld ap(6),a_6
+ umul a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(5) !r[5]=c3;
+
+ umul a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ ld ap(7),a_7
+ umul a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(6) !r[6]=c1;
+
+ umul a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ st c_2,rp(7) !r[7]=c2;
+
+ umul a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(8) !r[8]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(9) !r[9]=c1;
+
+ umul a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(10) !r[10]=c2;
+
+ umul a_4,a_7,t_1 !=!sqr_add_c2(a,7,4,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_5,a_6,t_1 !=!sqr_add_c2(a,6,5,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ st c_3,rp(11) !r[11]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ st c_1,rp(12) !r[12]=c1;
+
+ umul a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2 !=
+ addxcc c_3,t_2,c_3
+ st c_2,rp(13) !r[13]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ st c_3,rp(14) !r[14]=c3;
+ st c_1,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba8,#function
+.size bn_sqr_comba8,(.-bn_sqr_comba8)
+
+.align 32
+
+.global bn_sqr_comba4
+/*
+ * void bn_sqr_comba4(r,a)
+ * BN_ULONG *r,*a;
+ */
+bn_sqr_comba4:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ umul a_0,a_0,c_1 !sqr_add_c(a,0,c1,c2,c3);
+ ld ap(1),a_1 !=
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ ld ap(2),a_2
+ umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1 !=
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(1) !r[1]=c2;
+
+ umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ ld ap(3),a_3
+ umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(2) !r[2]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_2,a_3,t_1 !=!sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ st c_3,rp(5) !r[5]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ st c_1,rp(6) !r[6]=c1;
+ st c_2,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba4,#function
+.size bn_sqr_comba4,(.-bn_sqr_comba4)
+
+.align 32
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8plus.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8plus.S
new file mode 100644
index 00000000..63de1860
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv8plus.S
@@ -0,0 +1,1558 @@
+.ident "sparcv8plus.s, Version 1.4"
+.ident "SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in UltraSPARC ISA replacement for crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * Questions-n-answers.
+ *
+ * Q. How to compile?
+ * A. With SC4.x/SC5.x:
+ *
+ * cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ * and with gcc:
+ *
+ * gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ * or if above fails (it does if you have gas installed):
+ *
+ * gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
+ *
+ * Quick-n-dirty way to fuse the module into the library.
+ * Provided that the library is already configured and built
+ * (in 0.9.2 case with no-asm option):
+ *
+ * # cd crypto/bn
+ * # cp /some/place/bn_asm.sparc.v8plus.S .
+ * # cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ * # make
+ * # cd ../..
+ * # make; make test
+ *
+ * Quick-n-dirty way to get rid of it:
+ *
+ * # cd crypto/bn
+ * # touch bn_asm.c
+ * # make
+ * # cd ../..
+ * # make; make test
+ *
+ * Q. V8plus architecture? What kind of beast is that?
+ * A. Well, it's rather a programming model than an architecture...
+ * It's actually v9-compliant, i.e. it runs on *any* UltraSPARC CPU,
+ * but under special conditions, namely when the kernel doesn't preserve
+ * the upper 32 bits of otherwise 64-bit registers during a context switch.
+ *
+ * Q. Why just UltraSPARC? What about SuperSPARC?
+ * A. The original release did target UltraSPARC only. Now a SuperSPARC
+ * version is provided alongside. Both versions share the bn_*comba[48]
+ * implementations (see the comment later in the code for an explanation).
+ * But what's so special about this UltraSPARC implementation?
+ * Why didn't I let the compiler do the job? Trouble is that most
+ * available compilers (well, SC5.0 is the only exception) don't
+ * attempt to take advantage of UltraSPARC's 64-bitness under
+ * 32-bit kernels even though it's perfectly possible (see next
+ * question).
+ *
+ * Q. 64-bit registers under 32-bit kernels? Didn't you just say it
+ * doesn't work?
+ * A. You can't address *all* registers as 64-bit wide:-( The catch is
+ * that you actually may rely upon %o0-%o5 and %g1-%g4 being fully
+ * preserved if you're in a leaf function, i.e. one that never calls
+ * any other functions. All functions in this module are leaf and
+ * 10 registers is a handful. And as a matter of fact the non-"comba"
+ * routines don't require even that much and I could even afford
+ * not to allocate a stack frame of their own for 'em:-)
+ *
+ * Q. What about 64-bit kernels?
+ * A. What about 'em? Just kidding:-) Pure 64-bit version is currently
+ * under evaluation and development...
+ *
+ * Q. What about shared libraries?
+ * A. What about 'em? Kidding again:-) Code does *not* contain any
+ * code position dependencies and it's safe to include it in a
+ * shared library as is.
+ *
+ * Q. How much faster does it go?
+ * A. Do you have a good benchmark? In either case below is what I
+ * experience with crypto/bn/expspeed.c test program:
+ *
+ * v8plus module on U10/300MHz against bn_asm.c compiled with:
+ *
+ * cc-5.0 -xarch=v8plus -xO5 -xdepend +7-12%
+ * cc-4.2 -xarch=v8plus -xO5 -xdepend +25-35%
+ * egcs-1.1.2 -mcpu=ultrasparc -O3 +35-45%
+ *
+ * v8 module on SS10/60MHz against bn_asm.c compiled with:
+ *
+ * cc-5.0 -xarch=v8 -xO5 -xdepend +7-10%
+ * cc-4.2 -xarch=v8 -xO5 -xdepend +10%
+ * egcs-1.1.2 -mv8 -O3 +35-45%
+ *
+ * As you can see it's damn hard to beat the new Sun C compiler
+ * and it's first and foremost GNU C users who will appreciate this
+ * assembler implementation:-)
+ */
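In short, under this model a leaf routine can do the whole word-level multiply-accumulate with 64-bit mulx/srlx arithmetic on registers whose upper halves survive, instead of the v8 umul/rd %y pair. A minimal C sketch of what the bn_mul_add_words loop further below computes per word, assuming 32-bit BN_ULONG; illustrative only, not the OpenSSL reference code:

    typedef unsigned int u32;
    typedef unsigned long long u64;

    /* r[i] += a[i]*w, propagating a 32-bit carry; returns the final carry. */
    static u32 mul_add_words_ref(u32 *r, const u32 *a, int num, u32 w)
    {
        u32 carry = 0;
        for (int i = 0; i < num; i++) {
            u64 t = (u64)a[i] * w + r[i] + carry; /* mulx plus two adds */
            r[i]  = (u32)t;                       /* stuw the low half  */
            carry = (u32)(t >> 32);               /* srlx %o4,32,%o5    */
        }
        return carry;
    }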
+
+/*
+ * Revision history.
+ *
+ * 1.0 - initial release;
+ * 1.1 - new loop unrolling model(*);
+ * - some more fine tuning;
+ * 1.2 - made gas friendly;
+ * - updates to documentation concerning v9;
+ * - new performance comparison matrix;
+ * 1.3 - fixed problem with /usr/ccs/lib/cpp;
+ * 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient)
+ * resulting in slight overall performance kick;
+ * - some retunes;
+ * - support for GNU as added;
+ *
+ * (*) Originally unrolled loop looked like this:
+ * for (;;) {
+ * op(p+0); if (--n==0) break;
+ * op(p+1); if (--n==0) break;
+ * op(p+2); if (--n==0) break;
+ * op(p+3); if (--n==0) break;
+ * p+=4;
+ * }
+ * I unroll according to following:
+ * while (n&~3) {
+ * op(p+0); op(p+1); op(p+2); op(p+3);
+ * p+=4; n-=4;
+ * }
+ * if (n) {
+ * op(p+0); if (--n==0) return;
+ * op(p+1); if (--n==0) return;
+ * op(p+2); return;
+ * }
+ */
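Rendered as compilable C, the unrolling scheme above looks like the sketch below; op(), p and n are placeholders for the per-word work and counters, so this is a shape illustration rather than code from the module:

    /* Four words per pass, then finish the one-to-three stragglers. */
    static void unrolled_loop(void (*op)(int), int p, int n)
    {
        while (n & ~3) {
            op(p + 0); op(p + 1); op(p + 2); op(p + 3);
            p += 4; n -= 4;
        }
        if (n) {
            op(p + 0); if (--n == 0) return;
            op(p + 1); if (--n == 0) return;
            op(p + 2);
        }
    }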
+
+#if defined(__SUNPRO_C) && defined(__sparcv9)
+ /* They've said -xarch=v9 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME_SIZE -192
+#elif defined(__GNUC__) && defined(__arch64__)
+ /* They've said -m64 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME_SIZE -192
+#else
+# define FRAME_SIZE -96
+#endif
+/*
+ * GNU assembler can't stand stuw:-(
+ */
+#define stuw st
+
+.section ".text",#alloc,#execinstr
+.file "bn_asm.sparc.v8plus.S"
+
+.align 32
+
+.global bn_mul_add_words
+/*
+ * BN_ULONG bn_mul_add_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_add_words:
+ sra %o2,%g0,%o2 ! signx %o2
+ brgz,a %o2,.L_bn_mul_add_words_proceed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_mul_add_words_proceed:
+ srl %o3,%g0,%o3 ! clruw %o3
+ andcc %o2,-4,%g0
+ bz,pn %icc,.L_bn_mul_add_words_tail
+ clr %o5
+
+.L_bn_mul_add_words_loop: ! wow! 32 aligned!
+ lduw [%o0],%g1
+ lduw [%o1+4],%g3
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ nop
+ add %o4,%g2,%o4
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+
+ lduw [%o0+4],%g1
+ lduw [%o1+8],%g2
+ mulx %o3,%g3,%g3
+ add %g1,%o5,%o4
+ dec 4,%o2
+ add %o4,%g3,%o4
+ stuw %o4,[%o0+4]
+ srlx %o4,32,%o5
+
+ lduw [%o0+8],%g1
+ lduw [%o1+12],%g3
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ inc 16,%o1
+ add %o4,%g2,%o4
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+
+ lduw [%o0+12],%g1
+ mulx %o3,%g3,%g3
+ add %g1,%o5,%o4
+ inc 16,%o0
+ add %o4,%g3,%o4
+ andcc %o2,-4,%g0
+ stuw %o4,[%o0-4]
+ srlx %o4,32,%o5
+ bnz,a,pt %icc,.L_bn_mul_add_words_loop
+ lduw [%o1],%g2
+
+ brnz,a,pn %o2,.L_bn_mul_add_words_tail
+ lduw [%o1],%g2
+.L_bn_mul_add_words_return:
+ retl
+ mov %o5,%o0
+
+.L_bn_mul_add_words_tail:
+ lduw [%o0],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ dec %o2
+ add %o4,%g2,%o4
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_add_words_return
+ stuw %o4,[%o0]
+
+ lduw [%o1+4],%g2
+ lduw [%o0+4],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ dec %o2
+ add %o4,%g2,%o4
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_add_words_return
+ stuw %o4,[%o0+4]
+
+ lduw [%o1+8],%g2
+ lduw [%o0+8],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ add %o4,%g2,%o4
+ stuw %o4,[%o0+8]
+ retl
+ srlx %o4,32,%o0
+
+.type bn_mul_add_words,#function
+.size bn_mul_add_words,(.-bn_mul_add_words)
+
+.align 32
+
+.global bn_mul_words
+/*
+ * BN_ULONG bn_mul_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_words:
+ sra %o2,%g0,%o2 ! signx %o2
+ brgz,a %o2,.L_bn_mul_words_proceeed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_mul_words_proceeed:
+ srl %o3,%g0,%o3 ! clruw %o3
+ andcc %o2,-4,%g0
+ bz,pn %icc,.L_bn_mul_words_tail
+ clr %o5
+
+.L_bn_mul_words_loop: ! wow! 32 aligned!
+ lduw [%o1+4],%g3
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ nop
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+
+ lduw [%o1+8],%g2
+ mulx %o3,%g3,%g3
+ add %g3,%o5,%o4
+ dec 4,%o2
+ stuw %o4,[%o0+4]
+ srlx %o4,32,%o5
+
+ lduw [%o1+12],%g3
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ inc 16,%o1
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+
+ mulx %o3,%g3,%g3
+ add %g3,%o5,%o4
+ inc 16,%o0
+ stuw %o4,[%o0-4]
+ srlx %o4,32,%o5
+ andcc %o2,-4,%g0
+ bnz,a,pt %icc,.L_bn_mul_words_loop
+ lduw [%o1],%g2
+ nop
+ nop
+
+ brnz,a,pn %o2,.L_bn_mul_words_tail
+ lduw [%o1],%g2
+.L_bn_mul_words_return:
+ retl
+ mov %o5,%o0
+
+.L_bn_mul_words_tail:
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ dec %o2
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_words_return
+ stuw %o4,[%o0]
+
+ lduw [%o1+4],%g2
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ dec %o2
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_words_return
+ stuw %o4,[%o0+4]
+
+ lduw [%o1+8],%g2
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ stuw %o4,[%o0+8]
+ retl
+ srlx %o4,32,%o0
+
+.type bn_mul_words,#function
+.size bn_mul_words,(.-bn_mul_words)
+
+.align 32
+.global bn_sqr_words
+/*
+ * void bn_sqr_words(r,a,n)
+ * BN_ULONG *r,*a;
+ * int n;
+ */
+bn_sqr_words:
+ sra %o2,%g0,%o2 ! signx %o2
+ brgz,a %o2,.L_bn_sqr_words_proceeed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_sqr_words_proceeed:
+ andcc %o2,-4,%g0
+ nop
+ bz,pn %icc,.L_bn_sqr_words_tail
+ nop
+
+.L_bn_sqr_words_loop: ! wow! 32 aligned!
+ lduw [%o1+4],%g3
+ mulx %g2,%g2,%o4
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+ stuw %o5,[%o0+4]
+ nop
+
+ lduw [%o1+8],%g2
+ mulx %g3,%g3,%o4
+ dec 4,%o2
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+ stuw %o5,[%o0+12]
+
+ lduw [%o1+12],%g3
+ mulx %g2,%g2,%o4
+ srlx %o4,32,%o5
+ stuw %o4,[%o0+16]
+ inc 16,%o1
+ stuw %o5,[%o0+20]
+
+ mulx %g3,%g3,%o4
+ inc 32,%o0
+ stuw %o4,[%o0-8]
+ srlx %o4,32,%o5
+ andcc %o2,-4,%g2
+ stuw %o5,[%o0-4]
+ bnz,a,pt %icc,.L_bn_sqr_words_loop
+ lduw [%o1],%g2
+ nop
+
+ brnz,a,pn %o2,.L_bn_sqr_words_tail
+ lduw [%o1],%g2
+.L_bn_sqr_words_return:
+ retl
+ clr %o0
+
+.L_bn_sqr_words_tail:
+ mulx %g2,%g2,%o4
+ dec %o2
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_sqr_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+4],%g2
+ mulx %g2,%g2,%o4
+ dec %o2
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_sqr_words_return
+ stuw %o5,[%o0+12]
+
+ lduw [%o1+8],%g2
+ mulx %g2,%g2,%o4
+ srlx %o4,32,%o5
+ stuw %o4,[%o0+16]
+ stuw %o5,[%o0+20]
+ retl
+ clr %o0
+
+.type bn_sqr_words,#function
+.size bn_sqr_words,(.-bn_sqr_words)
+
+.align 32
+.global bn_div_words
+/*
+ * BN_ULONG bn_div_words(h,l,d)
+ * BN_ULONG h,l,d;
+ */
+bn_div_words:
+ sllx %o0,32,%o0
+ or %o0,%o1,%o0
+ udivx %o0,%o2,%o0
+ retl
+ srl %o0,%g0,%o0 ! clruw %o0
+
+.type bn_div_words,#function
+.size bn_div_words,(.-bn_div_words)
+
+.align 32
+
+.global bn_add_words
+/*
+ * BN_ULONG bn_add_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_add_words:
+ sra %o3,%g0,%o3 ! signx %o3
+ brgz,a %o3,.L_bn_add_words_proceed
+ lduw [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_add_words_proceed:
+ andcc %o3,-4,%g0
+ bz,pn %icc,.L_bn_add_words_tail
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_add_words_loop: ! wow! 32 aligned!
+ dec 4,%o3
+ lduw [%o2],%o5
+ lduw [%o1+4],%g1
+ lduw [%o2+4],%g2
+ lduw [%o1+8],%g3
+ lduw [%o2+8],%g4
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0]
+
+ lduw [%o1+12],%o4
+ lduw [%o2+12],%o5
+ inc 16,%o1
+ addccc %g1,%g2,%g1
+ stuw %g1,[%o0+4]
+
+ inc 16,%o2
+ addccc %g3,%g4,%g3
+ stuw %g3,[%o0+8]
+
+ inc 16,%o0
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0-4]
+ and %o3,-4,%g1
+ brnz,a,pt %g1,.L_bn_add_words_loop
+ lduw [%o1],%o4
+
+ brnz,a,pn %o3,.L_bn_add_words_tail
+ lduw [%o1],%o4
+.L_bn_add_words_return:
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+ nop
+
+.L_bn_add_words_tail:
+ lduw [%o2],%o5
+ dec %o3
+ addccc %o5,%o4,%o5
+ brz,pt %o3,.L_bn_add_words_return
+ stuw %o5,[%o0]
+
+ lduw [%o1+4],%o4
+ lduw [%o2+4],%o5
+ dec %o3
+ addccc %o5,%o4,%o5
+ brz,pt %o3,.L_bn_add_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+8],%o4
+ lduw [%o2+8],%o5
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0+8]
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+
+.type bn_add_words,#function
+.size bn_add_words,(.-bn_add_words)
+
+.global bn_sub_words
+/*
+ * BN_ULONG bn_sub_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_sub_words:
+ sra %o3,%g0,%o3 ! signx %o3
+ brgz,a %o3,.L_bn_sub_words_proceed
+ lduw [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_sub_words_proceed:
+ andcc %o3,-4,%g0
+ bz,pn %icc,.L_bn_sub_words_tail
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_sub_words_loop: ! wow! 32 aligned!
+ dec 4,%o3
+ lduw [%o2],%o5
+ lduw [%o1+4],%g1
+ lduw [%o2+4],%g2
+ lduw [%o1+8],%g3
+ lduw [%o2+8],%g4
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0]
+
+ lduw [%o1+12],%o4
+ lduw [%o2+12],%o5
+ inc 16,%o1
+ subccc %g1,%g2,%g2
+ stuw %g2,[%o0+4]
+
+ inc 16,%o2
+ subccc %g3,%g4,%g4
+ stuw %g4,[%o0+8]
+
+ inc 16,%o0
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0-4]
+ and %o3,-4,%g1
+ brnz,a,pt %g1,.L_bn_sub_words_loop
+ lduw [%o1],%o4
+
+ brnz,a,pn %o3,.L_bn_sub_words_tail
+ lduw [%o1],%o4
+.L_bn_sub_words_return:
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+ nop
+
+.L_bn_sub_words_tail: ! wow! 32 aligned!
+ lduw [%o2],%o5
+ dec %o3
+ subccc %o4,%o5,%o5
+ brz,pt %o3,.L_bn_sub_words_return
+ stuw %o5,[%o0]
+
+ lduw [%o1+4],%o4
+ lduw [%o2+4],%o5
+ dec %o3
+ subccc %o4,%o5,%o5
+ brz,pt %o3,.L_bn_sub_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+8],%o4
+ lduw [%o2+8],%o5
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0+8]
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+
+.type bn_sub_words,#function
+.size bn_sub_words,(.-bn_sub_words)
+
+/*
+ * Code below depends on the fact that upper parts of the %l0-%l7
+ * and %i0-%i7 are zeroed by kernel after context switch. In
+ * previous versions this comment stated that "the trouble is that
+ * it's not feasible to implement the mumbo-jumbo in less V9
+ * instructions:-(" which apparently isn't true thanks to
+ * 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement
+ * results not from the shorter code, but from elimination of
+ * multicycle non-pairable 'rd %y,%rd' instructions.
+ *
+ * Andy.
+ */
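The comba routines below therefore keep the running column sum in a single 64-bit register (c_12) and count overflows of that sum in c_3, pre-scaled by 2^32 (t_2 holds the constant 1<<32), using the annulled 'bcs,a %xcc,.+8; add' pair instead of rd %y. A hedged C sketch of that accumulation step, with illustrative names:

    typedef unsigned long long u64;

    /* Add one 64-bit product into the column; record carries out of 64 bits. */
    static void column_add(u64 *c_12, u64 *c_3, u64 product)
    {
        u64 before = *c_12;
        *c_12 += product;          /* addcc c_12,t_1,c_12              */
        if (*c_12 < before)        /* bcs,a %xcc,.+8 (carry out of 64) */
            *c_3 += 1ULL << 32;    /*   add c_3,t_2,c_3                */
    }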
+
+/*
+ * Here is the register usage map for *all* routines below.
+ */
+#define t_1 %o0
+#define t_2 %o1
+#define c_12 %o2
+#define c_3 %o3
+
+#define ap(I) [%i1+4*I]
+#define bp(I) [%i2+4*I]
+#define rp(I) [%i0+4*I]
+
+#define a_0 %l0
+#define a_1 %l1
+#define a_2 %l2
+#define a_3 %l3
+#define a_4 %l4
+#define a_5 %l5
+#define a_6 %l6
+#define a_7 %l7
+
+#define b_0 %i3
+#define b_1 %i4
+#define b_2 %i5
+#define b_3 %o4
+#define b_4 %o5
+#define b_5 %o7
+#define b_6 %g1
+#define b_7 %g4
+
+.align 32
+.global bn_mul_comba8
+/*
+ * void bn_mul_comba8(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba8:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw bp(0),b_0 !=
+ lduw bp(1),b_1
+ mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !=!r[0]=c1;
+
+ lduw ap(1),a_1
+ mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(2),a_2
+ mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(2),b_2 !=
+ mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(3),b_3
+ mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(4),a_4
+ mulx a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(4),b_4 !=
+ mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(5),b_5
+ mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(5),a_5
+ mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(6),a_6
+ mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(6),b_6 !=
+ mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(7),b_7
+ mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(7),a_7
+ mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(7) !r[7]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ srlx t_1,32,c_12
+ stuw t_1,rp(8) !r[8]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(9) !r[9]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(10) !r[10]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(11) !r[11]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(12) !r[12]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ st t_1,rp(13) !r[13]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(14) !r[14]=c3;
+ stuw c_12,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0 !=
+
+.type bn_mul_comba8,#function
+.size bn_mul_comba8,(.-bn_mul_comba8)
+
+.align 32
+
+.global bn_mul_comba4
+/*
+ * void bn_mul_comba4(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba4:
+ save %sp,FRAME_SIZE,%sp
+ lduw ap(0),a_0
+ mov 1,t_2
+ lduw bp(0),b_0
+ sllx t_2,32,t_2 !=
+ lduw bp(1),b_1
+ mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !=!r[0]=c1;
+
+ lduw ap(1),a_1
+ mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(2),a_2
+ mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(2),b_2 !=
+ mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(3),b_3
+ mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(3) !=!r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !=!r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !=!r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(6) !r[6]=c1;
+ stuw c_12,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba4,#function
+.size bn_mul_comba4,(.-bn_mul_comba4)
+
+.align 32
+
+.global bn_sqr_comba8
+bn_sqr_comba8:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw ap(1),a_1
+ mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !r[0]=c1;
+
+ lduw ap(2),a_2
+ mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(4),a_4
+ mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ st t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(5),a_5
+ mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(6),a_6
+ mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(7),a_7
+ mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(7) !r[7]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(8) !r[8]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(9) !r[9]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(10) !r[10]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(11) !r[11]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(12) !r[12]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(13) !r[13]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12
+ stuw t_1,rp(14) !r[14]=c3;
+ stuw c_12,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba8,#function
+.size bn_sqr_comba8,(.-bn_sqr_comba8)
+
+.align 32
+
+.global bn_sqr_comba4
+/*
+ * void bn_sqr_comba4(r,a)
+ * BN_ULONG *r,*a;
+ */
+bn_sqr_comba4:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw ap(1),a_1
+ mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !r[0]=c1;
+
+ lduw ap(2),a_2
+ mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ stuw c_12,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba4,#function
+.size bn_sqr_comba4,(.-bn_sqr_comba4)
+
+.align 32
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9-mont.pl
new file mode 100644
index 00000000..b8fb1e8a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9-mont.pl
@@ -0,0 +1,606 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# December 2005
+#
+# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons
+# for undertaking this effort are multiple. First of all, UltraSPARC is
+# not the whole SPARCv9 universe, and other VIS-free implementations
+# deserve optimized code just as much. Secondly, the newly introduced
+# UltraSPARC T1, a.k.a. Niagara, has a shared FPU, and concurrent
+# FPU-intensive paths, such as sparcv9a-mont, would simply sink it.
+# Yes, T1 is equipped with several integrated RSA/DSA accelerator
+# circuits accessible through a kernel driver [only(*)], but having a
+# decent user-land software implementation is important too. Finally,
+# there was a desire to experiment with a dedicated squaring procedure.
+# Yes, this module implements one, because it was easiest to draft in
+# SPARCv9 instructions...
+
+# (*)	An engine accessing the driver in question is on my TODO list.
+#	For reference, the accelerator is estimated to give a 6 to 10
+#	times improvement on single-threaded RSA sign. It should be noted
+#	that a 6-10x improvement coefficient does not actually mean
+#	something extraordinary in terms of absolute [single-threaded]
+#	performance, as the SPARCv9 instruction set is by all means the
+#	least suitable for high-performance crypto among 64-bit
+#	platforms. The 6-10x factor simply places T1 in the same
+#	performance domain as, say, AMD64 and IA-64. The improvement on
+#	RSA verify doesn't appear impressive at all, but it's the sign
+#	operation which is far more critical/interesting.
+
+# You might notice that the inner loops are modulo-scheduled:-) This
+# has essentially negligible impact on UltraSPARC performance; it's
+# Fujitsu SPARC64 V users who should notice and hopefully appreciate
+# the advantage... Currently this module surpasses sparcv9a-mont.pl
+# by ~20% on UltraSPARC-III and later cores, but recall that the
+# sparcv9a module still has hidden potential [see the TODO list
+# there], which is estimated to be larger than 20%...
+
+# int bn_mul_mont(
+$rp="%i0"; # BN_ULONG *rp,
+$ap="%i1"; # const BN_ULONG *ap,
+$bp="%i2"; # const BN_ULONG *bp,
+$np="%i3"; # const BN_ULONG *np,
+$n0="%i4"; # const BN_ULONG *n0,
+$num="%i5"; # int num);
+
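+# As a rough aid to reading the loops that follow: the routine computes
+# the word-serial Montgomery product rp = ap*bp*2^(-32*num) mod np. A
+# minimal big-integer sketch of that operation (the helper name, the
+# 32-bit word size and passing n0 = -np[0]^-1 mod 2^32 by value are
+# illustrative assumptions; nothing calls this):
+use Math::BigInt;
+sub mont_mul_ref {
+	my ($A,$B,$N,$N0,$nw) = @_;	# Math::BigInt $A,$B < $N; $nw 32-bit words
+	my $T = Math::BigInt->new(0);
+	for my $i (0..$nw-1) {
+		my $bi = ($B >> (32*$i)) & 0xffffffff;		# b[i]
+		$T += $A * $bi;					# t += a*b[i]
+		my $m = (($T & 0xffffffff) * $N0) & 0xffffffff;	# "t[0]"*n0
+		$T = ($T + $m * $N) >> 32;			# low word is now zero, drop it
+	}
+	return $T >= $N ? $T - $N : $T;				# final conditional subtraction
+}
+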
+$bits=32;
+for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
+if ($bits==64) { $bias=2047; $frame=192; }
+else { $bias=0; $frame=128; }
+
+$car0="%o0";
+$car1="%o1";
+$car2="%o2"; # 1 bit
+$acc0="%o3";
+$acc1="%o4";
+$mask="%g1"; # 32 bits, what a waste...
+$tmp0="%g4";
+$tmp1="%g5";
+
+$i="%l0";
+$j="%l1";
+$mul0="%l2";
+$mul1="%l3";
+$tp="%l4";
+$apj="%l5";
+$npj="%l6";
+$tpj="%l7";
+
+$fname="bn_mul_mont_int";
+
+$code=<<___;
+.section ".text",#alloc,#execinstr
+
+.global $fname
+.align 32
+$fname:
+ cmp %o5,4 ! 128 bits minimum
+ bge,pt %icc,.Lenter
+ sethi %hi(0xffffffff),$mask
+ retl
+ clr %o0
+.align 32
+.Lenter:
+ save %sp,-$frame,%sp
+ sll $num,2,$num ! num*=4
+ or $mask,%lo(0xffffffff),$mask
+ ld [$n0],$n0
+ cmp $ap,$bp
+ and $num,$mask,$num
+ ld [$bp],$mul0 ! bp[0]
+ nop
+
+ add %sp,$bias,%o7 ! real top of stack
+ ld [$ap],$car0 ! ap[0] ! redundant in squaring context
+ sub %o7,$num,%o7
+ ld [$ap+4],$apj ! ap[1]
+ and %o7,-1024,%o7
+ ld [$np],$car1 ! np[0]
+ sub %o7,$bias,%sp ! alloca
+ ld [$np+4],$npj ! np[1]
+ be,pt `$bits==32?"%icc":"%xcc"`,.Lbn_sqr_mont
+ mov 12,$j
+
+ mulx $car0,$mul0,$car0 ! ap[0]*bp[0]
+ mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0]
+ and $car0,$mask,$acc0
+ add %sp,$bias+$frame,$tp
+ ld [$ap+8],$apj !prologue!
+
+ mulx $n0,$acc0,$mul1 ! "t[0]"*n0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
+ mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ ld [$np+8],$npj !prologue!
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.L1st:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ add $j,4,$j ! j++
+ mov $tmp0,$acc0
+ st $car1,[$tp]
+ cmp $j,$num
+ mov $tmp1,$acc1
+ srlx $car1,32,$car1
+ bl %icc,.L1st
+ add $tp,4,$tp ! tp++
+!.L1st
+
+ mulx $apj,$mul0,$tmp0 !epilogue!
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $tmp0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car1
+
+ add $car0,$car1,$car1
+ st $car1,[$tp+8]
+ srlx $car1,32,$car2
+
+ mov 4,$i ! i++
+ ld [$bp+4],$mul0 ! bp[1]
+.Louter:
+ add %sp,$bias+$frame,$tp
+ ld [$ap],$car0 ! ap[0]
+ ld [$ap+4],$apj ! ap[1]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ ld [$tp],$tmp1 ! tp[0]
+ ld [$tp+4],$tpj ! tp[1]
+ mov 12,$j
+
+ mulx $car0,$mul0,$car0
+ mulx $apj,$mul0,$tmp0 !prologue!
+ add $tmp1,$car0,$car0
+ ld [$ap+8],$apj !prologue!
+ and $car0,$mask,$acc0
+
+ mulx $n0,$acc0,$mul1
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1
+ mulx $npj,$mul1,$acc1 !prologue!
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ ld [$np+8],$npj !prologue!
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.Linner:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $tpj,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ add $acc0,$car0,$car0
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ and $car0,$mask,$acc0
+ ld [$tp+8],$tpj ! tp[j]
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ add $j,4,$j ! j++
+ mov $tmp0,$acc0
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ mov $tmp1,$acc1
+ cmp $j,$num
+ bl %icc,.Linner
+ add $tp,4,$tp ! tp++
+!.Linner
+
+ mulx $apj,$mul0,$tmp0 !epilogue!
+ mulx $npj,$mul1,$tmp1
+ add $tpj,$car0,$car0
+ add $acc0,$car0,$car0
+ ld [$tp+8],$tpj ! tp[j]
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $tpj,$car0,$car0
+ add $tmp0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4] ! tp[j-1]
+ srlx $car0,32,$car0
+ add $i,4,$i ! i++
+ srlx $car1,32,$car1
+
+ add $car0,$car1,$car1
+ cmp $i,$num
+ add $car2,$car1,$car1
+ st $car1,[$tp+8]
+
+ srlx $car1,32,$car2
+ bl,a %icc,.Louter
+ ld [$bp+$i],$mul0 ! bp[i]
+!.Louter
+
+ add $tp,12,$tp
+
+.Ltail:
+ add $np,$num,$np
+ add $rp,$num,$rp
+ mov $tp,$ap
+ sub %g0,$num,%o7 ! k=-num
+ ba .Lsub
+ subcc %g0,%g0,%g0 ! clear %icc.c
+.align 16
+.Lsub:
+ ld [$tp+%o7],%o0
+ ld [$np+%o7],%o1
+ subccc %o0,%o1,%o1 ! tp[j]-np[j]
+ add $rp,%o7,$i
+ add %o7,4,%o7
+ brnz %o7,.Lsub
+ st %o1,[$i]
+ subc $car2,0,$car2 ! handle upmost overflow bit
+ and $tp,$car2,$ap
+ andn $rp,$car2,$np
+ or $ap,$np,$ap
+ sub %g0,$num,%o7
+
+.Lcopy:
+ ld [$ap+%o7],%o0 ! copy or in-place refresh
+ st %g0,[$tp+%o7] ! zap tp
+ st %o0,[$rp+%o7]
+ add %o7,4,%o7
+ brnz %o7,.Lcopy
+ nop
+ mov 1,%i0
+ ret
+ restore
+___
+
+########
+######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
+######## code without the dedicated squaring procedure that follows.
+########
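+# The saving comes from the usual squaring identity: each cross product
+# a[i]*a[j] with i!=j occurs twice, so only about half of the
+# multiplications are needed and their results doubled (the $sbit logic
+# in .Lbn_sqr_mont carries the bit produced by that doubling). A
+# big-integer sketch of the identity, with an illustrative helper name:
+use Math::BigInt;
+sub sqr_words_ref {
+	my @a = @_;				# 32-bit words, least significant first
+	my $acc = Math::BigInt->new(0);
+	for my $i (0..$#a) {
+		$acc += Math::BigInt->new($a[$i])**2 << (64*$i);	# diagonal terms
+		for my $j ($i+1..$#a) {
+			$acc += 2 * Math::BigInt->new($a[$i]) * $a[$j] << (32*($i+$j));	# doubled cross terms
+		}
+	}
+	return $acc;				# == (sum over i of a[i]*2^(32*i)) squared
+}
+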
+$sbit="%i2"; # re-use $bp!
+
+$code.=<<___;
+.align 32
+.Lbn_sqr_mont:
+ mulx $mul0,$mul0,$car0 ! ap[0]*ap[0]
+ mulx $apj,$mul0,$tmp0 !prologue!
+ and $car0,$mask,$acc0
+ add %sp,$bias+$frame,$tp
+ ld [$ap+8],$apj !prologue!
+
+ mulx $n0,$acc0,$mul1 ! "t[0]"*n0
+ srlx $car0,32,$car0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
+ mulx $npj,$mul1,$acc1 !prologue!
+ and $car0,1,$sbit
+ ld [$np+8],$npj !prologue!
+ srlx $car0,1,$car0
+ add $acc0,$car1,$car1
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.Lsqr_1st:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0 ! ap[j]*a0+c0
+ add $acc1,$car1,$car1
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ mov $tmp1,$acc1
+ srlx $acc0,32,$sbit
+ add $j,4,$j ! j++
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ mov $tmp0,$acc0
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_1st
+ add $tp,4,$tp ! tp++
+!.Lsqr_1st
+
+ mulx $apj,$mul0,$tmp0 ! epilogue
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0 ! ap[j]*a0+c0
+ add $acc1,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $tmp0,$car0,$car0 ! ap[j]*a0+c0
+ add $tmp1,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ st $car1,[$tp+8]
+ srlx $car1,32,$car2
+
+ ld [%sp+$bias+$frame],$tmp0 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tmp1 ! tp[1]
+ ld [%sp+$bias+$frame+8],$tpj ! tp[2]
+ ld [$ap+4],$mul0 ! ap[1]
+ ld [$ap+8],$apj ! ap[2]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp0,$mul1
+
+ mulx $mul0,$mul0,$car0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1
+ mulx $npj,$mul1,$acc1
+ add $tmp0,$car1,$car1
+ and $car0,$mask,$acc0
+ ld [$np+8],$npj ! np[2]
+ srlx $car1,32,$car1
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ and $car0,1,$sbit
+ add $acc1,$car1,$car1
+ srlx $car0,1,$car0
+ mov 12,$j
+ st $car1,[%sp+$bias+$frame] ! tp[0]=
+ srlx $car1,32,$car1
+ add %sp,$bias+$frame+4,$tp
+
+.Lsqr_2nd:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $acc0,$car0,$car0
+ add $tpj,$car1,$car1
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc1,$car1,$car1
+ ld [$tp+8],$tpj ! tp[j]
+ add $acc0,$acc0,$acc0
+ add $j,4,$j ! j++
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_2nd
+ add $tp,4,$tp ! tp++
+!.Lsqr_2nd
+
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $acc0,$car0,$car0
+ add $tpj,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc1,$car1,$car1
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ ld [%sp+$bias+$frame],$tmp1 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tpj ! tp[1]
+ ld [$ap+8],$mul0 ! ap[2]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp1,$mul1
+ and $mul1,$mask,$mul1
+ mov 8,$i
+
+ mulx $mul0,$mul0,$car0
+ mulx $car1,$mul1,$car1
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add %sp,$bias+$frame,$tp
+ srlx $car1,32,$car1
+ and $car0,1,$sbit
+ srlx $car0,1,$car0
+ mov 4,$j
+
+.Lsqr_outer:
+.Lsqr_inner1:
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $j,4,$j
+ ld [$tp+8],$tpj
+ cmp $j,$i
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_inner1
+ add $tp,4,$tp
+!.Lsqr_inner1
+
+ add $j,4,$j
+ ld [$ap+$j],$apj ! ap[j]
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ add $acc0,$car1,$car1
+ ld [$tp+8],$tpj ! tp[j]
+ add $acc1,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $j,4,$j
+ cmp $j,$num
+ be,pn %icc,.Lsqr_no_inner2
+ add $tp,4,$tp
+
+.Lsqr_inner2:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $acc0,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ ld [$tp+8],$tpj ! tp[j]
+ or $sbit,$acc0,$acc0
+ add $j,4,$j ! j++
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_inner2
+ add $tp,4,$tp ! tp++
+
+.Lsqr_no_inner2:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $acc0,$car0,$car0
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ add $i,4,$i ! i++
+ ld [%sp+$bias+$frame],$tmp1 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tpj ! tp[1]
+ ld [$ap+$i],$mul0 ! ap[j]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp1,$mul1
+ and $mul1,$mask,$mul1
+ add $i,4,$tmp0
+
+ mulx $mul0,$mul0,$car0
+ mulx $car1,$mul1,$car1
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add %sp,$bias+$frame,$tp
+ srlx $car1,32,$car1
+ and $car0,1,$sbit
+ srlx $car0,1,$car0
+
+ cmp $tmp0,$num ! i<num-1
+ bl %icc,.Lsqr_outer
+ mov 4,$j
+
+.Lsqr_last:
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $j,4,$j
+ ld [$tp+8],$tpj
+ cmp $j,$i
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_last
+ add $tp,4,$tp
+!.Lsqr_last
+
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0 ! recover $car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ ba .Ltail
+ add $tp,8,$tp
+.type $fname,#function
+.size $fname,(.-$fname)
+.asciz	"Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
+.align 32
+___
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9a-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9a-mont.pl
new file mode 100755
index 00000000..a14205f2
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/sparcv9a-mont.pl
@@ -0,0 +1,882 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2005
+#
+# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
+# Because unlike integer multiplier, which simply stalls whole CPU,
+# FPU is fully pipelined and can effectively emit 48 bit partial
+# product every cycle. Why not blended SPARC v9? One can argue that
+# making this module dependent on UltraSPARC VIS extension limits its
+# binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
+# implementations from compatibility matrix. But the rest, whole Sun
+# UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
+# VIS extension instructions used in this module. This is considered
+# good enough to not care about HAL SPARC64 users [if any] who have
+# integer-only pure SPARCv9 module to "fall down" to.
+
+# USI&II cores currently exhibit uniform 2x improvement [over pre-
+# bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
+# performance improves few percents for shorter keys and worsens few
+# percents for longer keys. This is because USIII integer multiplier
+# is >3x faster than USI&II one, which is harder to match [but see
+# TODO list below]. It should also be noted that SPARC64 V features
+# out-of-order execution, which *might* mean that integer multiplier
+# is pipelined, which in turn *might* be impossible to match... On
+# additional note, SPARC64 V implements FP Multiply-Add instruction,
+# which is perfectly usable in this context... In other words, as far
+# as Fujitsu SPARC64 V goes, talk to the author:-)
+
+# The implementation imposes the following "non-natural" limitations on
+# the input arguments:
+# - num may not be less than 4;
+# - num has to be even;
+# Failure to meet either condition has no fatal effects; it simply
+# doesn't give any performance gain.
+
+# TODO:
+# - modulo-schedule the inner loop for better performance (on an
+#   in-order execution core such as UltraSPARC this shall result in a
+#   further noticeable(!) improvement);
+# - dedicated squaring procedure[?];
+
+######################################################################
+# November 2006
+#
+# Modulo-scheduled inner loops make it possible to interleave floating
+# point and integer instructions and minimize Read-After-Write
+# penalties. This results in a *further* 20-50% performance improvement
+# [depending on key length, more for longer keys] on USI&II cores and
+# 30-80% on USIII&IV.
+
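+# The invariant the FPU path relies on is that products of 16-bit
+# digits, and short sums of such products, are exactly representable in
+# a double's 53-bit mantissa, so no rounding ever occurs. A plain
+# integer sketch of the 16-bit split and recombination that the code
+# performs with fmuld/faddd plus the srlx/sllx fix-ups (the helper name
+# and digit layout are illustrative; assumes a Perl with 64-bit
+# integers, and nothing calls this):
+sub mul32x32_split16 {
+	my ($x,$y) = @_;			# 0 <= $x,$y < 2^32
+	my ($xlo,$xhi) = ($x & 0xffff, $x >> 16);
+	my ($ylo,$yhi) = ($y & 0xffff, $y >> 16);
+	my $lo  = $xlo*$ylo;			# each digit product < 2^32, exact in a double
+	my $mid = $xlo*$yhi + $xhi*$ylo;	# < 2^33, still exact
+	my $hi  = $xhi*$yhi;
+	$lo += ($mid & 0xffff) << 16;		# fold the middle digits into the low word
+	$hi += ($mid >> 16) + ($lo >> 32);	# and propagate the carry
+	return ($lo & 0xffffffff, $hi);		# 64-bit product as two 32-bit words
+}
+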
+$fname="bn_mul_mont_fpu";
+$bits=32;
+for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
+
+if ($bits==64) {
+ $bias=2047;
+ $frame=192;
+} else {
+ $bias=0;
+ $frame=128; # 96 rounded up to largest known cache-line
+}
+$locals=64;
+
+# In order to provide for 32-/64-bit ABI duality, I keep integers wider
+# than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
+# exclusively for pointers, indexes and other small values...
+# int bn_mul_mont(
+$rp="%i0"; # BN_ULONG *rp,
+$ap="%i1"; # const BN_ULONG *ap,
+$bp="%i2"; # const BN_ULONG *bp,
+$np="%i3"; # const BN_ULONG *np,
+$n0="%i4"; # const BN_ULONG *n0,
+$num="%i5"; # int num);
+
+$tp="%l0"; # t[num]
+$ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
+$ap_h="%l2"; # to these four vectors as double-precision FP values.
+$np_l="%l3"; # This way a bunch of fxtods are eliminated in second
+$np_h="%l4"; # loop and L1-cache aliasing is minimized...
+$i="%l5";
+$j="%l6";
+$mask="%l7"; # 16-bit mask, 0xffff
+
+$n0="%g4"; # reassigned(!) to "64-bit" register
+$carry="%i4"; # %i4 reused(!) for a carry bit
+
+# FP register naming chart
+#
+# ..HILO
+# dcba
+# --------
+# LOa
+# LOb
+# LOc
+# LOd
+# HIa
+# HIb
+# HIc
+# HId
+# ..a
+# ..b
+$ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6";
+$na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14";
+$alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19";
+$nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23";
+
+$dota="%f24"; $dotb="%f26";
+
+$aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
+$ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
+$nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
+$nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
+
+$ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
+
+$code=<<___;
+.section ".text",#alloc,#execinstr
+
+.global $fname
+.align 32
+$fname:
+ save %sp,-$frame-$locals,%sp
+
+ cmp $num,4
+ bl,a,pn %icc,.Lret
+ clr %i0
+ andcc $num,1,%g0 ! $num has to be even...
+ bnz,a,pn %icc,.Lret
+ clr %i0 ! signal "unsupported input value"
+
+ srl $num,1,$num
+ sethi %hi(0xffff),$mask
+ ld [%i4+0],$n0 ! $n0 reassigned, remember?
+ or $mask,%lo(0xffff),$mask
+ ld [%i4+4],%o0
+ sllx %o0,32,%o0
+ or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
+
+ sll $num,3,$num ! num*=8
+
+ add %sp,$bias,%o0 ! real top of stack
+ sll $num,2,%o1
+ add %o1,$num,%o1 ! %o1=num*5
+ sub %o0,%o1,%o0
+ and %o0,-2048,%o0 ! optimize TLB utilization
+ sub %o0,$bias,%sp ! alloca(5*num*8)
+
+ rd %asi,%o7 ! save %asi
+ add %sp,$bias+$frame+$locals,$tp
+ add $tp,$num,$ap_l
+ add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
+ add $ap_l,$num,$ap_h
+ add $ap_h,$num,$np_l
+ add $np_l,$num,$np_h
+
+ wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
+
+ add $rp,$num,$rp ! readjust input pointers to point
+ add $ap,$num,$ap ! at the ends too...
+ add $bp,$num,$bp
+ add $np,$num,$np
+
+ stx %o7,[%sp+$bias+$frame+48] ! save %asi
+
+ sub %g0,$num,$i ! i=-num
+ sub %g0,$num,$j ! j=-num
+
+ add $ap,$j,%o3
+ add $bp,$i,%o4
+
+ ld [%o3+4],%g1 ! bp[0]
+ ld [%o3+0],%o0
+ ld [%o4+4],%g5 ! ap[0]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
+ add $np,$j,%o5
+
+ mulx %o1,%o0,%o0 ! ap[0]*bp[0]
+ mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
+ stx %o0,[%sp+$bias+$frame+0]
+
+ ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o3+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ ! transfer b[i] to FPU as 4x16-bit values
+ ldda [%o4+2]%asi,$ba
+ fxtod $alo,$alo
+ ldda [%o4+0]%asi,$bb
+ fxtod $ahi,$ahi
+ ldda [%o4+6]%asi,$bc
+ fxtod $nlo,$nlo
+ ldda [%o4+4]%asi,$bd
+ fxtod $nhi,$nhi
+
+ ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
+ ldda [%sp+$bias+$frame+6]%asi,$na
+ fxtod $ba,$ba
+ ldda [%sp+$bias+$frame+4]%asi,$nb
+ fxtod $bb,$bb
+ ldda [%sp+$bias+$frame+2]%asi,$nc
+ fxtod $bc,$bc
+ ldda [%sp+$bias+$frame+0]%asi,$nd
+ fxtod $bd,$bd
+
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fxtod $na,$na
+ std $ahi,[$ap_h+$j]
+ fxtod $nb,$nb
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fxtod $nc,$nc
+ std $nhi,[$np_h+$j]
+ fxtod $nd,$nd
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ add $j,8,$j
+ std $nlob,[%sp+$bias+$frame+8]
+ add $ap,$j,%o4
+ std $nloc,[%sp+$bias+$frame+16]
+ add $np,$j,%o5
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
+ ldx [%sp+$bias+$frame+0],%o0
+ fmuld $alo,$ba,$aloa
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $nlo,$na,$nloa
+ ldx [%sp+$bias+$frame+16],%o2
+ fmuld $alo,$bb,$alob
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $nlo,$nb,$nlob
+
+ srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$bc,$aloc
+ add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bd,$alod
+ add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ srlx %o2,16,%o7
+ fmuld $ahi,$ba,$ahia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ !and %o0,$mask,%o0
+ !and %o1,$mask,%o1
+ !and %o2,$mask,%o2
+ !sllx %o1,16,%o1
+ !sllx %o2,32,%o2
+ !sllx %o3,48,%o7
+ !or %o1,%o0,%o0
+ !or %o2,%o0,%o0
+ !or %o7,%o0,%o0 ! 64-bit result
+ srlx %o3,16,%g1 ! 34-bit carry
+ fmuld $ahi,$bb,$ahib
+
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $dota,$nloa,$nloa
+ faddd $dotb,$nlob,$nlob
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.L1stskip
+ std $nlod,[%sp+$bias+$frame+24]
+
+.align 32 ! incidentally already aligned !
+.L1st:
+ add $ap,$j,%o4
+ add $np,$j,%o5
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
+ ldx [%sp+$bias+$frame+0],%o0
+ fmuld $alo,$ba,$aloa
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $nlo,$na,$nloa
+ ldx [%sp+$bias+$frame+16],%o2
+ fmuld $alo,$bb,$alob
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $nlo,$nb,$nlob
+
+ srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$bc,$aloc
+ add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bd,$alod
+ add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ srlx %o2,16,%o7
+ fmuld $ahi,$ba,$ahia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ fmuld $ahi,$bb,$ahib
+ sllx %o1,16,%o1
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ sllx %o2,32,%o2
+ fmuld $ahi,$bc,$ahic
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ or %o2,%o0,%o0
+ fmuld $ahi,$bd,$ahid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ addcc %g1,%o0,%o0
+ faddd $dota,$nloa,$nloa
+ srlx %o3,16,%g1 ! 34-bit carry
+ faddd $dotb,$nlob,$nlob
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ std $nlod,[%sp+$bias+$frame+24]
+
+ addcc $j,8,$j
+ bnz,pt %icc,.L1st
+ add $tp,8,$tp
+
+.L1stskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
+ add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+32],%o4
+ addcc %g1,%o0,%o0
+ ldx [%sp+$bias+$frame+40],%o5
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
+ add $tp,8,$tp
+
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ mov %g1,$carry
+ stx %o4,[$tp] ! tp[num-1]=
+
+ ba .Louter
+ add $i,8,$i
+.align 32
+.Louter:
+ sub %g0,$num,$j ! j=-num
+ add %sp,$bias+$frame+$locals,$tp
+
+ add $ap,$j,%o3
+ add $bp,$i,%o4
+
+ ld [%o3+4],%g1 ! bp[i]
+ ld [%o3+0],%o0
+ ld [%o4+4],%g5 ! ap[0]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
+ ldx [$tp],%o2 ! tp[0]
+ mulx %o1,%o0,%o0
+ addcc %o2,%o0,%o0
+ mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
+ stx %o0,[%sp+$bias+$frame+0]
+
+ ! transfer b[i] to FPU as 4x16-bit values
+ ldda [%o4+2]%asi,$ba
+ ldda [%o4+0]%asi,$bb
+ ldda [%o4+6]%asi,$bc
+ ldda [%o4+4]%asi,$bd
+
+ ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
+ ldda [%sp+$bias+$frame+6]%asi,$na
+ fxtod $ba,$ba
+ ldda [%sp+$bias+$frame+4]%asi,$nb
+ fxtod $bb,$bb
+ ldda [%sp+$bias+$frame+2]%asi,$nc
+ fxtod $bc,$bc
+ ldda [%sp+$bias+$frame+0]%asi,$nd
+ fxtod $bd,$bd
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ fxtod $na,$na
+ ldd [$ap_h+$j],$ahi
+ fxtod $nb,$nb
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ fxtod $nc,$nc
+ ldd [$np_h+$j],$nhi
+ fxtod $nd,$nd
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ add $j,8,$j
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $alo,$bd,$alod
+ ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $ahi,$ba,$ahia
+
+ srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ add %o7,%o1,%o1
+ fmuld $ahi,$bb,$ahib
+ srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ add %o7,%o2,%o2
+ fmuld $ahi,$bc,$ahic
+ srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ ! why?
+ and %o0,$mask,%o0
+ fmuld $ahi,$bd,$ahid
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
+ sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
+ or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [$tp],%o7
+ faddd $nloc,$nhia,$nloc
+ addcc %o7,%o0,%o0
+ ! end-of-why?
+ faddd $nlod,$nhib,$nlod
+ srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.Linnerskip
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ba .Linner
+ nop
+.align 32
+.Linner:
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $alo,$bd,$alod
+ ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $ahi,$ba,$ahia
+
+ srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ add %o7,%o1,%o1
+ fmuld $ahi,$bb,$ahib
+ srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ add %o7,%o2,%o2
+ fmuld $ahi,$bc,$ahic
+ srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ fmuld $ahi,$bd,$ahid
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
+ sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
+ or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $nloc,$nhia,$nloc
+ addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
+ faddd $nlod,$nhib,$nlod
+ srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+ fdtox $nlob,$nlob
+ addcc %o7,%o0,%o0
+ fdtox $nloc,$nloc
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ addcc $j,8,$j
+ std $nlod,[%sp+$bias+$frame+24]
+ bnz,pt %icc,.Linner
+ add $tp,8,$tp
+
+.Linnerskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
+ add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ ldx [%sp+$bias+$frame+32],%o4
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+40],%o5
+ addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ addcc %o7,%o0,%o0
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]
+ add $tp,8,$tp
+
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ addcc $carry,%o4,%o4
+ stx %o4,[$tp] ! tp[num-1]
+ mov %g1,$carry
+ bcs,a %xcc,.+8
+ add $carry,1,$carry
+
+ addcc $i,8,$i
+ bnz %icc,.Louter
+ nop
+
+ add $tp,8,$tp ! adjust tp to point at the end
+ orn %g0,%g0,%g4
+ sub %g0,$num,%o7 ! n=-num
+ ba .Lsub
+ subcc %g0,%g0,%g0 ! clear %icc.c
+
+.align 32
+.Lsub:
+ ldx [$tp+%o7],%o0
+ add $np,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ srlx %o0,32,%o1
+ subccc %o0,%o2,%o2
+ add $rp,%o7,%g1
+ subccc %o1,%o3,%o3
+ st %o2,[%g1+0]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lsub
+ st %o3,[%g1+4]
+ subc $carry,0,%g4
+ sub %g0,$num,%o7 ! n=-num
+ ba .Lcopy
+ nop
+
+.align 32
+.Lcopy:
+ ldx [$tp+%o7],%o0
+ add $rp,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ stx %g0,[$tp+%o7]
+ and %o0,%g4,%o0
+ srlx %o0,32,%o1
+ andn %o2,%g4,%o2
+ andn %o3,%g4,%o3
+ or %o2,%o0,%o0
+ or %o3,%o1,%o1
+ st %o0,[%g1+0]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lcopy
+ st %o1,[%g1+4]
+ sub %g0,$num,%o7 ! n=-num
+
+.Lzap:
+ stx %g0,[$ap_l+%o7]
+ stx %g0,[$ap_h+%o7]
+ stx %g0,[$np_l+%o7]
+ stx %g0,[$np_h+%o7]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lzap
+ nop
+
+ ldx [%sp+$bias+$frame+48],%o7
+ wr %g0,%o7,%asi ! restore %asi
+
+ mov 1,%i0
+.Lret:
+ ret
+ restore
+.type $fname,#function
+.size $fname,(.-$fname)
+.asciz	"Montgomery Multiplication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
+.align 32
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+# The substitution below makes it possible to compile without demanding
+# VIS extensions on the command line, e.g. -xarch=v9 vs. -xarch=v9a. I
+# dare to do this because VIS capability is detected at run-time now
+# and this routine is not called on a CPU not capable of executing it.
+# Do note that fzeros is not the only VIS dependency! Another dependency
+# is implicit and is just _a_ numerical value loaded into the %asi
+# register, which the assembler can't recognize as VIS-specific...
+$code =~ s/fzeros\s+%f([0-9]+)/
+ sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)
+ /gem;
+
+print $code;
+# flush
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/via-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/via-mont.pl
new file mode 100644
index 00000000..c046a514
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/via-mont.pl
@@ -0,0 +1,242 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Wrapper around 'rep montmul', a VIA-specific instruction accessing
+# the PadLock Montgomery Multiplier. The wrapper is designed as a
+# drop-in replacement for OpenSSL bn_mul_mont [first implemented in
+# 0.9.9].
+#
+# Below are interleaved outputs from 'openssl speed rsa dsa' for 4
+# different software configurations on a 1.5GHz VIA Esther processor.
+# Lines marked with "software integer" denote performance of the hand-
+# coded integer-only assembler found in OpenSSL 0.9.7. "Software SSE2"
+# refers to the hand-coded SSE2 Montgomery multiplication procedure
+# found in OpenSSL 0.9.9. "Hardware VIA SDK" refers to the padlock_pmm
+# routine from PadLock SDK 2.0.1 available for download from VIA, which
+# naturally utilizes the magic 'repz montmul' instruction. And finally,
+# "hardware this" refers to *this* implementation, which also uses
+# 'repz montmul'.
+#
+# sign verify sign/s verify/s
+# rsa 512 bits 0.001720s 0.000140s 581.4 7149.7 software integer
+# rsa 512 bits 0.000690s 0.000086s 1450.3 11606.0 software SSE2
+# rsa 512 bits 0.006136s 0.000201s 163.0 4974.5 hardware VIA SDK
+# rsa 512 bits 0.000712s 0.000050s 1404.9 19858.5 hardware this
+#
+# rsa 1024 bits 0.008518s 0.000413s 117.4 2420.8 software integer
+# rsa 1024 bits 0.004275s 0.000277s 233.9 3609.7 software SSE2
+# rsa 1024 bits 0.012136s 0.000260s 82.4 3844.5 hardware VIA SDK
+# rsa 1024 bits 0.002522s 0.000116s 396.5 8650.9 hardware this
+#
+# rsa 2048 bits 0.050101s 0.001371s 20.0 729.6 software integer
+# rsa 2048 bits 0.030273s 0.001008s 33.0 991.9 software SSE2
+# rsa 2048 bits 0.030833s 0.000976s 32.4 1025.1 hardware VIA SDK
+# rsa 2048 bits 0.011879s 0.000342s 84.2 2921.7 hardware this
+#
+# rsa 4096 bits 0.327097s 0.004859s 3.1 205.8 software integer
+# rsa 4096 bits 0.229318s 0.003859s 4.4 259.2 software SSE2
+# rsa 4096 bits 0.233953s 0.003274s 4.3 305.4 hardware VIA SDK
+# rsa 4096 bits 0.070493s 0.001166s 14.2 857.6 hardware this
+#
+# dsa 512 bits 0.001342s 0.001651s 745.2 605.7 software integer
+# dsa 512 bits 0.000844s 0.000987s 1185.3 1013.1 software SSE2
+# dsa 512 bits 0.001902s 0.002247s 525.6 444.9 hardware VIA SDK
+# dsa 512 bits 0.000458s 0.000524s 2182.2 1909.1 hardware this
+#
+# dsa 1024 bits 0.003964s 0.004926s 252.3 203.0 software integer
+# dsa 1024 bits 0.002686s 0.003166s 372.3 315.8 software SSE2
+# dsa 1024 bits 0.002397s 0.002823s 417.1 354.3 hardware VIA SDK
+# dsa 1024 bits 0.000978s 0.001170s 1022.2 855.0 hardware this
+#
+# dsa 2048 bits 0.013280s 0.016518s 75.3 60.5 software integer
+# dsa 2048 bits 0.009911s 0.011522s 100.9 86.8 software SSE2
+# dsa 2048 bits 0.009542s 0.011763s 104.8 85.0 hardware VIA SDK
+# dsa 2048 bits 0.002884s 0.003352s 346.8 298.3 hardware this
+#
+# To give you another reference point, here is the output for a 2.4GHz
+# P4 running the hand-coded SSE2 bn_mul_mont found in 0.9.9, i.e.
+# "software SSE2" in the above terms.
+#
+# rsa 512 bits 0.000407s 0.000047s 2454.2 21137.0
+# rsa 1024 bits 0.002426s 0.000141s 412.1 7100.0
+# rsa 2048 bits 0.015046s 0.000491s 66.5 2034.9
+# rsa 4096 bits 0.109770s 0.002379s 9.1 420.3
+# dsa 512 bits 0.000438s 0.000525s 2281.1 1904.1
+# dsa 1024 bits 0.001346s 0.001595s 742.7 627.0
+# dsa 2048 bits 0.004745s 0.005582s 210.7 179.1
+#
+# Conclusions:
+# - VIA SDK leaves a *lot* of room for improvement (which this
+# implementation successfully fills:-);
+# - 'rep montmul' gives up to >3x performance improvement depending on
+# key length;
+# - in terms of absolute performance it delivers approximately as much
+# as modern out-of-order 32-bit cores [again, for longer keys].
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"via-mont.pl");
+
+# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num);
+$func="bn_mul_mont_padlock";
+
+$pad=16*1; # amount of reserved bytes on top of every vector
+
+# stack layout
+$mZeroPrime=&DWP(0,"esp"); # these are specified by VIA
+$A=&DWP(4,"esp");
+$B=&DWP(8,"esp");
+$T=&DWP(12,"esp");
+$M=&DWP(16,"esp");
+$scratch=&DWP(20,"esp");
+$rp=&DWP(24,"esp"); # these are mine
+$sp=&DWP(28,"esp");
+# &DWP(32,"esp") # 32 byte scratch area
+# &DWP(64+(4*$num+$pad)*0,"esp") # padded tp[num]
+# &DWP(64+(4*$num+$pad)*1,"esp") # padded copy of ap[num]
+# &DWP(64+(4*$num+$pad)*2,"esp") # padded copy of bp[num]
+# &DWP(64+(4*$num+$pad)*3,"esp") # padded copy of np[num]
+# Note that the SDK suggests unconditionally allocating 2K per vector.
+# This has quite an impact on performance; it naturally depends on key
+# length, but, to give an example, 1024-bit private RSA key operations
+# suffer a >30% penalty. I allocate only as much as is actually required...
+
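+# The same layout arithmetic in plain Perl, as a quick cross-check of
+# the &lea math below ($num here is the 32-bit word count; the helper
+# name is illustrative and nothing calls it):
+sub padlock_frame_layout {
+	my ($num) = @_;
+	my $vec = 4*$num + $pad;	# one padded vector, in bytes
+	return (
+		tp    => 64,		# parameter block, saved pointers and scratch fill the first 64 bytes
+		ap    => 64 + 1*$vec,
+		bp    => 64 + 2*$vec,
+		np    => 64 + 3*$vec,
+		total => 64 + 4*$vec,	# the code then aligns esp down to a 64-byte boundary
+	);
+}
+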
+&function_begin($func);
+ &xor ("eax","eax");
+ &mov ("ecx",&wparam(5)); # num
+	# meet VIA's limitations for num [note that the specification
+	# expresses them in bits, while we work with the number of 32-bit words]
+ &test ("ecx",3);
+ &jnz (&label("leave")); # num % 4 != 0
+ &cmp ("ecx",8);
+ &jb (&label("leave")); # num < 8
+ &cmp ("ecx",1024);
+ &ja (&label("leave")); # num > 1024
+
+ &pushf ();
+ &cld ();
+
+ &mov ("edi",&wparam(0)); # rp
+ &mov ("eax",&wparam(1)); # ap
+ &mov ("ebx",&wparam(2)); # bp
+ &mov ("edx",&wparam(3)); # np
+ &mov ("esi",&wparam(4)); # n0
+ &mov ("esi",&DWP(0,"esi")); # *n0
+
+ &lea ("ecx",&DWP($pad,"","ecx",4)); # ecx becomes vector size in bytes
+ &lea ("ebp",&DWP(64,"","ecx",4)); # allocate 4 vectors + 64 bytes
+ &neg ("ebp");
+ &add ("ebp","esp");
+ &and ("ebp",-64); # align to cache-line
+ &xchg ("ebp","esp"); # alloca
+
+ &mov ($rp,"edi"); # save rp
+ &mov ($sp,"ebp"); # save esp
+
+ &mov ($mZeroPrime,"esi");
+ &lea ("esi",&DWP(64,"esp")); # tp
+ &mov ($T,"esi");
+ &lea ("edi",&DWP(32,"esp")); # scratch area
+ &mov ($scratch,"edi");
+ &mov ("esi","eax");
+
+ &lea ("ebp",&DWP(-$pad,"ecx"));
+ &shr ("ebp",2); # restore original num value in ebp
+
+ &xor ("eax","eax");
+
+ &mov ("ecx","ebp");
+ &lea ("ecx",&DWP((32+$pad)/4,"ecx"));# padded tp + scratch
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ &mov ("ecx","ebp");
+ &lea ("edi",&DWP(64+$pad,"esp","ecx",4));# pointer to ap copy
+ &mov ($A,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded ap copy...
+
+ &mov ("ecx","ebp");
+ &mov ("esi","ebx");
+ &mov ($B,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded bp copy...
+
+ &mov ("ecx","ebp");
+ &mov ("esi","edx");
+ &mov ($M,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded np copy...
+
+ # let magic happen...
+ &mov ("ecx","ebp");
+ &mov ("esi","esp");
+ &shl ("ecx",5); # convert word counter to bit counter
+ &align (4);
+ &data_byte(0xf3,0x0f,0xa6,0xc0);# rep montmul
+
+ &mov ("ecx","ebp");
+ &lea ("esi",&DWP(64,"esp")); # tp
+ # edi still points at the end of padded np copy...
+ &neg ("ebp");
+ &lea ("ebp",&DWP(-$pad,"edi","ebp",4)); # so just "rewind"
+ &mov ("edi",$rp); # restore rp
+ &xor ("edx","edx"); # i=0 and clear CF
+
+&set_label("sub",8);
+ &mov ("eax",&DWP(0,"esi","edx",4));
+ &sbb ("eax",&DWP(0,"ebp","edx",4));
+ &mov (&DWP(0,"edi","edx",4),"eax"); # rp[i]=tp[i]-np[i]
+ &lea ("edx",&DWP(1,"edx")); # i++
+ &loop (&label("sub")); # doesn't affect CF!
+
+ &mov ("eax",&DWP(0,"esi","edx",4)); # upmost overflow bit
+ &sbb ("eax",0);
+ &and ("esi","eax");
+ &not ("eax");
+ &mov ("ebp","edi");
+ &and ("ebp","eax");
+ &or ("esi","ebp"); # tp=carry?tp:rp
+
+ &mov ("ecx","edx"); # num
+ &xor ("edx","edx"); # i=0
+
+&set_label("copy",8);
+ &mov ("eax",&DWP(0,"esi","edx",4));
+ &mov (&DWP(64,"esp","edx",4),"ecx"); # zap tp
+ &mov (&DWP(0,"edi","edx",4),"eax");
+ &lea ("edx",&DWP(1,"edx")); # i++
+ &loop (&label("copy"));
+
+ &mov ("ebp",$sp);
+ &xor ("eax","eax");
+
+ &mov ("ecx",64/4);
+ &mov ("edi","esp"); # zap frame including scratch area
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ # zap copies of ap, bp and np
+ &lea ("edi",&DWP(64+$pad,"esp","edx",4));# pointer to ap
+ &lea ("ecx",&DWP(3*$pad/4,"edx","edx",2));
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ &mov ("esp","ebp");
+ &inc ("eax"); # signal "done"
+ &popf ();
+&set_label("leave");
+&function_end($func);
+
+&asciz("Padlock Montgomery Multiplication, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.S
new file mode 100644
index 00000000..9ed29ae0
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.S
@@ -0,0 +1,347 @@
+.file "crypto/bn/asm/x86-gf2m.s"
+.text
+.type _mul_1x1_mmx,@function
+.align 16
+_mul_1x1_mmx:
+ subl $36,%esp
+ movl %eax,%ecx
+ leal (%eax,%eax,1),%edx
+ andl $1073741823,%ecx
+ leal (%edx,%edx,1),%ebp
+ movl $0,(%esp)
+ andl $2147483647,%edx
+ movd %eax,%mm2
+ movd %ebx,%mm3
+ movl %ecx,4(%esp)
+ xorl %edx,%ecx
+ pxor %mm5,%mm5
+ pxor %mm4,%mm4
+ movl %edx,8(%esp)
+ xorl %ebp,%edx
+ movl %ecx,12(%esp)
+ pcmpgtd %mm2,%mm5
+ paddd %mm2,%mm2
+ xorl %edx,%ecx
+ movl %ebp,16(%esp)
+ xorl %edx,%ebp
+ pand %mm3,%mm5
+ pcmpgtd %mm2,%mm4
+ movl %ecx,20(%esp)
+ xorl %ecx,%ebp
+ psllq $31,%mm5
+ pand %mm3,%mm4
+ movl %edx,24(%esp)
+ movl $7,%esi
+ movl %ebp,28(%esp)
+ movl %esi,%ebp
+ andl %ebx,%esi
+ shrl $3,%ebx
+ movl %ebp,%edi
+ psllq $30,%mm4
+ andl %ebx,%edi
+ shrl $3,%ebx
+ movd (%esp,%esi,4),%mm0
+ movl %ebp,%esi
+ andl %ebx,%esi
+ shrl $3,%ebx
+ movd (%esp,%edi,4),%mm2
+ movl %ebp,%edi
+ psllq $3,%mm2
+ andl %ebx,%edi
+ shrl $3,%ebx
+ pxor %mm2,%mm0
+ movd (%esp,%esi,4),%mm1
+ movl %ebp,%esi
+ psllq $6,%mm1
+ andl %ebx,%esi
+ shrl $3,%ebx
+ pxor %mm1,%mm0
+ movd (%esp,%edi,4),%mm2
+ movl %ebp,%edi
+ psllq $9,%mm2
+ andl %ebx,%edi
+ shrl $3,%ebx
+ pxor %mm2,%mm0
+ movd (%esp,%esi,4),%mm1
+ movl %ebp,%esi
+ psllq $12,%mm1
+ andl %ebx,%esi
+ shrl $3,%ebx
+ pxor %mm1,%mm0
+ movd (%esp,%edi,4),%mm2
+ movl %ebp,%edi
+ psllq $15,%mm2
+ andl %ebx,%edi
+ shrl $3,%ebx
+ pxor %mm2,%mm0
+ movd (%esp,%esi,4),%mm1
+ movl %ebp,%esi
+ psllq $18,%mm1
+ andl %ebx,%esi
+ shrl $3,%ebx
+ pxor %mm1,%mm0
+ movd (%esp,%edi,4),%mm2
+ movl %ebp,%edi
+ psllq $21,%mm2
+ andl %ebx,%edi
+ shrl $3,%ebx
+ pxor %mm2,%mm0
+ movd (%esp,%esi,4),%mm1
+ movl %ebp,%esi
+ psllq $24,%mm1
+ andl %ebx,%esi
+ shrl $3,%ebx
+ pxor %mm1,%mm0
+ movd (%esp,%edi,4),%mm2
+ pxor %mm4,%mm0
+ psllq $27,%mm2
+ pxor %mm2,%mm0
+ movd (%esp,%esi,4),%mm1
+ pxor %mm5,%mm0
+ psllq $30,%mm1
+ addl $36,%esp
+ pxor %mm1,%mm0
+ ret
+.size _mul_1x1_mmx,.-_mul_1x1_mmx
+.type _mul_1x1_ialu,@function
+.align 16
+_mul_1x1_ialu:
+ subl $36,%esp
+ movl %eax,%ecx
+ leal (%eax,%eax,1),%edx
+ leal (,%eax,4),%ebp
+ andl $1073741823,%ecx
+ leal (%eax,%eax,1),%edi
+ sarl $31,%eax
+ movl $0,(%esp)
+ andl $2147483647,%edx
+ movl %ecx,4(%esp)
+ xorl %edx,%ecx
+ movl %edx,8(%esp)
+ xorl %ebp,%edx
+ movl %ecx,12(%esp)
+ xorl %edx,%ecx
+ movl %ebp,16(%esp)
+ xorl %edx,%ebp
+ movl %ecx,20(%esp)
+ xorl %ecx,%ebp
+ sarl $31,%edi
+ andl %ebx,%eax
+ movl %edx,24(%esp)
+ andl %ebx,%edi
+ movl %ebp,28(%esp)
+ movl %eax,%edx
+ shll $31,%eax
+ movl %edi,%ecx
+ shrl $1,%edx
+ movl $7,%esi
+ shll $30,%edi
+ andl %ebx,%esi
+ shrl $2,%ecx
+ xorl %edi,%eax
+ shrl $3,%ebx
+ movl $7,%edi
+ andl %ebx,%edi
+ shrl $3,%ebx
+ xorl %ecx,%edx
+ xorl (%esp,%esi,4),%eax
+ movl $7,%esi
+ andl %ebx,%esi
+ shrl $3,%ebx
+ movl (%esp,%edi,4),%ebp
+ movl $7,%edi
+ movl %ebp,%ecx
+ shll $3,%ebp
+ andl %ebx,%edi
+ shrl $29,%ecx
+ xorl %ebp,%eax
+ shrl $3,%ebx
+ xorl %ecx,%edx
+ movl (%esp,%esi,4),%ecx
+ movl $7,%esi
+ movl %ecx,%ebp
+ shll $6,%ecx
+ andl %ebx,%esi
+ shrl $26,%ebp
+ xorl %ecx,%eax
+ shrl $3,%ebx
+ xorl %ebp,%edx
+ movl (%esp,%edi,4),%ebp
+ movl $7,%edi
+ movl %ebp,%ecx
+ shll $9,%ebp
+ andl %ebx,%edi
+ shrl $23,%ecx
+ xorl %ebp,%eax
+ shrl $3,%ebx
+ xorl %ecx,%edx
+ movl (%esp,%esi,4),%ecx
+ movl $7,%esi
+ movl %ecx,%ebp
+ shll $12,%ecx
+ andl %ebx,%esi
+ shrl $20,%ebp
+ xorl %ecx,%eax
+ shrl $3,%ebx
+ xorl %ebp,%edx
+ movl (%esp,%edi,4),%ebp
+ movl $7,%edi
+ movl %ebp,%ecx
+ shll $15,%ebp
+ andl %ebx,%edi
+ shrl $17,%ecx
+ xorl %ebp,%eax
+ shrl $3,%ebx
+ xorl %ecx,%edx
+ movl (%esp,%esi,4),%ecx
+ movl $7,%esi
+ movl %ecx,%ebp
+ shll $18,%ecx
+ andl %ebx,%esi
+ shrl $14,%ebp
+ xorl %ecx,%eax
+ shrl $3,%ebx
+ xorl %ebp,%edx
+ movl (%esp,%edi,4),%ebp
+ movl $7,%edi
+ movl %ebp,%ecx
+ shll $21,%ebp
+ andl %ebx,%edi
+ shrl $11,%ecx
+ xorl %ebp,%eax
+ shrl $3,%ebx
+ xorl %ecx,%edx
+ movl (%esp,%esi,4),%ecx
+ movl $7,%esi
+ movl %ecx,%ebp
+ shll $24,%ecx
+ andl %ebx,%esi
+ shrl $8,%ebp
+ xorl %ecx,%eax
+ shrl $3,%ebx
+ xorl %ebp,%edx
+ movl (%esp,%edi,4),%ebp
+ movl %ebp,%ecx
+ shll $27,%ebp
+ movl (%esp,%esi,4),%edi
+ shrl $5,%ecx
+ movl %edi,%esi
+ xorl %ebp,%eax
+ shll $30,%edi
+ xorl %ecx,%edx
+ shrl $2,%esi
+ xorl %edi,%eax
+ xorl %esi,%edx
+ addl $36,%esp
+ ret
+.size _mul_1x1_ialu,.-_mul_1x1_ialu
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,@function
+.align 16
+bn_GF2m_mul_2x2:
+.L_bn_GF2m_mul_2x2_begin:
+ call .L000PIC_me_up
+.L000PIC_me_up:
+ popl %edx
+ leal _GLOBAL_OFFSET_TABLE_+[.-.L000PIC_me_up](%edx),%edx
+ movl OPENSSL_ia32cap_P@GOT(%edx),%edx
+ movl (%edx),%eax
+ movl 4(%edx),%edx
+ testl $8388608,%eax
+ jz .L001ialu
+ testl $16777216,%eax
+ jz .L002mmx
+ testl $2,%edx
+ jz .L002mmx
+ movups 8(%esp),%xmm0
+ shufps $177,%xmm0,%xmm0
+.byte 102,15,58,68,192,1
+ movl 4(%esp),%eax
+ movups %xmm0,(%eax)
+ ret
+.align 16
+.L002mmx:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ movl 24(%esp),%eax
+ movl 32(%esp),%ebx
+ call _mul_1x1_mmx
+ movq %mm0,%mm7
+ movl 28(%esp),%eax
+ movl 36(%esp),%ebx
+ call _mul_1x1_mmx
+ movq %mm0,%mm6
+ movl 24(%esp),%eax
+ movl 32(%esp),%ebx
+ xorl 28(%esp),%eax
+ xorl 36(%esp),%ebx
+ call _mul_1x1_mmx
+ pxor %mm7,%mm0
+ movl 20(%esp),%eax
+ pxor %mm6,%mm0
+ movq %mm0,%mm2
+ psllq $32,%mm0
+ popl %edi
+ psrlq $32,%mm2
+ popl %esi
+ pxor %mm6,%mm0
+ popl %ebx
+ pxor %mm7,%mm2
+ movq %mm0,(%eax)
+ popl %ebp
+ movq %mm2,8(%eax)
+ emms
+ ret
+.align 16
+.L001ialu:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ subl $20,%esp
+ movl 44(%esp),%eax
+ movl 52(%esp),%ebx
+ call _mul_1x1_ialu
+ movl %eax,8(%esp)
+ movl %edx,12(%esp)
+ movl 48(%esp),%eax
+ movl 56(%esp),%ebx
+ call _mul_1x1_ialu
+ movl %eax,(%esp)
+ movl %edx,4(%esp)
+ movl 44(%esp),%eax
+ movl 52(%esp),%ebx
+ xorl 48(%esp),%eax
+ xorl 56(%esp),%ebx
+ call _mul_1x1_ialu
+ movl 40(%esp),%ebp
+ movl (%esp),%ebx
+ movl 4(%esp),%ecx
+ movl 8(%esp),%edi
+ movl 12(%esp),%esi
+ xorl %edx,%eax
+ xorl %ecx,%edx
+ xorl %ebx,%eax
+ movl %ebx,(%ebp)
+ xorl %edi,%edx
+ movl %esi,12(%ebp)
+ xorl %esi,%eax
+ addl $20,%esp
+ xorl %esi,%edx
+ popl %edi
+ xorl %edx,%eax
+ popl %esi
+ movl %edx,8(%ebp)
+ popl %ebx
+ movl %eax,4(%ebp)
+ popl %ebp
+ ret
+.size bn_GF2m_mul_2x2,.-.L_bn_GF2m_mul_2x2_begin
+.byte 71,70,40,50,94,109,41,32,77,117,108,116,105,112,108,105
+.byte 99,97,116,105,111,110,32,102,111,114,32,120,56,54,44,32
+.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
+.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
+.byte 62,0
+.comm OPENSSL_ia32cap_P,8,4
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.pl
new file mode 100644
index 00000000..b5795302
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-gf2m.pl
@@ -0,0 +1,313 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements the bn_GF2m_mul_2x2 polynomial multiplication
+# used in bn_gf2m.c. It's kind of a low-hanging mechanical port from C
+# for the time being... Except that it has three code paths: pure
+# integer code suitable for any x86 CPU, MMX code suitable for PIII and
+# later, and PCLMULQDQ code suitable for Westmere and later. The
+# improvement varies from one benchmark and µ-arch to another. Below
+# are interval values for 163- and 571-bit ECDH benchmarks relative to
+# compiler-generated code:
+#
+# PIII 16%-30%
+# P4 12%-12%
+# Opteron 18%-40%
+# Core2 19%-44%
+# Atom 38%-64%
+# Westmere 53%-121%(PCLMULQDQ)/20%-32%(MMX)
+# Sandy Bridge 72%-127%(PCLMULQDQ)/27%-23%(MMX)
+#
+# Note that the above improvement coefficients are not coefficients for
+# bn_GF2m_mul_2x2 itself. For example, a 120% ECDH improvement is the
+# result of bn_GF2m_mul_2x2 being >4x faster. As it gets faster, the
+# benchmark is more and more dominated by other subroutines, most
+# notably by BN_GF2m_mod[_mul]_arr...
+
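+# What all three paths compute, as a plain-integer reference (assumes a
+# Perl with 64-bit integers; the helper names are illustrative and
+# nothing below calls them): a carry-less 32x32-bit multiply, plus the
+# Karatsuba-style combination of three such products into the 2x2
+# result, mirroring how the MMX/IALU paths combine a1·b1, a0·b0 and
+# (a0+a1)·(b0+b1).
+sub clmul_1x1_ref {
+	my ($x,$y) = @_;			# 0 <= $x,$y < 2^32
+	my ($lo,$hi) = (0,0);
+	for my $i (0..31) {
+		next unless ($y >> $i) & 1;
+		$lo ^= ($x << $i) & 0xffffffff;	# low 32 bits of x<<i
+		$hi ^= $x >> (32-$i) if $i;	# bits shifted out of the low half
+	}
+	return ($lo,$hi);
+}
+sub gf2m_mul_2x2_ref {
+	my ($x1,$x0,$y1,$y0) = @_;		# same order as a1,a0,b1,b0 below
+	my ($hl,$hh) = clmul_1x1_ref($x1,$y1);		# a1·b1
+	my ($ll,$lh) = clmul_1x1_ref($x0,$y0);		# a0·b0
+	my ($ml,$mh) = clmul_1x1_ref($x1^$x0,$y1^$y0);	# (a0+a1)·(b0+b1)
+	$ml ^= $hl^$ll; $mh ^= $hh^$lh;			# middle 64 bits
+	return ($ll, $lh^$ml, $hl^$mh, $hh);		# r[0..3], least significant first
+}
+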
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0,$x86only = $ARGV[$#ARGV] eq "386");
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+$a="eax";
+$b="ebx";
+($a1,$a2,$a4)=("ecx","edx","ebp");
+
+$R="mm0";
+@T=("mm1","mm2");
+($A,$B,$B30,$B31)=("mm2","mm3","mm4","mm5");
+@i=("esi","edi");
+
+ if (!$x86only) {
+&function_begin_B("_mul_1x1_mmx");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &and ($a1,0x3fffffff);
+ &lea ($a4,&DWP(0,$a2,$a2));
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &movd ($A,$a);
+ &movd ($B,$b);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &pxor ($B31,$B31);
+ &pxor ($B30,$B30);
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &pcmpgtd($B31,$A); # broadcast 31st bit
+ &paddd ($A,$A); # $A<<=1
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &pand ($B31,$B);
+ &pcmpgtd($B30,$A); # broadcast 30th bit
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+ &psllq ($B31,31);
+ &pand ($B30,$B);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &mov (@i[0],0x7);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($a4,@i[0]);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ &mov (@i[1],$a4);
+ &psllq ($B30,30);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &movd ($R,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],$a4);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],$a4);
+ &psllq (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &pxor ($R,@T[1]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &pxor ($R,$B30);
+ &psllq (@T[1],3*$n++);
+ &pxor ($R,@T[1]);
+
+ &movd (@T[0],&DWP(0,"esp",@i[0],4));
+ &pxor ($R,$B31);
+ &psllq (@T[0],3*$n);
+ &add ("esp",32+4);
+ &pxor ($R,@T[0]);
+ &ret ();
+&function_end_B("_mul_1x1_mmx");
+ }
+
+($lo,$hi)=("eax","edx");
+@T=("ecx","ebp");
+
+&function_begin_B("_mul_1x1_ialu");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &lea ($a4,&DWP(0,"",$a,4));
+ &and ($a1,0x3fffffff);
+ &lea (@i[1],&DWP(0,$lo,$lo));
+ &sar ($lo,31); # broadcast 31st bit
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+	&sar	(@i[1],31);	# broadcast 30th bit
+ &and ($lo,$b);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &and (@i[1],$b);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($hi,$lo);
+ &shl ($lo,31);
+ &mov (@T[0],@i[1]);
+ &shr ($hi,1);
+
+ &mov (@i[0],0x7);
+ &shl (@i[1],30);
+ &and (@i[0],$b);
+ &shr (@T[0],2);
+ &xor ($lo,@i[1]);
+
+ &shr ($b,3);
+ &mov (@i[1],0x7); # 5-byte instruction!?
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+ &xor ($lo,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],0x7);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],0x7);
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr (@T[0],32-3*$n);
+ &xor ($lo,@T[1]);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &mov (@i[1],&DWP(0,"esp",@i[0],4));
+ &shr (@T[0],32-3*$n); $n++;
+ &mov (@i[0],@i[1]);
+ &xor ($lo,@T[1]);
+ &shl (@i[1],3*$n);
+ &xor ($hi,@T[0]);
+ &shr (@i[0],32-3*$n);
+ &xor ($lo,@i[1]);
+ &xor ($hi,@i[0]);
+
+ &add ("esp",32+4);
+ &ret ();
+&function_end_B("_mul_1x1_ialu");
+
+# void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
+&function_begin_B("bn_GF2m_mul_2x2");
+if (!$x86only) {
+ &picmeup("edx","OPENSSL_ia32cap_P");
+ &mov ("eax",&DWP(0,"edx"));
+ &mov ("edx",&DWP(4,"edx"));
+ &test ("eax",1<<23); # check MMX bit
+ &jz (&label("ialu"));
+if ($sse2) {
+ &test ("eax",1<<24); # check FXSR bit
+ &jz (&label("mmx"));
+ &test ("edx",1<<1); # check PCLMULQDQ bit
+ &jz (&label("mmx"));
+
+ &movups ("xmm0",&QWP(8,"esp"));
+ &shufps ("xmm0","xmm0",0b10110001);
+ &pclmulqdq ("xmm0","xmm0",1);
+ &mov ("eax",&DWP(4,"esp"));
+ &movups (&QWP(0,"eax"),"xmm0");
+ &ret ();
+
+&set_label("mmx",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_mmx"); # a1·b1
+ &movq ("mm7",$R);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # a0·b0
+ &movq ("mm6",$R);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # (a0+a1)·(b0+b1)
+ &pxor ($R,"mm7");
+ &mov ($a,&wparam(0));
+ &pxor ($R,"mm6"); # (a0+a1)·(b0+b1)-a1·b1-a0·b0
+
+ &movq ($A,$R);
+ &psllq ($R,32);
+ &pop ("edi");
+ &psrlq ($A,32);
+ &pop ("esi");
+ &pxor ($R,"mm6");
+ &pop ("ebx");
+ &pxor ($A,"mm7");
+ &movq (&QWP(0,$a),$R);
+ &pop ("ebp");
+ &movq (&QWP(8,$a),$A);
+ &emms ();
+ &ret ();
+&set_label("ialu",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &stack_push(4+1);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_ialu"); # a1·b1
+ &mov (&DWP(8,"esp"),$lo);
+ &mov (&DWP(12,"esp"),$hi);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # a0·b0
+ &mov (&DWP(0,"esp"),$lo);
+ &mov (&DWP(4,"esp"),$hi);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # (a0+a1)·(b0+b1)
+
+ &mov ("ebp",&wparam(0));
+ @r=("ebx","ecx","edi","esi");
+ &mov (@r[0],&DWP(0,"esp"));
+ &mov (@r[1],&DWP(4,"esp"));
+ &mov (@r[2],&DWP(8,"esp"));
+ &mov (@r[3],&DWP(12,"esp"));
+
+ &xor ($lo,$hi);
+ &xor ($hi,@r[1]);
+ &xor ($lo,@r[0]);
+ &mov (&DWP(0,"ebp"),@r[0]);
+ &xor ($hi,@r[2]);
+ &mov (&DWP(12,"ebp"),@r[3]);
+ &xor ($lo,@r[3]);
+ &stack_pop(4+1);
+ &xor ($hi,@r[3]);
+ &pop ("edi");
+ &xor ($lo,$hi);
+ &pop ("esi");
+ &mov (&DWP(8,"ebp"),$hi);
+ &pop ("ebx");
+ &mov (&DWP(4,"ebp"),$lo);
+ &pop ("ebp");
+ &ret ();
+&function_end_B("bn_GF2m_mul_2x2");
+
+&asciz ("GF(2^m) Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
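For readers who want the arithmetic without the assembler: the 2x2 routine above combines three 1x1 carry-less products in Karatsuba fashion, with (a0+a1)*(b0+b1) ^ a1*b1 ^ a0*b0 supplying the middle words. A minimal C sketch of that combination follows, assuming 32-bit words and substituting a naive bit-by-bit multiply for the table-driven/MMX/PCLMULQDQ 1x1 kernels; the names are illustrative only, not OpenSSL API.

    #include <stdint.h>

    /* carry-less (GF(2)[x]) product of two 32-bit polynomials */
    static uint64_t gf2m_mul_1x1(uint32_t a, uint32_t b)
    {
        uint64_t r = 0;
        for (int i = 0; i < 32; i++)
            if (b & (1u << i))
                r ^= (uint64_t)a << i;
        return r;
    }

    /* r[3..0] <- (a1*x^32 + a0) * (b1*x^32 + b0) over GF(2)[x] */
    static void gf2m_mul_2x2(uint32_t r[4], uint32_t a1, uint32_t a0,
                             uint32_t b1, uint32_t b0)
    {
        uint64_t hi  = gf2m_mul_1x1(a1, b1);                      /* a1*b1 */
        uint64_t lo  = gf2m_mul_1x1(a0, b0);                      /* a0*b0 */
        uint64_t mid = gf2m_mul_1x1(a0 ^ a1, b0 ^ b1) ^ hi ^ lo;  /* middle term */

        r[0] = (uint32_t)lo;
        r[1] = (uint32_t)(lo >> 32) ^ (uint32_t)mid;
        r[2] = (uint32_t)hi ^ (uint32_t)(mid >> 32);
        r[3] = (uint32_t)(hi >> 32);
    }

In GF(2) both addition and subtraction are XOR, which is why the "-a1·b1-a0·b0" correction noted in the comments above shows up as xor/pxor in the generated code.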
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.S
new file mode 100644
index 00000000..c701e9e3
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.S
@@ -0,0 +1,460 @@
+.file "crypto/bn/asm/x86-mont.s"
+.text
+.globl bn_mul_mont
+.type bn_mul_mont,@function
+.align 16
+bn_mul_mont:
+.L_bn_mul_mont_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ xorl %eax,%eax
+ movl 40(%esp),%edi
+ cmpl $4,%edi
+ jl .L000just_leave
+ leal 20(%esp),%esi
+ leal 24(%esp),%edx
+ movl %esp,%ebp
+ addl $2,%edi
+ negl %edi
+ leal -32(%esp,%edi,4),%esp
+ negl %edi
+ movl %esp,%eax
+ subl %edx,%eax
+ andl $2047,%eax
+ subl %eax,%esp
+ xorl %esp,%edx
+ andl $2048,%edx
+ xorl $2048,%edx
+ subl %edx,%esp
+ andl $-64,%esp
+ movl (%esi),%eax
+ movl 4(%esi),%ebx
+ movl 8(%esi),%ecx
+ movl 12(%esi),%edx
+ movl 16(%esi),%esi
+ movl (%esi),%esi
+ movl %eax,4(%esp)
+ movl %ebx,8(%esp)
+ movl %ecx,12(%esp)
+ movl %edx,16(%esp)
+ movl %esi,20(%esp)
+ leal -3(%edi),%ebx
+ movl %ebp,24(%esp)
+ call .L001PIC_me_up
+.L001PIC_me_up:
+ popl %eax
+ leal _GLOBAL_OFFSET_TABLE_+[.-.L001PIC_me_up](%eax),%eax
+ movl OPENSSL_ia32cap_P@GOT(%eax),%eax
+ btl $26,(%eax)
+ jnc .L002non_sse2
+ movl $-1,%eax
+ movd %eax,%mm7
+ movl 8(%esp),%esi
+ movl 12(%esp),%edi
+ movl 16(%esp),%ebp
+ xorl %edx,%edx
+ xorl %ecx,%ecx
+ movd (%edi),%mm4
+ movd (%esi),%mm5
+ movd (%ebp),%mm3
+ pmuludq %mm4,%mm5
+ movq %mm5,%mm2
+ movq %mm5,%mm0
+ pand %mm7,%mm0
+ pmuludq 20(%esp),%mm5
+ pmuludq %mm5,%mm3
+ paddq %mm0,%mm3
+ movd 4(%ebp),%mm1
+ movd 4(%esi),%mm0
+ psrlq $32,%mm2
+ psrlq $32,%mm3
+ incl %ecx
+.align 16
+.L0031st:
+ pmuludq %mm4,%mm0
+ pmuludq %mm5,%mm1
+ paddq %mm0,%mm2
+ paddq %mm1,%mm3
+ movq %mm2,%mm0
+ pand %mm7,%mm0
+ movd 4(%ebp,%ecx,4),%mm1
+ paddq %mm0,%mm3
+ movd 4(%esi,%ecx,4),%mm0
+ psrlq $32,%mm2
+ movd %mm3,28(%esp,%ecx,4)
+ psrlq $32,%mm3
+ leal 1(%ecx),%ecx
+ cmpl %ebx,%ecx
+ jl .L0031st
+ pmuludq %mm4,%mm0
+ pmuludq %mm5,%mm1
+ paddq %mm0,%mm2
+ paddq %mm1,%mm3
+ movq %mm2,%mm0
+ pand %mm7,%mm0
+ paddq %mm0,%mm3
+ movd %mm3,28(%esp,%ecx,4)
+ psrlq $32,%mm2
+ psrlq $32,%mm3
+ paddq %mm2,%mm3
+ movq %mm3,32(%esp,%ebx,4)
+ incl %edx
+.L004outer:
+ xorl %ecx,%ecx
+ movd (%edi,%edx,4),%mm4
+ movd (%esi),%mm5
+ movd 32(%esp),%mm6
+ movd (%ebp),%mm3
+ pmuludq %mm4,%mm5
+ paddq %mm6,%mm5
+ movq %mm5,%mm0
+ movq %mm5,%mm2
+ pand %mm7,%mm0
+ pmuludq 20(%esp),%mm5
+ pmuludq %mm5,%mm3
+ paddq %mm0,%mm3
+ movd 36(%esp),%mm6
+ movd 4(%ebp),%mm1
+ movd 4(%esi),%mm0
+ psrlq $32,%mm2
+ psrlq $32,%mm3
+ paddq %mm6,%mm2
+ incl %ecx
+ decl %ebx
+.L005inner:
+ pmuludq %mm4,%mm0
+ pmuludq %mm5,%mm1
+ paddq %mm0,%mm2
+ paddq %mm1,%mm3
+ movq %mm2,%mm0
+ movd 36(%esp,%ecx,4),%mm6
+ pand %mm7,%mm0
+ movd 4(%ebp,%ecx,4),%mm1
+ paddq %mm0,%mm3
+ movd 4(%esi,%ecx,4),%mm0
+ psrlq $32,%mm2
+ movd %mm3,28(%esp,%ecx,4)
+ psrlq $32,%mm3
+ paddq %mm6,%mm2
+ decl %ebx
+ leal 1(%ecx),%ecx
+ jnz .L005inner
+ movl %ecx,%ebx
+ pmuludq %mm4,%mm0
+ pmuludq %mm5,%mm1
+ paddq %mm0,%mm2
+ paddq %mm1,%mm3
+ movq %mm2,%mm0
+ pand %mm7,%mm0
+ paddq %mm0,%mm3
+ movd %mm3,28(%esp,%ecx,4)
+ psrlq $32,%mm2
+ psrlq $32,%mm3
+ movd 36(%esp,%ebx,4),%mm6
+ paddq %mm2,%mm3
+ paddq %mm6,%mm3
+ movq %mm3,32(%esp,%ebx,4)
+ leal 1(%edx),%edx
+ cmpl %ebx,%edx
+ jle .L004outer
+ emms
+ jmp .L006common_tail
+.align 16
+.L002non_sse2:
+ movl 8(%esp),%esi
+ leal 1(%ebx),%ebp
+ movl 12(%esp),%edi
+ xorl %ecx,%ecx
+ movl %esi,%edx
+ andl $1,%ebp
+ subl %edi,%edx
+ leal 4(%edi,%ebx,4),%eax
+ orl %edx,%ebp
+ movl (%edi),%edi
+ jz .L007bn_sqr_mont
+ movl %eax,28(%esp)
+ movl (%esi),%eax
+ xorl %edx,%edx
+.align 16
+.L008mull:
+ movl %edx,%ebp
+ mull %edi
+ addl %eax,%ebp
+ leal 1(%ecx),%ecx
+ adcl $0,%edx
+ movl (%esi,%ecx,4),%eax
+ cmpl %ebx,%ecx
+ movl %ebp,28(%esp,%ecx,4)
+ jl .L008mull
+ movl %edx,%ebp
+ mull %edi
+ movl 20(%esp),%edi
+ addl %ebp,%eax
+ movl 16(%esp),%esi
+ adcl $0,%edx
+ imull 32(%esp),%edi
+ movl %eax,32(%esp,%ebx,4)
+ xorl %ecx,%ecx
+ movl %edx,36(%esp,%ebx,4)
+ movl %ecx,40(%esp,%ebx,4)
+ movl (%esi),%eax
+ mull %edi
+ addl 32(%esp),%eax
+ movl 4(%esi),%eax
+ adcl $0,%edx
+ incl %ecx
+ jmp .L0092ndmadd
+.align 16
+.L0101stmadd:
+ movl %edx,%ebp
+ mull %edi
+ addl 32(%esp,%ecx,4),%ebp
+ leal 1(%ecx),%ecx
+ adcl $0,%edx
+ addl %eax,%ebp
+ movl (%esi,%ecx,4),%eax
+ adcl $0,%edx
+ cmpl %ebx,%ecx
+ movl %ebp,28(%esp,%ecx,4)
+ jl .L0101stmadd
+ movl %edx,%ebp
+ mull %edi
+ addl 32(%esp,%ebx,4),%eax
+ movl 20(%esp),%edi
+ adcl $0,%edx
+ movl 16(%esp),%esi
+ addl %eax,%ebp
+ adcl $0,%edx
+ imull 32(%esp),%edi
+ xorl %ecx,%ecx
+ addl 36(%esp,%ebx,4),%edx
+ movl %ebp,32(%esp,%ebx,4)
+ adcl $0,%ecx
+ movl (%esi),%eax
+ movl %edx,36(%esp,%ebx,4)
+ movl %ecx,40(%esp,%ebx,4)
+ mull %edi
+ addl 32(%esp),%eax
+ movl 4(%esi),%eax
+ adcl $0,%edx
+ movl $1,%ecx
+.align 16
+.L0092ndmadd:
+ movl %edx,%ebp
+ mull %edi
+ addl 32(%esp,%ecx,4),%ebp
+ leal 1(%ecx),%ecx
+ adcl $0,%edx
+ addl %eax,%ebp
+ movl (%esi,%ecx,4),%eax
+ adcl $0,%edx
+ cmpl %ebx,%ecx
+ movl %ebp,24(%esp,%ecx,4)
+ jl .L0092ndmadd
+ movl %edx,%ebp
+ mull %edi
+ addl 32(%esp,%ebx,4),%ebp
+ adcl $0,%edx
+ addl %eax,%ebp
+ adcl $0,%edx
+ movl %ebp,28(%esp,%ebx,4)
+ xorl %eax,%eax
+ movl 12(%esp),%ecx
+ addl 36(%esp,%ebx,4),%edx
+ adcl 40(%esp,%ebx,4),%eax
+ leal 4(%ecx),%ecx
+ movl %edx,32(%esp,%ebx,4)
+ cmpl 28(%esp),%ecx
+ movl %eax,36(%esp,%ebx,4)
+ je .L006common_tail
+ movl (%ecx),%edi
+ movl 8(%esp),%esi
+ movl %ecx,12(%esp)
+ xorl %ecx,%ecx
+ xorl %edx,%edx
+ movl (%esi),%eax
+ jmp .L0101stmadd
+.align 16
+.L007bn_sqr_mont:
+ movl %ebx,(%esp)
+ movl %ecx,12(%esp)
+ movl %edi,%eax
+ mull %edi
+ movl %eax,32(%esp)
+ movl %edx,%ebx
+ shrl $1,%edx
+ andl $1,%ebx
+ incl %ecx
+.align 16
+.L011sqr:
+ movl (%esi,%ecx,4),%eax
+ movl %edx,%ebp
+ mull %edi
+ addl %ebp,%eax
+ leal 1(%ecx),%ecx
+ adcl $0,%edx
+ leal (%ebx,%eax,2),%ebp
+ shrl $31,%eax
+ cmpl (%esp),%ecx
+ movl %eax,%ebx
+ movl %ebp,28(%esp,%ecx,4)
+ jl .L011sqr
+ movl (%esi,%ecx,4),%eax
+ movl %edx,%ebp
+ mull %edi
+ addl %ebp,%eax
+ movl 20(%esp),%edi
+ adcl $0,%edx
+ movl 16(%esp),%esi
+ leal (%ebx,%eax,2),%ebp
+ imull 32(%esp),%edi
+ shrl $31,%eax
+ movl %ebp,32(%esp,%ecx,4)
+ leal (%eax,%edx,2),%ebp
+ movl (%esi),%eax
+ shrl $31,%edx
+ movl %ebp,36(%esp,%ecx,4)
+ movl %edx,40(%esp,%ecx,4)
+ mull %edi
+ addl 32(%esp),%eax
+ movl %ecx,%ebx
+ adcl $0,%edx
+ movl 4(%esi),%eax
+ movl $1,%ecx
+.align 16
+.L0123rdmadd:
+ movl %edx,%ebp
+ mull %edi
+ addl 32(%esp,%ecx,4),%ebp
+ adcl $0,%edx
+ addl %eax,%ebp
+ movl 4(%esi,%ecx,4),%eax
+ adcl $0,%edx
+ movl %ebp,28(%esp,%ecx,4)
+ movl %edx,%ebp
+ mull %edi
+ addl 36(%esp,%ecx,4),%ebp
+ leal 2(%ecx),%ecx
+ adcl $0,%edx
+ addl %eax,%ebp
+ movl (%esi,%ecx,4),%eax
+ adcl $0,%edx
+ cmpl %ebx,%ecx
+ movl %ebp,24(%esp,%ecx,4)
+ jl .L0123rdmadd
+ movl %edx,%ebp
+ mull %edi
+ addl 32(%esp,%ebx,4),%ebp
+ adcl $0,%edx
+ addl %eax,%ebp
+ adcl $0,%edx
+ movl %ebp,28(%esp,%ebx,4)
+ movl 12(%esp),%ecx
+ xorl %eax,%eax
+ movl 8(%esp),%esi
+ addl 36(%esp,%ebx,4),%edx
+ adcl 40(%esp,%ebx,4),%eax
+ movl %edx,32(%esp,%ebx,4)
+ cmpl %ebx,%ecx
+ movl %eax,36(%esp,%ebx,4)
+ je .L006common_tail
+ movl 4(%esi,%ecx,4),%edi
+ leal 1(%ecx),%ecx
+ movl %edi,%eax
+ movl %ecx,12(%esp)
+ mull %edi
+ addl 32(%esp,%ecx,4),%eax
+ adcl $0,%edx
+ movl %eax,32(%esp,%ecx,4)
+ xorl %ebp,%ebp
+ cmpl %ebx,%ecx
+ leal 1(%ecx),%ecx
+ je .L013sqrlast
+ movl %edx,%ebx
+ shrl $1,%edx
+ andl $1,%ebx
+.align 16
+.L014sqradd:
+ movl (%esi,%ecx,4),%eax
+ movl %edx,%ebp
+ mull %edi
+ addl %ebp,%eax
+ leal (%eax,%eax,1),%ebp
+ adcl $0,%edx
+ shrl $31,%eax
+ addl 32(%esp,%ecx,4),%ebp
+ leal 1(%ecx),%ecx
+ adcl $0,%eax
+ addl %ebx,%ebp
+ adcl $0,%eax
+ cmpl (%esp),%ecx
+ movl %ebp,28(%esp,%ecx,4)
+ movl %eax,%ebx
+ jle .L014sqradd
+ movl %edx,%ebp
+ addl %edx,%edx
+ shrl $31,%ebp
+ addl %ebx,%edx
+ adcl $0,%ebp
+.L013sqrlast:
+ movl 20(%esp),%edi
+ movl 16(%esp),%esi
+ imull 32(%esp),%edi
+ addl 32(%esp,%ecx,4),%edx
+ movl (%esi),%eax
+ adcl $0,%ebp
+ movl %edx,32(%esp,%ecx,4)
+ movl %ebp,36(%esp,%ecx,4)
+ mull %edi
+ addl 32(%esp),%eax
+ leal -1(%ecx),%ebx
+ adcl $0,%edx
+ movl $1,%ecx
+ movl 4(%esi),%eax
+ jmp .L0123rdmadd
+.align 16
+.L006common_tail:
+ movl 16(%esp),%ebp
+ movl 4(%esp),%edi
+ leal 32(%esp),%esi
+ movl (%esi),%eax
+ movl %ebx,%ecx
+ xorl %edx,%edx
+.align 16
+.L015sub:
+ sbbl (%ebp,%edx,4),%eax
+ movl %eax,(%edi,%edx,4)
+ decl %ecx
+ movl 4(%esi,%edx,4),%eax
+ leal 1(%edx),%edx
+ jge .L015sub
+ sbbl $0,%eax
+ andl %eax,%esi
+ notl %eax
+ movl %edi,%ebp
+ andl %eax,%ebp
+ orl %ebp,%esi
+.align 16
+.L016copy:
+ movl (%esi,%ebx,4),%eax
+ movl %eax,(%edi,%ebx,4)
+ movl %ecx,32(%esp,%ebx,4)
+ decl %ebx
+ jge .L016copy
+ movl 24(%esp),%esp
+ movl $1,%eax
+.L000just_leave:
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ ret
+.size bn_mul_mont,.-.L_bn_mul_mont_begin
+.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
+.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
+.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
+.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
+.byte 111,114,103,62,0
+.comm OPENSSL_ia32cap_P,8,4
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.pl
new file mode 100755
index 00000000..e8f6b050
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86-mont.pl
@@ -0,0 +1,593 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2005
+#
+# This is a "teaser" code, as it can be improved in several ways...
+# First of all, a non-SSE2 path should be implemented (yes, for now it
+# performs Montgomery multiplication/convolution only on SSE2-capable
+# CPUs such as P4; others fall back to the original code). Then the inner loop
+# can be unrolled and modulo-scheduled to improve ILP and possibly
+# moved to 128-bit XMM register bank (though it would require input
+# rearrangement and/or increase bus bandwidth utilization). Dedicated
+# squaring procedure should give further performance improvement...
+# Yet, for being draft, the code improves rsa512 *sign* benchmark by
+# 110%(!), rsa1024 one - by 70% and rsa4096 - by 20%:-)
+
+# December 2006
+#
+# Modulo-scheduling SSE2 loops results in further 15-20% improvement.
+# Integer-only code [being equipped with dedicated squaring procedure]
+# gives ~40% on rsa512 sign benchmark...
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+&function_begin("bn_mul_mont");
+
+$i="edx";
+$j="ecx";
+$ap="esi"; $tp="esi"; # overlapping variables!!!
+$rp="edi"; $bp="edi"; # overlapping variables!!!
+$np="ebp";
+$num="ebx";
+
+$_num=&DWP(4*0,"esp"); # stack top layout
+$_rp=&DWP(4*1,"esp");
+$_ap=&DWP(4*2,"esp");
+$_bp=&DWP(4*3,"esp");
+$_np=&DWP(4*4,"esp");
+$_n0=&DWP(4*5,"esp"); $_n0q=&QWP(4*5,"esp");
+$_sp=&DWP(4*6,"esp");
+$_bpend=&DWP(4*7,"esp");
+$frame=32; # size of above frame rounded up to 16n
+
+ &xor ("eax","eax");
+ &mov ("edi",&wparam(5)); # int num
+ &cmp ("edi",4);
+ &jl (&label("just_leave"));
+
+ &lea ("esi",&wparam(0)); # put aside pointer to argument block
+ &lea ("edx",&wparam(1)); # load ap
+ &mov ("ebp","esp"); # saved stack pointer!
+ &add ("edi",2); # extra two words on top of tp
+ &neg ("edi");
+ &lea ("esp",&DWP(-$frame,"esp","edi",4)); # alloca($frame+4*(num+2))
+ &neg ("edi");
+
+	# minimize cache contention by arranging a 2K window between stack
+ # pointer and ap argument [np is also position sensitive vector,
+ # but it's assumed to be near ap, as it's allocated at ~same
+ # time].
+ &mov ("eax","esp");
+ &sub ("eax","edx");
+ &and ("eax",2047);
+ &sub ("esp","eax"); # this aligns sp and ap modulo 2048
+
+ &xor ("edx","esp");
+ &and ("edx",2048);
+ &xor ("edx",2048);
+ &sub ("esp","edx"); # this splits them apart modulo 4096
+
+ &and ("esp",-64); # align to cache line
+
+ ################################# load argument block...
+ &mov ("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
+ &mov ("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
+ &mov ("ecx",&DWP(2*4,"esi"));# const BN_ULONG *bp
+ &mov ("edx",&DWP(3*4,"esi"));# const BN_ULONG *np
+ &mov ("esi",&DWP(4*4,"esi"));# const BN_ULONG *n0
+ #&mov ("edi",&DWP(5*4,"esi"));# int num
+
+ &mov ("esi",&DWP(0,"esi")); # pull n0[0]
+ &mov ($_rp,"eax"); # ... save a copy of argument block
+ &mov ($_ap,"ebx");
+ &mov ($_bp,"ecx");
+ &mov ($_np,"edx");
+ &mov ($_n0,"esi");
+ &lea ($num,&DWP(-3,"edi")); # num=num-1 to assist modulo-scheduling
+ #&mov ($_num,$num); # redundant as $num is not reused
+ &mov ($_sp,"ebp"); # saved stack pointer!
+
+if($sse2) {
+$acc0="mm0"; # mmx register bank layout
+$acc1="mm1";
+$car0="mm2";
+$car1="mm3";
+$mul0="mm4";
+$mul1="mm5";
+$temp="mm6";
+$mask="mm7";
+
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt (&DWP(0,"eax"),26);
+ &jnc (&label("non_sse2"));
+
+ &mov ("eax",-1);
+ &movd ($mask,"eax"); # mask 32 lower bits
+
+ &mov ($ap,$_ap); # load input pointers
+ &mov ($bp,$_bp);
+ &mov ($np,$_np);
+
+ &xor ($i,$i); # i=0
+ &xor ($j,$j); # j=0
+
+ &movd ($mul0,&DWP(0,$bp)); # bp[0]
+ &movd ($mul1,&DWP(0,$ap)); # ap[0]
+ &movd ($car1,&DWP(0,$np)); # np[0]
+
+ &pmuludq($mul1,$mul0); # ap[0]*bp[0]
+ &movq ($car0,$mul1);
+ &movq ($acc0,$mul1); # I wish movd worked for
+ &pand ($acc0,$mask); # inter-register transfers
+
+ &pmuludq($mul1,$_n0q); # *=n0
+
+ &pmuludq($car1,$mul1); # "t[0]"*np[0]*n0
+ &paddq ($car1,$acc0);
+
+ &movd ($acc1,&DWP(4,$np)); # np[1]
+ &movd ($acc0,&DWP(4,$ap)); # ap[1]
+
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+
+ &inc ($j); # j++
+&set_label("1st",16);
+ &pmuludq($acc0,$mul0); # ap[j]*bp[0]
+ &pmuludq($acc1,$mul1); # np[j]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &pand ($acc0,$mask);
+ &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
+ &paddq ($car1,$acc0); # +=ap[j]*bp[0];
+ &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1]
+ &psrlq ($car0,32);
+ &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[j-1]=
+ &psrlq ($car1,32);
+
+ &lea ($j,&DWP(1,$j));
+ &cmp ($j,$num);
+ &jl (&label("1st"));
+
+ &pmuludq($acc0,$mul0); # ap[num-1]*bp[0]
+ &pmuludq($acc1,$mul1); # np[num-1]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &pand ($acc0,$mask);
+ &paddq ($car1,$acc0); # +=ap[num-1]*bp[0];
+ &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]=
+
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+
+ &paddq ($car1,$car0);
+ &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1]
+
+ &inc ($i); # i++
+&set_label("outer");
+ &xor ($j,$j); # j=0
+
+ &movd ($mul0,&DWP(0,$bp,$i,4)); # bp[i]
+ &movd ($mul1,&DWP(0,$ap)); # ap[0]
+ &movd ($temp,&DWP($frame,"esp")); # tp[0]
+ &movd ($car1,&DWP(0,$np)); # np[0]
+ &pmuludq($mul1,$mul0); # ap[0]*bp[i]
+
+ &paddq ($mul1,$temp); # +=tp[0]
+ &movq ($acc0,$mul1);
+ &movq ($car0,$mul1);
+ &pand ($acc0,$mask);
+
+ &pmuludq($mul1,$_n0q); # *=n0
+
+ &pmuludq($car1,$mul1);
+ &paddq ($car1,$acc0);
+
+ &movd ($temp,&DWP($frame+4,"esp")); # tp[1]
+ &movd ($acc1,&DWP(4,$np)); # np[1]
+ &movd ($acc0,&DWP(4,$ap)); # ap[1]
+
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+ &paddq ($car0,$temp); # +=tp[1]
+
+ &inc ($j); # j++
+ &dec ($num);
+&set_label("inner");
+ &pmuludq($acc0,$mul0); # ap[j]*bp[i]
+ &pmuludq($acc1,$mul1); # np[j]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &movd ($temp,&DWP($frame+4,"esp",$j,4));# tp[j+1]
+ &pand ($acc0,$mask);
+ &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
+ &paddq ($car1,$acc0); # +=ap[j]*bp[i]+tp[j]
+ &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1]
+ &psrlq ($car0,32);
+ &movd (&DWP($frame-4,"esp",$j,4),$car1);# tp[j-1]=
+ &psrlq ($car1,32);
+ &paddq ($car0,$temp); # +=tp[j+1]
+
+ &dec ($num);
+ &lea ($j,&DWP(1,$j)); # j++
+ &jnz (&label("inner"));
+
+ &mov ($num,$j);
+ &pmuludq($acc0,$mul0); # ap[num-1]*bp[i]
+ &pmuludq($acc1,$mul1); # np[num-1]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &pand ($acc0,$mask);
+ &paddq ($car1,$acc0); # +=ap[num-1]*bp[i]+tp[num-1]
+ &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]=
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+
+ &movd ($temp,&DWP($frame+4,"esp",$num,4)); # += tp[num]
+ &paddq ($car1,$car0);
+ &paddq ($car1,$temp);
+ &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1]
+
+ &lea ($i,&DWP(1,$i)); # i++
+ &cmp ($i,$num);
+ &jle (&label("outer"));
+
+ &emms (); # done with mmx bank
+ &jmp (&label("common_tail"));
+
+&set_label("non_sse2",16);
+}
+
+if (0) {
+ &mov ("esp",$_sp);
+ &xor ("eax","eax"); # signal "not fast enough [yet]"
+ &jmp (&label("just_leave"));
+ # While the below code provides competitive performance for
+	# all key lengths on modern Intel cores, it's still more
+	# than 10% slower for 4096-bit keys elsewhere:-( "Competitive"
+ # means compared to the original integer-only assembler.
+ # 512-bit RSA sign is better by ~40%, but that's about all
+ # one can say about all CPUs...
+} else {
+$inp="esi"; # integer path uses these registers differently
+$word="edi";
+$carry="ebp";
+
+ &mov ($inp,$_ap);
+ &lea ($carry,&DWP(1,$num));
+ &mov ($word,$_bp);
+ &xor ($j,$j); # j=0
+ &mov ("edx",$inp);
+ &and ($carry,1); # see if num is even
+ &sub ("edx",$word); # see if ap==bp
+ &lea ("eax",&DWP(4,$word,$num,4)); # &bp[num]
+ &or ($carry,"edx");
+ &mov ($word,&DWP(0,$word)); # bp[0]
+ &jz (&label("bn_sqr_mont"));
+ &mov ($_bpend,"eax");
+ &mov ("eax",&DWP(0,$inp));
+ &xor ("edx","edx");
+
+&set_label("mull",16);
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*bp[0]
+ &add ($carry,"eax");
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1]
+ &cmp ($j,$num);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("mull"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # ap[num-1]*bp[0]
+ &mov ($word,$_n0);
+ &add ("eax",$carry);
+ &mov ($inp,$_np);
+ &adc ("edx",0);
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+
+ &mov (&DWP($frame,"esp",$num,4),"eax"); # tp[num-1]=
+ &xor ($j,$j);
+ &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]=
+ &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]=
+
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+ &adc ("edx",0);
+ &inc ($j);
+
+ &jmp (&label("2ndmadd"));
+
+&set_label("1stmadd",16);
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*bp[i]
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1]
+ &adc ("edx",0);
+ &cmp ($j,$num);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("1stmadd"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # ap[num-1]*bp[i]
+ &add ("eax",&DWP($frame,"esp",$num,4)); # +=tp[num-1]
+ &mov ($word,$_n0);
+ &adc ("edx",0);
+ &mov ($inp,$_np);
+ &add ($carry,"eax");
+ &adc ("edx",0);
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+
+ &xor ($j,$j);
+ &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
+ &mov (&DWP($frame,"esp",$num,4),$carry); # tp[num-1]=
+ &adc ($j,0);
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]=
+ &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]=
+
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+ &adc ("edx",0);
+ &mov ($j,1);
+
+&set_label("2ndmadd",16);
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+1]
+ &adc ("edx",0);
+ &cmp ($j,$num);
+ &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j-1]=
+ &jl (&label("2ndmadd"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1]
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &adc ("edx",0);
+ &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]=
+
+ &xor ("eax","eax");
+ &mov ($j,$_bp); # &bp[i]
+ &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
+ &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1]
+ &lea ($j,&DWP(4,$j));
+ &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]=
+ &cmp ($j,$_bpend);
+ &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]=
+ &je (&label("common_tail"));
+
+ &mov ($word,&DWP(0,$j)); # bp[i+1]
+ &mov ($inp,$_ap);
+ &mov ($_bp,$j); # &bp[++i]
+ &xor ($j,$j);
+ &xor ("edx","edx");
+ &mov ("eax",&DWP(0,$inp));
+ &jmp (&label("1stmadd"));
+
+&set_label("bn_sqr_mont",16);
+$sbit=$num;
+ &mov ($_num,$num);
+ &mov ($_bp,$j); # i=0
+
+ &mov ("eax",$word); # ap[0]
+ &mul ($word); # ap[0]*ap[0]
+ &mov (&DWP($frame,"esp"),"eax"); # tp[0]=
+ &mov ($sbit,"edx");
+ &shr ("edx",1);
+ &and ($sbit,1);
+ &inc ($j);
+&set_label("sqr",16);
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j]
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*ap[0]
+ &add ("eax",$carry);
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &lea ($carry,&DWP(0,$sbit,"eax",2));
+ &shr ("eax",31);
+ &cmp ($j,$_num);
+ &mov ($sbit,"eax");
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("sqr"));
+
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[num-1]
+ &mov ($carry,"edx");
+ &mul ($word); # ap[num-1]*ap[0]
+ &add ("eax",$carry);
+ &mov ($word,$_n0);
+ &adc ("edx",0);
+ &mov ($inp,$_np);
+ &lea ($carry,&DWP(0,$sbit,"eax",2));
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+ &shr ("eax",31);
+ &mov (&DWP($frame,"esp",$j,4),$carry); # tp[num-1]=
+
+ &lea ($carry,&DWP(0,"eax","edx",2));
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &shr ("edx",31);
+ &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num]=
+ &mov (&DWP($frame+8,"esp",$j,4),"edx"); # tp[num+1]=
+
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &mov ($num,$j);
+ &adc ("edx",0);
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+ &mov ($j,1);
+
+&set_label("3rdmadd",16);
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(4,$inp,$j,4)); # np[j+1]
+ &adc ("edx",0);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j-1]=
+
+ &mov ($carry,"edx");
+ &mul ($word); # np[j+1]*m
+ &add ($carry,&DWP($frame+4,"esp",$j,4)); # +=tp[j+1]
+ &lea ($j,&DWP(2,$j));
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+2]
+ &adc ("edx",0);
+ &cmp ($j,$num);
+ &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("3rdmadd"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1]
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &adc ("edx",0);
+ &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]=
+
+ &mov ($j,$_bp); # i
+ &xor ("eax","eax");
+ &mov ($inp,$_ap);
+ &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
+ &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1]
+ &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]=
+ &cmp ($j,$num);
+ &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]=
+ &je (&label("common_tail"));
+
+ &mov ($word,&DWP(4,$inp,$j,4)); # ap[i]
+ &lea ($j,&DWP(1,$j));
+ &mov ("eax",$word);
+ &mov ($_bp,$j); # ++i
+ &mul ($word); # ap[i]*ap[i]
+ &add ("eax",&DWP($frame,"esp",$j,4)); # +=tp[i]
+ &adc ("edx",0);
+ &mov (&DWP($frame,"esp",$j,4),"eax"); # tp[i]=
+ &xor ($carry,$carry);
+ &cmp ($j,$num);
+ &lea ($j,&DWP(1,$j));
+ &je (&label("sqrlast"));
+
+ &mov ($sbit,"edx"); # zaps $num
+ &shr ("edx",1);
+ &and ($sbit,1);
+&set_label("sqradd",16);
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j]
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*ap[i]
+ &add ("eax",$carry);
+ &lea ($carry,&DWP(0,"eax","eax"));
+ &adc ("edx",0);
+ &shr ("eax",31);
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &lea ($j,&DWP(1,$j));
+ &adc ("eax",0);
+ &add ($carry,$sbit);
+ &adc ("eax",0);
+ &cmp ($j,$_num);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &mov ($sbit,"eax");
+ &jle (&label("sqradd"));
+
+ &mov ($carry,"edx");
+ &add ("edx","edx");
+ &shr ($carry,31);
+ &add ("edx",$sbit);
+ &adc ($carry,0);
+&set_label("sqrlast");
+ &mov ($word,$_n0);
+ &mov ($inp,$_np);
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+
+ &add ("edx",&DWP($frame,"esp",$j,4)); # +=tp[num]
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &adc ($carry,0);
+ &mov (&DWP($frame,"esp",$j,4),"edx"); # tp[num]=
+ &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num+1]=
+
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &lea ($num,&DWP(-1,$j));
+ &adc ("edx",0);
+ &mov ($j,1);
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+
+ &jmp (&label("3rdmadd"));
+}
+
+&set_label("common_tail",16);
+ &mov ($np,$_np); # load modulus pointer
+ &mov ($rp,$_rp); # load result pointer
+ &lea ($tp,&DWP($frame,"esp")); # [$ap and $bp are zapped]
+
+ &mov ("eax",&DWP(0,$tp)); # tp[0]
+ &mov ($j,$num); # j=num-1
+ &xor ($i,$i); # i=0 and clear CF!
+
+&set_label("sub",16);
+ &sbb ("eax",&DWP(0,$np,$i,4));
+ &mov (&DWP(0,$rp,$i,4),"eax"); # rp[i]=tp[i]-np[i]
+ &dec ($j); # doesn't affect CF!
+ &mov ("eax",&DWP(4,$tp,$i,4)); # tp[i+1]
+ &lea ($i,&DWP(1,$i)); # i++
+ &jge (&label("sub"));
+
+ &sbb ("eax",0); # handle upmost overflow bit
+ &and ($tp,"eax");
+ &not ("eax");
+ &mov ($np,$rp);
+ &and ($np,"eax");
+ &or ($tp,$np); # tp=carry?tp:rp
+
+&set_label("copy",16); # copy or in-place refresh
+ &mov ("eax",&DWP(0,$tp,$num,4));
+ &mov (&DWP(0,$rp,$num,4),"eax"); # rp[i]=tp[i]
+ &mov (&DWP($frame,"esp",$num,4),$j); # zap temporary vector
+ &dec ($num);
+ &jge (&label("copy"));
+
+ &mov ("esp",$_sp); # pull saved stack pointer
+ &mov ("eax",1);
+&set_label("just_leave");
+&function_end("bn_mul_mont");
+
+&asciz("Montgomery Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
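The overall algorithm that the routine above emits is easier to see in plain C. The sketch below follows the same word-serial structure (multiply-accumulate by bp[i], Montgomery reduction by n0, a one-word shift, and a final conditional subtraction as in common_tail); it assumes 32-bit BN_ULONG words and a C99 compiler, and the names are illustrative rather than the OpenSSL API.

    #include <stdint.h>
    #include <string.h>

    typedef uint32_t bn_word;

    /* rp[] = ap[] * bp[] * 2^(-32*num) mod np[], with -np[0]^(-1) mod 2^32 in n0 */
    static void mont_mul_ref(bn_word *rp, const bn_word *ap, const bn_word *bp,
                             const bn_word *np, bn_word n0, int num)
    {
        bn_word tp[num + 2];                 /* temporary vector, like tp[] above */
        uint64_t t;
        memset(tp, 0, sizeof(tp));

        for (int i = 0; i < num; i++) {
            bn_word carry = 0, m;

            /* tp[] += ap[] * bp[i] */
            for (int j = 0; j < num; j++) {
                t = (uint64_t)ap[j] * bp[i] + tp[j] + carry;
                tp[j] = (bn_word)t;
                carry = (bn_word)(t >> 32);
            }
            t = (uint64_t)tp[num] + carry;
            tp[num]     = (bn_word)t;
            tp[num + 1] = (bn_word)(t >> 32);

            /* m = tp[0]*n0 mod 2^32; tp[] += np[]*m, then drop the zero low word */
            m = tp[0] * n0;
            t = (uint64_t)np[0] * m + tp[0];
            carry = (bn_word)(t >> 32);
            for (int j = 1; j < num; j++) {
                t = (uint64_t)np[j] * m + tp[j] + carry;
                tp[j - 1] = (bn_word)t;
                carry     = (bn_word)(t >> 32);
            }
            t = (uint64_t)tp[num] + carry;
            tp[num - 1] = (bn_word)t;
            tp[num]     = tp[num + 1] + (bn_word)(t >> 32);
            tp[num + 1] = 0;
        }

        /* conditional final subtraction, as in common_tail above */
        bn_word borrow = 0;
        for (int j = 0; j < num; j++) {
            uint64_t d = (uint64_t)tp[j] - np[j] - borrow;
            rp[j]  = (bn_word)d;
            borrow = (bn_word)((d >> 32) & 1);
        }
        if (tp[num] == 0 && borrow)          /* tp < np: keep tp rather than tp-np */
            memcpy(rp, tp, num * sizeof(bn_word));
    }

The generated code keeps tp[] on the re-aligned stack and, in the SSE2 path, fuses the two inner loops into one, but the word-level arithmetic is the same.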
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86.pl
new file mode 100644
index 00000000..1bc4f1bb
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86.pl
@@ -0,0 +1,28 @@
+#!/usr/local/bin/perl
+
+push(@INC,"perlasm","../../perlasm");
+require "x86asm.pl";
+
+require("x86/mul_add.pl");
+require("x86/mul.pl");
+require("x86/sqr.pl");
+require("x86/div.pl");
+require("x86/add.pl");
+require("x86/sub.pl");
+require("x86/comba.pl");
+
+&asm_init($ARGV[0],$0);
+
+&bn_mul_add_words("bn_mul_add_words");
+&bn_mul_words("bn_mul_words");
+&bn_sqr_words("bn_sqr_words");
+&bn_div_words("bn_div_words");
+&bn_add_words("bn_add_words");
+&bn_sub_words("bn_sub_words");
+&bn_mul_comba("bn_mul_comba8",8);
+&bn_mul_comba("bn_mul_comba4",4);
+&bn_sqr_comba("bn_sqr_comba8",8);
+&bn_sqr_comba("bn_sqr_comba4",4);
+
+&asm_finish();
+
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/add.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/add.pl
new file mode 100644
index 00000000..0b5cf583
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/add.pl
@@ -0,0 +1,76 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *a
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+1;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/comba.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/comba.pl
new file mode 100644
index 00000000..22912536
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/comba.pl
@@ -0,0 +1,277 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub mul_add_c
+ {
+ local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
+	# words, and 1 to load the return value
+
+ &comment("mul a[$ai]*b[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ &mul("edx");
+ &add($c0,"eax");
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
+ &mov("eax",&wparam(0)) if $pos > 0; # load r[]
+ ###
+ &adc($c1,"edx");
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# load next b
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# load next b
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[];
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next a
+ }
+
+sub sqr_add_c
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
+	# words, and 1 to load the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add($c0,"eax");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ ###
+ &adc($c1,"edx");
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ }
+
+sub sqr_add_c2
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+	# pos == -1 if eax and edx are pre-loaded, 0 to load from the next
+	# words, and 1 to load the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$a,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add("eax","eax");
+ ###
+ &adc("edx","edx");
+ ###
+ &adc($c2,0);
+ &add($c0,"eax");
+ &adc($c1,"edx");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ &adc($c2,0);
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
+ ###
+ }
+
+sub bn_mul_comba
+ {
+ local($name,$num)=@_;
+ local($a,$b,$c0,$c1,$c2);
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($tot,$end);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $b="edi";
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ &push("esi");
+ &mov($a,&wparam(1));
+ &push("edi");
+ &mov($b,&wparam(2));
+ &push("ebp");
+ &push("ebx");
+
+ &xor($c0,$c0);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+ &xor($c1,$c1);
+	&mov("edx",&DWP(0,$b,"",0));	# load the first word of b
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("################## Calculate word $i");
+
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($j+1) == $end)
+ {
+ $v=1;
+ $v=2 if (($i+1) == $tot);
+ }
+ else
+ { $v=0; }
+ if (($j+1) != $end)
+ {
+ $na=($ai-1);
+ $nb=($bi+1);
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
+ &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ # &mov("eax",&wparam(0));
+ # &mov(&DWP($i*4,"eax","",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &comment("save r[$i]");
+ # &mov("eax",&wparam(0));
+ &mov(&DWP($i*4,"eax","",0),$c0);
+
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
+sub bn_sqr_comba
+ {
+ local($name,$num)=@_;
+ local($r,$a,$c0,$c1,$c2)=@_;
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($b,$tot,$end,$half);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $r="edi";
+
+ &push("esi");
+ &push("edi");
+ &push("ebp");
+ &push("ebx");
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &xor($c0,$c0);
+ &xor($c1,$c1);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("############### Calculate word $i");
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($ai-1) < ($bi+1))
+ {
+ $v=1;
+ $v=2 if ($i+1) == $tot;
+ }
+ else
+ { $v=0; }
+ if (!$v)
+ {
+ $na=$ai-1;
+ $nb=$bi+1;
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+ if ($ai == $bi)
+ {
+ &sqr_add_c($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ else
+ {
+ &sqr_add_c2($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ #&mov(&DWP($i*4,$r,"",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ last;
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &mov(&DWP($i*4,$r,"",0),$c0);
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
+1;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/div.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/div.pl
new file mode 100644
index 00000000..0e90152c
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/div.pl
@@ -0,0 +1,15 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_div_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+ &mov("edx",&wparam(0)); #
+ &mov("eax",&wparam(1)); #
+ &mov("ebx",&wparam(2)); #
+ &div("ebx");
+ &function_end($name);
+ }
+1;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/f b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/f
new file mode 100644
index 00000000..22e41122
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/f
@@ -0,0 +1,3 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul.pl
new file mode 100644
index 00000000..674cb9b0
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul.pl
@@ -0,0 +1,77 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_mul_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ecx";
+ $r="edi";
+ $c="esi";
+ $num="ebp";
+
+ &xor($c,$c); # clear carry
+ &mov($r,&wparam(0)); #
+ &mov($a,&wparam(1)); #
+ &mov($num,&wparam(2)); #
+ &mov($w,&wparam(3)); #
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("mw_finish"));
+
+ &set_label("mw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jz(&label("mw_finish"));
+ &jmp(&label("mw_loop"));
+
+ &set_label("mw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jnz(&label("mw_finish2"));
+ &jmp(&label("mw_end"));
+
+ &set_label("mw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &dec($num) if ($i != 7-1);
+ &jz(&label("mw_end")) if ($i != 7-1);
+ }
+ &set_label("mw_end",0);
+ &mov("eax",$c);
+
+ &function_end($name);
+ }
+
+1;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul_add.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul_add.pl
new file mode 100644
index 00000000..61830d3a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/mul_add.pl
@@ -0,0 +1,87 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_mul_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ebp";
+ $r="edi";
+ $c="esi";
+
+ &xor($c,$c); # clear carry
+ &mov($r,&wparam(0)); #
+
+ &mov("ecx",&wparam(2)); #
+ &mov($a,&wparam(1)); #
+
+ &and("ecx",0xfffffff8); # num / 8
+ &mov($w,&wparam(3)); #
+
+ &push("ecx"); # Up the stack for a tmp variable
+
+ &jz(&label("maw_finish"));
+
+ &set_label("maw_loop",0);
+
+ &mov(&swtmp(0),"ecx"); #
+
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+= *r
+ &mov($c,&DWP($i,$r,"",0)); # L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &add("eax",$c); # L(t)+=c
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &mov("ecx",&swtmp(0)); #
+ &add($a,32);
+ &add($r,32);
+ &sub("ecx",8);
+ &jnz(&label("maw_loop"));
+
+ &set_label("maw_finish",0);
+ &mov("ecx",&wparam(2)); # get num
+ &and("ecx",7);
+ &jnz(&label("maw_finish2")); # helps branch prediction
+ &jmp(&label("maw_end"));
+
+ &set_label("maw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ &mov($c,&DWP($i*4,$r,"",0)); # L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &add("eax",$c);
+ &adc("edx",0); # H(t)+=carry
+ &dec("ecx") if ($i != 7-1);
+ &mov(&DWP($i*4,$r,"",0),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &jz(&label("maw_end")) if ($i != 7-1);
+ }
+ &set_label("maw_end",0);
+ &mov("eax",$c);
+
+ &pop("ecx"); # clear variable from
+
+ &function_end($name);
+ }
+
+1;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sqr.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sqr.pl
new file mode 100644
index 00000000..1f90993c
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sqr.pl
@@ -0,0 +1,60 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_sqr_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $r="esi";
+ $a="edi";
+ $num="ebx";
+
+ &mov($r,&wparam(0)); #
+ &mov($a,&wparam(1)); #
+ &mov($num,&wparam(2)); #
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("sw_finish"));
+
+ &set_label("sw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*2,$r,"",0),"eax"); #
+ &mov(&DWP($i*2+4,$r,"",0),"edx");#
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,64);
+ &sub($num,8);
+ &jnz(&label("sw_loop"));
+
+ &set_label("sw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jz(&label("sw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*8,$r,"",0),"eax"); #
+ &dec($num) if ($i != 7-1);
+ &mov(&DWP($i*8+4,$r,"",0),"edx");
+ &jz(&label("sw_end")) if ($i != 7-1);
+ }
+ &set_label("sw_end",0);
+
+ &function_end($name);
+ }
+
+1;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sub.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sub.pl
new file mode 100644
index 00000000..837b0e1b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86/sub.pl
@@ -0,0 +1,76 @@
+#!/usr/local/bin/perl
+# x86 assembler
+
+sub bn_sub_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *a
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+1;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gcc.c b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gcc.c
new file mode 100644
index 00000000..329946e5
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gcc.c
@@ -0,0 +1,606 @@
+#include "../bn_lcl.h"
+#if !(defined(__GNUC__) && __GNUC__>=2)
+# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
+#else
+/*
+ * x86_64 BIGNUM accelerator version 0.1, December 2002.
+ *
+ * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ *
+ * Q. Version 0.1? It doesn't sound like Andy, he used to assign real
+ * versions, like 1.0...
+ * A. Well, that's because this code is basically a quick-n-dirty
+ * proof-of-concept hack. As you can see it's implemented with
+ * inline assembler, which means that you're bound to GCC and that
+ * there might be enough room for further improvement.
+ *
+ * Q. Why inline assembler?
+ * A. x86_64 features its own ABI, which I'm not familiar with. This is
+ *    why I decided to let the compiler take care of subroutine
+ *    prologue/epilogue as well as register allocation. For reference,
+ *    Win64 implements a different ABI for AMD64 than Linux does.
+ *
+ * Q. How much faster does it get?
+ * A. 'apps/openssl speed rsa dsa' output with no-asm:
+ *
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
+ * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
+ * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
+ * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
+ * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
+ * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
+ *
+ * 'apps/openssl speed rsa dsa' output with this module:
+ *
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
+ * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
+ * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
+ * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
+ * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
+ * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
+ *
+ * For reference, the IA-32 assembler implementation performs
+ * very much like 64-bit code compiled with no-asm on the same
+ * machine.
+ */
+
+#ifdef _WIN64
+#define BN_ULONG unsigned long long
+#else
+#define BN_ULONG unsigned long
+#endif
+
+#undef mul
+#undef mul_add
+#undef sqr
+
+/*
+ * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
+ * "g"(0) lets the compiler decide where it
+ * wants to keep the value of zero;
+ */
+#define mul_add(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"m"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+m"(r),"+d"(high) \
+ : "r"(carry),"g"(0) \
+ : "cc"); \
+ carry=high; \
+ } while (0)
+
+#define mul(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"g"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ (r)=carry, carry=high; \
+ } while (0)
+
+#define sqr(r0,r1,a) \
+ asm ("mulq %2" \
+ : "=a"(r0),"=d"(r1) \
+ : "a"(a) \
+ : "cc");
+
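Stated portably, mul_add(r,a,word,carry) above performs {r,carry} = r + a*word + carry, mul(r,a,word,carry) performs {r,carry} = a*word + carry, and sqr(r0,r1,a) stores the 128-bit square of a in (r1,r0). As a hypothetical sketch only, assuming a compiler that provides unsigned __int128 (an extension the inline asm deliberately avoids relying on), the first is equivalent to:

    /* illustrative reference for mul_add(), not part of this file's build */
    static void mul_add_ref(BN_ULONG *r, BN_ULONG a, BN_ULONG word, BN_ULONG *carry)
    {
        unsigned __int128 t = (unsigned __int128)a * word + *r + *carry;
        *r     = (BN_ULONG)t;            /* low 64 bits back into the word */
        *carry = (BN_ULONG)(t >> 64);    /* high 64 bits propagate as carry */
    }
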
+BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG c1=0;
+
+ if (num <= 0) return(c1);
+
+ while (num&~3)
+ {
+ mul_add(rp[0],ap[0],w,c1);
+ mul_add(rp[1],ap[1],w,c1);
+ mul_add(rp[2],ap[2],w,c1);
+ mul_add(rp[3],ap[3],w,c1);
+ ap+=4; rp+=4; num-=4;
+ }
+ if (num)
+ {
+ mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
+ mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
+ mul_add(rp[2],ap[2],w,c1); return c1;
+ }
+
+ return(c1);
+ }
+
+BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG c1=0;
+
+ if (num <= 0) return(c1);
+
+ while (num&~3)
+ {
+ mul(rp[0],ap[0],w,c1);
+ mul(rp[1],ap[1],w,c1);
+ mul(rp[2],ap[2],w,c1);
+ mul(rp[3],ap[3],w,c1);
+ ap+=4; rp+=4; num-=4;
+ }
+ if (num)
+ {
+ mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
+ mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
+ mul(rp[2],ap[2],w,c1);
+ }
+ return(c1);
+ }
+
+void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
+ {
+ if (n <= 0) return;
+
+ while (n&~3)
+ {
+ sqr(r[0],r[1],a[0]);
+ sqr(r[2],r[3],a[1]);
+ sqr(r[4],r[5],a[2]);
+ sqr(r[6],r[7],a[3]);
+ a+=4; r+=8; n-=4;
+ }
+ if (n)
+ {
+ sqr(r[0],r[1],a[0]); if (--n == 0) return;
+ sqr(r[2],r[3],a[1]); if (--n == 0) return;
+ sqr(r[4],r[5],a[2]);
+ }
+ }
+
+BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+{ BN_ULONG ret,waste;
+
+ asm ("divq %4"
+ : "=a"(ret),"=d"(waste)
+ : "a"(l),"d"(h),"g"(d)
+ : "cc");
+
+ return ret;
+}
+
+BN_ULONG bn_add_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n)
+{ BN_ULONG ret=0,i=0;
+
+ if (n <= 0) return 0;
+
+ asm (
+ " subq %2,%2 \n"
+ ".p2align 4 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " adcq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " leaq 1(%2),%2 \n"
+ " loop 1b \n"
+ " sbbq %0,%0 \n"
+ : "=&a"(ret),"+c"(n),"=&r"(i)
+ : "r"(rp),"r"(ap),"r"(bp)
+ : "cc"
+ );
+
+ return ret&1;
+}
+
+#ifndef SIMICS
+BN_ULONG bn_sub_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n)
+{ BN_ULONG ret=0,i=0;
+
+ if (n <= 0) return 0;
+
+ asm (
+ " subq %2,%2 \n"
+ ".p2align 4 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " sbbq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " leaq 1(%2),%2 \n"
+ " loop 1b \n"
+ " sbbq %0,%0 \n"
+ : "=&a"(ret),"+c"(n),"=&r"(i)
+ : "r"(rp),"r"(ap),"r"(bp)
+ : "cc"
+ );
+
+ return ret&1;
+}
+#else
+/* Simics 1.4<7 has buggy sbbq:-( */
+#define BN_MASK2 0xffffffffffffffffL
+BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+ {
+ BN_ULONG t1,t2;
+ int c=0;
+
+ if (n <= 0) return((BN_ULONG)0);
+
+ for (;;)
+ {
+ t1=a[0]; t2=b[0];
+ r[0]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ t1=a[1]; t2=b[1];
+ r[1]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ t1=a[2]; t2=b[2];
+ r[2]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ t1=a[3]; t2=b[3];
+ r[3]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ if (--n <= 0) break;
+
+ a+=4;
+ b+=4;
+ r+=4;
+ }
+ return(c);
+ }
+#endif
+
+/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
+/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
+/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
+/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
+
+#if 0
+/* original macros are kept for reference purposes */
+#define mul_add_c(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b); \
+ t1 = ta * tb; \
+ t2 = BN_UMULT_HIGH(ta,tb); \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define mul_add_c2(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b),t0; \
+ t1 = BN_UMULT_HIGH(ta,tb); \
+ t0 = ta * tb; \
+ t2 = t1+t1; c2 += (t2<t1)?1:0; \
+ t1 = t0+t0; t2 += (t1<t0)?1:0; \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+#else
+#define mul_add_c(a,b,c0,c1,c2) do { \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+#define sqr_add_c(a,i,c0,c1,c2) do { \
+ asm ("mulq %2" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a[i]) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+#define mul_add_c2(a,b,c0,c1,c2) do { \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %0,%0; adcq %2,%1" \
+ : "+d"(t2),"+r"(c2) \
+ : "g"(0) \
+ : "cc"); \
+ asm ("addq %0,%0; adcq %2,%1" \
+ : "+a"(t1),"+d"(t2) \
+ : "g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+#endif
+
+#define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+
+void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ mul_add_c(a[0],b[0],c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ mul_add_c(a[0],b[1],c2,c3,c1);
+ mul_add_c(a[1],b[0],c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ mul_add_c(a[2],b[0],c3,c1,c2);
+ mul_add_c(a[1],b[1],c3,c1,c2);
+ mul_add_c(a[0],b[2],c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ mul_add_c(a[0],b[3],c1,c2,c3);
+ mul_add_c(a[1],b[2],c1,c2,c3);
+ mul_add_c(a[2],b[1],c1,c2,c3);
+ mul_add_c(a[3],b[0],c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ mul_add_c(a[4],b[0],c2,c3,c1);
+ mul_add_c(a[3],b[1],c2,c3,c1);
+ mul_add_c(a[2],b[2],c2,c3,c1);
+ mul_add_c(a[1],b[3],c2,c3,c1);
+ mul_add_c(a[0],b[4],c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ mul_add_c(a[0],b[5],c3,c1,c2);
+ mul_add_c(a[1],b[4],c3,c1,c2);
+ mul_add_c(a[2],b[3],c3,c1,c2);
+ mul_add_c(a[3],b[2],c3,c1,c2);
+ mul_add_c(a[4],b[1],c3,c1,c2);
+ mul_add_c(a[5],b[0],c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ mul_add_c(a[6],b[0],c1,c2,c3);
+ mul_add_c(a[5],b[1],c1,c2,c3);
+ mul_add_c(a[4],b[2],c1,c2,c3);
+ mul_add_c(a[3],b[3],c1,c2,c3);
+ mul_add_c(a[2],b[4],c1,c2,c3);
+ mul_add_c(a[1],b[5],c1,c2,c3);
+ mul_add_c(a[0],b[6],c1,c2,c3);
+ r[6]=c1;
+ c1=0;
+ mul_add_c(a[0],b[7],c2,c3,c1);
+ mul_add_c(a[1],b[6],c2,c3,c1);
+ mul_add_c(a[2],b[5],c2,c3,c1);
+ mul_add_c(a[3],b[4],c2,c3,c1);
+ mul_add_c(a[4],b[3],c2,c3,c1);
+ mul_add_c(a[5],b[2],c2,c3,c1);
+ mul_add_c(a[6],b[1],c2,c3,c1);
+ mul_add_c(a[7],b[0],c2,c3,c1);
+ r[7]=c2;
+ c2=0;
+ mul_add_c(a[7],b[1],c3,c1,c2);
+ mul_add_c(a[6],b[2],c3,c1,c2);
+ mul_add_c(a[5],b[3],c3,c1,c2);
+ mul_add_c(a[4],b[4],c3,c1,c2);
+ mul_add_c(a[3],b[5],c3,c1,c2);
+ mul_add_c(a[2],b[6],c3,c1,c2);
+ mul_add_c(a[1],b[7],c3,c1,c2);
+ r[8]=c3;
+ c3=0;
+ mul_add_c(a[2],b[7],c1,c2,c3);
+ mul_add_c(a[3],b[6],c1,c2,c3);
+ mul_add_c(a[4],b[5],c1,c2,c3);
+ mul_add_c(a[5],b[4],c1,c2,c3);
+ mul_add_c(a[6],b[3],c1,c2,c3);
+ mul_add_c(a[7],b[2],c1,c2,c3);
+ r[9]=c1;
+ c1=0;
+ mul_add_c(a[7],b[3],c2,c3,c1);
+ mul_add_c(a[6],b[4],c2,c3,c1);
+ mul_add_c(a[5],b[5],c2,c3,c1);
+ mul_add_c(a[4],b[6],c2,c3,c1);
+ mul_add_c(a[3],b[7],c2,c3,c1);
+ r[10]=c2;
+ c2=0;
+ mul_add_c(a[4],b[7],c3,c1,c2);
+ mul_add_c(a[5],b[6],c3,c1,c2);
+ mul_add_c(a[6],b[5],c3,c1,c2);
+ mul_add_c(a[7],b[4],c3,c1,c2);
+ r[11]=c3;
+ c3=0;
+ mul_add_c(a[7],b[5],c1,c2,c3);
+ mul_add_c(a[6],b[6],c1,c2,c3);
+ mul_add_c(a[5],b[7],c1,c2,c3);
+ r[12]=c1;
+ c1=0;
+ mul_add_c(a[6],b[7],c2,c3,c1);
+ mul_add_c(a[7],b[6],c2,c3,c1);
+ r[13]=c2;
+ c2=0;
+ mul_add_c(a[7],b[7],c3,c1,c2);
+ r[14]=c3;
+ r[15]=c1;
+ }
+
+void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ mul_add_c(a[0],b[0],c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ mul_add_c(a[0],b[1],c2,c3,c1);
+ mul_add_c(a[1],b[0],c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ mul_add_c(a[2],b[0],c3,c1,c2);
+ mul_add_c(a[1],b[1],c3,c1,c2);
+ mul_add_c(a[0],b[2],c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ mul_add_c(a[0],b[3],c1,c2,c3);
+ mul_add_c(a[1],b[2],c1,c2,c3);
+ mul_add_c(a[2],b[1],c1,c2,c3);
+ mul_add_c(a[3],b[0],c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ mul_add_c(a[3],b[1],c2,c3,c1);
+ mul_add_c(a[2],b[2],c2,c3,c1);
+ mul_add_c(a[1],b[3],c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ mul_add_c(a[2],b[3],c3,c1,c2);
+ mul_add_c(a[3],b[2],c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ mul_add_c(a[3],b[3],c1,c2,c3);
+ r[6]=c1;
+ r[7]=c2;
+ }
+
+void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ sqr_add_c(a,0,c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ sqr_add_c2(a,1,0,c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ sqr_add_c(a,1,c3,c1,c2);
+ sqr_add_c2(a,2,0,c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ sqr_add_c2(a,3,0,c1,c2,c3);
+ sqr_add_c2(a,2,1,c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ sqr_add_c(a,2,c2,c3,c1);
+ sqr_add_c2(a,3,1,c2,c3,c1);
+ sqr_add_c2(a,4,0,c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ sqr_add_c2(a,5,0,c3,c1,c2);
+ sqr_add_c2(a,4,1,c3,c1,c2);
+ sqr_add_c2(a,3,2,c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ sqr_add_c(a,3,c1,c2,c3);
+ sqr_add_c2(a,4,2,c1,c2,c3);
+ sqr_add_c2(a,5,1,c1,c2,c3);
+ sqr_add_c2(a,6,0,c1,c2,c3);
+ r[6]=c1;
+ c1=0;
+ sqr_add_c2(a,7,0,c2,c3,c1);
+ sqr_add_c2(a,6,1,c2,c3,c1);
+ sqr_add_c2(a,5,2,c2,c3,c1);
+ sqr_add_c2(a,4,3,c2,c3,c1);
+ r[7]=c2;
+ c2=0;
+ sqr_add_c(a,4,c3,c1,c2);
+ sqr_add_c2(a,5,3,c3,c1,c2);
+ sqr_add_c2(a,6,2,c3,c1,c2);
+ sqr_add_c2(a,7,1,c3,c1,c2);
+ r[8]=c3;
+ c3=0;
+ sqr_add_c2(a,7,2,c1,c2,c3);
+ sqr_add_c2(a,6,3,c1,c2,c3);
+ sqr_add_c2(a,5,4,c1,c2,c3);
+ r[9]=c1;
+ c1=0;
+ sqr_add_c(a,5,c2,c3,c1);
+ sqr_add_c2(a,6,4,c2,c3,c1);
+ sqr_add_c2(a,7,3,c2,c3,c1);
+ r[10]=c2;
+ c2=0;
+ sqr_add_c2(a,7,4,c3,c1,c2);
+ sqr_add_c2(a,6,5,c3,c1,c2);
+ r[11]=c3;
+ c3=0;
+ sqr_add_c(a,6,c1,c2,c3);
+ sqr_add_c2(a,7,5,c1,c2,c3);
+ r[12]=c1;
+ c1=0;
+ sqr_add_c2(a,7,6,c2,c3,c1);
+ r[13]=c2;
+ c2=0;
+ sqr_add_c(a,7,c3,c1,c2);
+ r[14]=c3;
+ r[15]=c1;
+ }
+
+void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
+ {
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ sqr_add_c(a,0,c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ sqr_add_c2(a,1,0,c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ sqr_add_c(a,1,c3,c1,c2);
+ sqr_add_c2(a,2,0,c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ sqr_add_c2(a,3,0,c1,c2,c3);
+ sqr_add_c2(a,2,1,c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ sqr_add_c(a,2,c2,c3,c1);
+ sqr_add_c2(a,3,1,c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ sqr_add_c2(a,3,2,c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ sqr_add_c(a,3,c1,c2,c3);
+ r[6]=c1;
+ r[7]=c2;
+ }
+#endif
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.S
new file mode 100644
index 00000000..ccd2ed70
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.S
@@ -0,0 +1,291 @@
+.text
+
+.type _mul_1x1,@function
+.align 16
+_mul_1x1:
+ subq $128+8,%rsp
+ movq $-1,%r9
+ leaq (%rax,%rax,1),%rsi
+ shrq $3,%r9
+ leaq (,%rax,4),%rdi
+ andq %rax,%r9
+ leaq (,%rax,8),%r12
+ sarq $63,%rax
+ leaq (%r9,%r9,1),%r10
+ sarq $63,%rsi
+ leaq (,%r9,4),%r11
+ andq %rbp,%rax
+ sarq $63,%rdi
+ movq %rax,%rdx
+ shlq $63,%rax
+ andq %rbp,%rsi
+ shrq $1,%rdx
+ movq %rsi,%rcx
+ shlq $62,%rsi
+ andq %rbp,%rdi
+ shrq $2,%rcx
+ xorq %rsi,%rax
+ movq %rdi,%rbx
+ shlq $61,%rdi
+ xorq %rcx,%rdx
+ shrq $3,%rbx
+ xorq %rdi,%rax
+ xorq %rbx,%rdx
+
+ movq %r9,%r13
+ movq $0,0(%rsp)
+ xorq %r10,%r13
+ movq %r9,8(%rsp)
+ movq %r11,%r14
+ movq %r10,16(%rsp)
+ xorq %r12,%r14
+ movq %r13,24(%rsp)
+
+ xorq %r11,%r9
+ movq %r11,32(%rsp)
+ xorq %r11,%r10
+ movq %r9,40(%rsp)
+ xorq %r11,%r13
+ movq %r10,48(%rsp)
+ xorq %r14,%r9
+ movq %r13,56(%rsp)
+ xorq %r14,%r10
+
+ movq %r12,64(%rsp)
+ xorq %r14,%r13
+ movq %r9,72(%rsp)
+ xorq %r11,%r9
+ movq %r10,80(%rsp)
+ xorq %r11,%r10
+ movq %r13,88(%rsp)
+
+ xorq %r11,%r13
+ movq %r14,96(%rsp)
+ movq %r8,%rsi
+ movq %r9,104(%rsp)
+ andq %rbp,%rsi
+ movq %r10,112(%rsp)
+ shrq $4,%rbp
+ movq %r13,120(%rsp)
+ movq %r8,%rdi
+ andq %rbp,%rdi
+ shrq $4,%rbp
+
+ movq (%rsp,%rsi,8),%xmm0
+ movq %r8,%rsi
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ movq (%rsp,%rdi,8),%rcx
+ movq %r8,%rdi
+ movq %rcx,%rbx
+ shlq $4,%rcx
+ andq %rbp,%rdi
+ movq (%rsp,%rsi,8),%xmm1
+ shrq $60,%rbx
+ xorq %rcx,%rax
+ pslldq $1,%xmm1
+ movq %r8,%rsi
+ shrq $4,%rbp
+ xorq %rbx,%rdx
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ pxor %xmm1,%xmm0
+ movq (%rsp,%rdi,8),%rcx
+ movq %r8,%rdi
+ movq %rcx,%rbx
+ shlq $12,%rcx
+ andq %rbp,%rdi
+ movq (%rsp,%rsi,8),%xmm1
+ shrq $52,%rbx
+ xorq %rcx,%rax
+ pslldq $2,%xmm1
+ movq %r8,%rsi
+ shrq $4,%rbp
+ xorq %rbx,%rdx
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ pxor %xmm1,%xmm0
+ movq (%rsp,%rdi,8),%rcx
+ movq %r8,%rdi
+ movq %rcx,%rbx
+ shlq $20,%rcx
+ andq %rbp,%rdi
+ movq (%rsp,%rsi,8),%xmm1
+ shrq $44,%rbx
+ xorq %rcx,%rax
+ pslldq $3,%xmm1
+ movq %r8,%rsi
+ shrq $4,%rbp
+ xorq %rbx,%rdx
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ pxor %xmm1,%xmm0
+ movq (%rsp,%rdi,8),%rcx
+ movq %r8,%rdi
+ movq %rcx,%rbx
+ shlq $28,%rcx
+ andq %rbp,%rdi
+ movq (%rsp,%rsi,8),%xmm1
+ shrq $36,%rbx
+ xorq %rcx,%rax
+ pslldq $4,%xmm1
+ movq %r8,%rsi
+ shrq $4,%rbp
+ xorq %rbx,%rdx
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ pxor %xmm1,%xmm0
+ movq (%rsp,%rdi,8),%rcx
+ movq %r8,%rdi
+ movq %rcx,%rbx
+ shlq $36,%rcx
+ andq %rbp,%rdi
+ movq (%rsp,%rsi,8),%xmm1
+ shrq $28,%rbx
+ xorq %rcx,%rax
+ pslldq $5,%xmm1
+ movq %r8,%rsi
+ shrq $4,%rbp
+ xorq %rbx,%rdx
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ pxor %xmm1,%xmm0
+ movq (%rsp,%rdi,8),%rcx
+ movq %r8,%rdi
+ movq %rcx,%rbx
+ shlq $44,%rcx
+ andq %rbp,%rdi
+ movq (%rsp,%rsi,8),%xmm1
+ shrq $20,%rbx
+ xorq %rcx,%rax
+ pslldq $6,%xmm1
+ movq %r8,%rsi
+ shrq $4,%rbp
+ xorq %rbx,%rdx
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ pxor %xmm1,%xmm0
+ movq (%rsp,%rdi,8),%rcx
+ movq %r8,%rdi
+ movq %rcx,%rbx
+ shlq $52,%rcx
+ andq %rbp,%rdi
+ movq (%rsp,%rsi,8),%xmm1
+ shrq $12,%rbx
+ xorq %rcx,%rax
+ pslldq $7,%xmm1
+ movq %r8,%rsi
+ shrq $4,%rbp
+ xorq %rbx,%rdx
+ andq %rbp,%rsi
+ shrq $4,%rbp
+ pxor %xmm1,%xmm0
+ movq (%rsp,%rdi,8),%rcx
+ movq %rcx,%rbx
+ shlq $60,%rcx
+.byte 102,72,15,126,198
+ shrq $4,%rbx
+ xorq %rcx,%rax
+ psrldq $8,%xmm0
+ xorq %rbx,%rdx
+.byte 102,72,15,126,199
+ xorq %rsi,%rax
+ xorq %rdi,%rdx
+
+ addq $128+8,%rsp
+ .byte 0xf3,0xc3
+.Lend_mul_1x1:
+.size _mul_1x1,.-_mul_1x1
+
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,@function
+.align 16
+bn_GF2m_mul_2x2:
+ movq OPENSSL_ia32cap_P(%rip),%rax
+ btq $33,%rax
+ jnc .Lvanilla_mul_2x2
+
+.byte 102,72,15,110,198
+.byte 102,72,15,110,201
+.byte 102,72,15,110,210
+.byte 102,73,15,110,216
+ movdqa %xmm0,%xmm4
+ movdqa %xmm1,%xmm5
+.byte 102,15,58,68,193,0
+ pxor %xmm2,%xmm4
+ pxor %xmm3,%xmm5
+.byte 102,15,58,68,211,0
+.byte 102,15,58,68,229,0
+ xorps %xmm0,%xmm4
+ xorps %xmm2,%xmm4
+ movdqa %xmm4,%xmm5
+ pslldq $8,%xmm4
+ psrldq $8,%xmm5
+ pxor %xmm4,%xmm2
+ pxor %xmm5,%xmm0
+ movdqu %xmm2,0(%rdi)
+ movdqu %xmm0,16(%rdi)
+ .byte 0xf3,0xc3
+
+.align 16
+.Lvanilla_mul_2x2:
+ leaq -136(%rsp),%rsp
+ movq %r14,80(%rsp)
+ movq %r13,88(%rsp)
+ movq %r12,96(%rsp)
+ movq %rbp,104(%rsp)
+ movq %rbx,112(%rsp)
+.Lbody_mul_2x2:
+ movq %rdi,32(%rsp)
+ movq %rsi,40(%rsp)
+ movq %rdx,48(%rsp)
+ movq %rcx,56(%rsp)
+ movq %r8,64(%rsp)
+
+ movq $15,%r8
+ movq %rsi,%rax
+ movq %rcx,%rbp
+ call _mul_1x1
+ movq %rax,16(%rsp)
+ movq %rdx,24(%rsp)
+
+ movq 48(%rsp),%rax
+ movq 64(%rsp),%rbp
+ call _mul_1x1
+ movq %rax,0(%rsp)
+ movq %rdx,8(%rsp)
+
+ movq 40(%rsp),%rax
+ movq 56(%rsp),%rbp
+ xorq 48(%rsp),%rax
+ xorq 64(%rsp),%rbp
+ call _mul_1x1
+ movq 0(%rsp),%rbx
+ movq 8(%rsp),%rcx
+ movq 16(%rsp),%rdi
+ movq 24(%rsp),%rsi
+ movq 32(%rsp),%rbp
+
+ xorq %rdx,%rax
+ xorq %rcx,%rdx
+ xorq %rbx,%rax
+ movq %rbx,0(%rbp)
+ xorq %rdi,%rdx
+ movq %rsi,24(%rbp)
+ xorq %rsi,%rax
+ xorq %rsi,%rdx
+ xorq %rdx,%rax
+ movq %rdx,16(%rbp)
+ movq %rax,8(%rbp)
+
+ movq 80(%rsp),%r14
+ movq 88(%rsp),%r13
+ movq 96(%rsp),%r12
+ movq 104(%rsp),%rbp
+ movq 112(%rsp),%rbx
+ leaq 136(%rsp),%rsp
+ .byte 0xf3,0xc3
+.Lend_mul_2x2:
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.byte 71,70,40,50,94,109,41,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.align 16
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.pl
new file mode 100644
index 00000000..42bbec2f
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-gf2m.pl
@@ -0,0 +1,390 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's a kind of low-hanging mechanical port from C for
+# the time being... except that it has two code paths: one suitable
+# for any x86_64 CPU and a PCLMULQDQ one suitable for Westmere and
+# later. The improvement varies from one benchmark and µ-arch to another.
+# The vanilla code path is at most 20% faster than compiler-generated code
+# [not very impressive], while the PCLMULQDQ one is a whole 85%-160% better
+# on 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that
+# these coefficients are not the ones for bn_GF2m_mul_2x2 itself, as not
+# all CPU time is burnt in it...
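For orientation, both code paths compute the same carry-less product. A minimal reference sketch in C (illustrative only, with hypothetical helper names, assuming <stdint.h> and 64-bit words) of the 1x1 multiplication and of the Karatsuba-style 2x2 combination performed by bn_GF2m_mul_2x2:

    #include <stdint.h>

    /* Bit-serial carry-less (GF(2)[x]) multiply of two 64-bit polynomials;
     * the assembly obtains the same (hi,lo) via a 4-bit window table
     * (vanilla path) or a single PCLMULQDQ instruction (Westmere+ path). */
    static void gf2m_mul_1x1_ref(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
    {
        uint64_t h = 0, l = 0;
        for (int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                l ^= a << i;
                if (i) h ^= a >> (64 - i);   /* avoid undefined shift by 64 */
            }
        }
        *hi = h; *lo = l;
    }

    /* 2x2 combination: three 1x1 products, middle words recovered by XOR
     * (Karatsuba over GF(2), where addition is XOR). r[] holds four words,
     * least significant first, matching the result layout stored below. */
    static void gf2m_mul_2x2_ref(uint64_t r[4], uint64_t a1, uint64_t a0,
                                 uint64_t b1, uint64_t b0)
    {
        uint64_t h1, l1, h0, l0, hm, lm;
        gf2m_mul_1x1_ref(a1, b1, &h1, &l1);            /* a1*b1           */
        gf2m_mul_1x1_ref(a0, b0, &h0, &l0);            /* a0*b0           */
        gf2m_mul_1x1_ref(a0 ^ a1, b0 ^ b1, &hm, &lm);  /* (a0+a1)*(b0+b1) */
        hm ^= h1 ^ h0;  lm ^= l1 ^ l0;                 /* middle 128 bits */
        r[0] = l0;  r[1] = h0 ^ lm;  r[2] = l1 ^ hm;  r[3] = h1;
    }

In the code below the same three 1x1 products are produced either by three calls to _mul_1x1 (a 4-bit window over a 16-entry table on the stack) or by three PCLMULQDQ instructions, and are combined exactly as in r[] above.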
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+($lo,$hi)=("%rax","%rdx"); $a=$lo;
+($i0,$i1)=("%rsi","%rdi");
+($t0,$t1)=("%rbx","%rcx");
+($b,$mask)=("%rbp","%r8");
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15));
+($R,$Tx)=("%xmm0","%xmm1");
+
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@abi-omnipotent
+.align 16
+_mul_1x1:
+ sub \$128+8,%rsp
+ mov \$-1,$a1
+ lea ($a,$a),$i0
+ shr \$3,$a1
+ lea (,$a,4),$i1
+ and $a,$a1 # a1=a&0x1fffffffffffffff
+ lea (,$a,8),$a8
+ sar \$63,$a # broadcast 63rd bit
+ lea ($a1,$a1),$a2
+ sar \$63,$i0 # broadcast 62nd bit
+ lea (,$a1,4),$a4
+ and $b,$a
+	sar	\$63,$i1		# broadcast 61st bit
+ mov $a,$hi # $a is $lo
+ shl \$63,$lo
+ and $b,$i0
+ shr \$1,$hi
+ mov $i0,$t1
+ shl \$62,$i0
+ and $b,$i1
+ shr \$2,$t1
+ xor $i0,$lo
+ mov $i1,$t0
+ shl \$61,$i1
+ xor $t1,$hi
+ shr \$3,$t0
+ xor $i1,$lo
+ xor $t0,$hi
+
+ mov $a1,$a12
+ movq \$0,0(%rsp) # tab[0]=0
+ xor $a2,$a12 # a1^a2
+ mov $a1,8(%rsp) # tab[1]=a1
+ mov $a4,$a48
+ mov $a2,16(%rsp) # tab[2]=a2
+ xor $a8,$a48 # a4^a8
+ mov $a12,24(%rsp) # tab[3]=a1^a2
+
+ xor $a4,$a1
+ mov $a4,32(%rsp) # tab[4]=a4
+ xor $a4,$a2
+ mov $a1,40(%rsp) # tab[5]=a1^a4
+ xor $a4,$a12
+ mov $a2,48(%rsp) # tab[6]=a2^a4
+ xor $a48,$a1 # a1^a4^a4^a8=a1^a8
+ mov $a12,56(%rsp) # tab[7]=a1^a2^a4
+	xor	$a48,$a2		# a2^a4^a4^a8=a2^a8
+
+ mov $a8,64(%rsp) # tab[8]=a8
+ xor $a48,$a12 # a1^a2^a4^a4^a8=a1^a2^a8
+ mov $a1,72(%rsp) # tab[9]=a1^a8
+ xor $a4,$a1 # a1^a8^a4
+ mov $a2,80(%rsp) # tab[10]=a2^a8
+ xor $a4,$a2 # a2^a8^a4
+ mov $a12,88(%rsp) # tab[11]=a1^a2^a8
+
+ xor $a4,$a12 # a1^a2^a8^a4
+ mov $a48,96(%rsp) # tab[12]=a4^a8
+ mov $mask,$i0
+ mov $a1,104(%rsp) # tab[13]=a1^a4^a8
+ and $b,$i0
+ mov $a2,112(%rsp) # tab[14]=a2^a4^a8
+ shr \$4,$b
+ mov $a12,120(%rsp) # tab[15]=a1^a2^a4^a8
+ mov $mask,$i1
+ and $b,$i1
+ shr \$4,$b
+
+ movq (%rsp,$i0,8),$R # half of calculations is done in SSE2
+ mov $mask,$i0
+ and $b,$i0
+ shr \$4,$b
+___
+ for ($n=1;$n<8;$n++) {
+ $code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $mask,$i1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ and $b,$i1
+ movq (%rsp,$i0,8),$Tx
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ pslldq \$$n,$Tx
+ mov $mask,$i0
+ shr \$4,$b
+ xor $t0,$hi
+ and $b,$i0
+ shr \$4,$b
+ pxor $Tx,$R
+___
+ }
+$code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ movq $R,$i0
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ psrldq \$8,$R
+ xor $t0,$hi
+ movq $R,$i1
+ xor $i0,$lo
+ xor $i1,$hi
+
+ add \$128+8,%rsp
+ ret
+.Lend_mul_1x1:
+.size _mul_1x1,.-_mul_1x1
+___
+
+($rp,$a1,$a0,$b1,$b0) = $win64? ("%rcx","%rdx","%r8", "%r9","%r10") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx","%r8"); # Unix order
+
+$code.=<<___;
+.extern OPENSSL_ia32cap_P
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@abi-omnipotent
+.align 16
+bn_GF2m_mul_2x2:
+ mov OPENSSL_ia32cap_P(%rip),%rax
+ bt \$33,%rax
+ jnc .Lvanilla_mul_2x2
+
+ movq $a1,%xmm0
+ movq $b1,%xmm1
+ movq $a0,%xmm2
+___
+$code.=<<___ if ($win64);
+ movq 40(%rsp),%xmm3
+___
+$code.=<<___ if (!$win64);
+ movq $b0,%xmm3
+___
+$code.=<<___;
+ movdqa %xmm0,%xmm4
+ movdqa %xmm1,%xmm5
+ pclmulqdq \$0,%xmm1,%xmm0 # a1·b1
+ pxor %xmm2,%xmm4
+ pxor %xmm3,%xmm5
+ pclmulqdq \$0,%xmm3,%xmm2 # a0·b0
+ pclmulqdq \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1)
+ xorps %xmm0,%xmm4
+ xorps %xmm2,%xmm4 # (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ movdqa %xmm4,%xmm5
+ pslldq \$8,%xmm4
+ psrldq \$8,%xmm5
+ pxor %xmm4,%xmm2
+ pxor %xmm5,%xmm0
+ movdqu %xmm2,0($rp)
+ movdqu %xmm0,16($rp)
+ ret
+
+.align 16
+.Lvanilla_mul_2x2:
+ lea -8*17(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ mov `8*17+40`(%rsp),$b0
+ mov %rdi,8*15(%rsp)
+ mov %rsi,8*16(%rsp)
+___
+$code.=<<___;
+ mov %r14,8*10(%rsp)
+ mov %r13,8*11(%rsp)
+ mov %r12,8*12(%rsp)
+ mov %rbp,8*13(%rsp)
+ mov %rbx,8*14(%rsp)
+.Lbody_mul_2x2:
+ mov $rp,32(%rsp) # save the arguments
+ mov $a1,40(%rsp)
+ mov $a0,48(%rsp)
+ mov $b1,56(%rsp)
+ mov $b0,64(%rsp)
+
+ mov \$0xf,$mask
+ mov $a1,$a
+ mov $b1,$b
+ call _mul_1x1 # a1·b1
+ mov $lo,16(%rsp)
+ mov $hi,24(%rsp)
+
+ mov 48(%rsp),$a
+ mov 64(%rsp),$b
+ call _mul_1x1 # a0·b0
+ mov $lo,0(%rsp)
+ mov $hi,8(%rsp)
+
+ mov 40(%rsp),$a
+ mov 56(%rsp),$b
+ xor 48(%rsp),$a
+ xor 64(%rsp),$b
+ call _mul_1x1 # (a0+a1)·(b0+b1)
+___
+ @r=("%rbx","%rcx","%rdi","%rsi");
+$code.=<<___;
+ mov 0(%rsp),@r[0]
+ mov 8(%rsp),@r[1]
+ mov 16(%rsp),@r[2]
+ mov 24(%rsp),@r[3]
+ mov 32(%rsp),%rbp
+
+ xor $hi,$lo
+ xor @r[1],$hi
+ xor @r[0],$lo
+ mov @r[0],0(%rbp)
+ xor @r[2],$hi
+ mov @r[3],24(%rbp)
+ xor @r[3],$lo
+ xor @r[3],$hi
+ xor $hi,$lo
+ mov $hi,16(%rbp)
+ mov $lo,8(%rbp)
+
+ mov 8*10(%rsp),%r14
+ mov 8*11(%rsp),%r13
+ mov 8*12(%rsp),%r12
+ mov 8*13(%rsp),%rbp
+ mov 8*14(%rsp),%rbx
+___
+$code.=<<___ if ($win64);
+ mov 8*15(%rsp),%rdi
+ mov 8*16(%rsp),%rsi
+___
+$code.=<<___;
+ lea 8*17(%rsp),%rsp
+ ret
+.Lend_mul_2x2:
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.asciz "GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 152($context),%rax # pull context->Rsp
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lbody_mul_2x2(%rip),%r10
+ cmp %r10,%rbx # context->Rip<"prologue" label
+ jb .Lin_prologue
+
+ mov 8*10(%rax),%r14 # mimic epilogue
+ mov 8*11(%rax),%r13
+ mov 8*12(%rax),%r12
+ mov 8*13(%rax),%rbp
+ mov 8*14(%rax),%rbx
+ mov 8*15(%rax),%rdi
+ mov 8*16(%rax),%rsi
+
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+
+.Lin_prologue:
+ lea 8*17(%rax),%rax
+ mov %rax,152($context) # restore context->Rsp
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+ .rva _mul_1x1
+ .rva .Lend_mul_1x1
+ .rva .LSEH_info_1x1
+
+ .rva .Lvanilla_mul_2x2
+ .rva .Lend_mul_2x2
+ .rva .LSEH_info_2x2
+.section .xdata
+.align 8
+.LSEH_info_1x1:
+ .byte 0x01,0x07,0x02,0x00
+ .byte 0x07,0x01,0x11,0x00 # sub rsp,128+8
+.LSEH_info_2x2:
+ .byte 9,0,0,0
+ .rva se_handler
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.S
new file mode 100644
index 00000000..95e29052
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.S
@@ -0,0 +1,1374 @@
+.text
+
+.globl bn_mul_mont
+.type bn_mul_mont,@function
+.align 16
+bn_mul_mont:
+ testl $3,%r9d
+ jnz .Lmul_enter
+ cmpl $8,%r9d
+ jb .Lmul_enter
+ cmpq %rsi,%rdx
+ jne .Lmul4x_enter
+ jmp .Lsqr4x_enter
+
+.align 16
+.Lmul_enter:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ movl %r9d,%r9d
+ leaq 2(%r9),%r10
+ movq %rsp,%r11
+ negq %r10
+ leaq (%rsp,%r10,8),%rsp
+ andq $-1024,%rsp
+
+ movq %r11,8(%rsp,%r9,8)
+.Lmul_body:
+ movq %rdx,%r12
+ movq (%r8),%r8
+ movq (%r12),%rbx
+ movq (%rsi),%rax
+
+ xorq %r14,%r14
+ xorq %r15,%r15
+
+ movq %r8,%rbp
+ mulq %rbx
+ movq %rax,%r10
+ movq (%rcx),%rax
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%r13
+
+ leaq 1(%r15),%r15
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ addq %rax,%r13
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%r13
+ movq %r10,%r11
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+.L1st_enter:
+ mulq %rbx
+ addq %rax,%r11
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ leaq 1(%r15),%r15
+ movq %rdx,%r10
+
+ mulq %rbp
+ cmpq %r9,%r15
+ jne .L1st
+
+ addq %rax,%r13
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+ movq %r10,%r11
+
+ xorq %rdx,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+ movq %r13,-8(%rsp,%r9,8)
+ movq %rdx,(%rsp,%r9,8)
+
+ leaq 1(%r14),%r14
+ jmp .Louter
+.align 16
+.Louter:
+ movq (%r12,%r14,8),%rbx
+ xorq %r15,%r15
+ movq %r8,%rbp
+ movq (%rsp),%r10
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx),%rax
+ adcq $0,%rdx
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq 8(%rsp),%r10
+ movq %rdx,%r13
+
+ leaq 1(%r15),%r15
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ addq %rax,%r13
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ movq (%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+.Linner_enter:
+ mulq %rbx
+ addq %rax,%r11
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+ leaq 1(%r15),%r15
+
+ mulq %rbp
+ cmpq %r9,%r15
+ jne .Linner
+
+ addq %rax,%r13
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ movq (%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ xorq %rdx,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-8(%rsp,%r9,8)
+ movq %rdx,(%rsp,%r9,8)
+
+ leaq 1(%r14),%r14
+ cmpq %r9,%r14
+ jl .Louter
+
+ xorq %r14,%r14
+ movq (%rsp),%rax
+ leaq (%rsp),%rsi
+ movq %r9,%r15
+ jmp .Lsub
+.align 16
+.Lsub: sbbq (%rcx,%r14,8),%rax
+ movq %rax,(%rdi,%r14,8)
+ movq 8(%rsi,%r14,8),%rax
+ leaq 1(%r14),%r14
+ decq %r15
+ jnz .Lsub
+
+ sbbq $0,%rax
+ xorq %r14,%r14
+ andq %rax,%rsi
+ notq %rax
+ movq %rdi,%rcx
+ andq %rax,%rcx
+ movq %r9,%r15
+ orq %rcx,%rsi
+.align 16
+.Lcopy:
+ movq (%rsi,%r14,8),%rax
+ movq %r14,(%rsp,%r14,8)
+ movq %rax,(%rdi,%r14,8)
+ leaq 1(%r14),%r14
+ subq $1,%r15
+ jnz .Lcopy
+
+ movq 8(%rsp,%r9,8),%rsi
+ movq $1,%rax
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lmul_epilogue:
+ .byte 0xf3,0xc3
+.size bn_mul_mont,.-bn_mul_mont
+.type bn_mul4x_mont,@function
+.align 16
+bn_mul4x_mont:
+.Lmul4x_enter:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ movl %r9d,%r9d
+ leaq 4(%r9),%r10
+ movq %rsp,%r11
+ negq %r10
+ leaq (%rsp,%r10,8),%rsp
+ andq $-1024,%rsp
+
+ movq %r11,8(%rsp,%r9,8)
+.Lmul4x_body:
+ movq %rdi,16(%rsp,%r9,8)
+ movq %rdx,%r12
+ movq (%r8),%r8
+ movq (%r12),%rbx
+ movq (%rsi),%rax
+
+ xorq %r14,%r14
+ xorq %r15,%r15
+
+ movq %r8,%rbp
+ mulq %rbx
+ movq %rax,%r10
+ movq (%rcx),%rax
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx),%rax
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq 16(%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ leaq 4(%r15),%r15
+ adcq $0,%rdx
+ movq %rdi,(%rsp)
+ movq %rdx,%r13
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq 8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-8(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ leaq 4(%r15),%r15
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq -16(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-32(%rsp,%r15,8)
+ movq %rdx,%r13
+ cmpq %r9,%r15
+ jl .L1st4x
+
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ xorq %rdi,%rdi
+ addq %r10,%r13
+ adcq $0,%rdi
+ movq %r13,-8(%rsp,%r15,8)
+ movq %rdi,(%rsp,%r15,8)
+
+ leaq 1(%r14),%r14
+.align 4
+.Louter4x:
+ movq (%r12,%r14,8),%rbx
+ xorq %r15,%r15
+ movq (%rsp),%r10
+ movq %r8,%rbp
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx),%rax
+ adcq $0,%rdx
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx),%rax
+ adcq $0,%rdx
+ addq 8(%rsp),%r11
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq 16(%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ leaq 4(%r15),%r15
+ adcq $0,%rdx
+ movq %rdi,(%rsp)
+ movq %rdx,%r13
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -16(%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -8(%rsp,%r15,8),%r11
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq (%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq 8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-8(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq 8(%rsp,%r15,8),%r11
+ adcq $0,%rdx
+ leaq 4(%r15),%r15
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq -16(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-32(%rsp,%r15,8)
+ movq %rdx,%r13
+ cmpq %r9,%r15
+ jl .Linner4x
+
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -16(%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -8(%rsp,%r15,8),%r11
+ adcq $0,%rdx
+ leaq 1(%r14),%r14
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ xorq %rdi,%rdi
+ addq %r10,%r13
+ adcq $0,%rdi
+ addq (%rsp,%r9,8),%r13
+ adcq $0,%rdi
+ movq %r13,-8(%rsp,%r15,8)
+ movq %rdi,(%rsp,%r15,8)
+
+ cmpq %r9,%r14
+ jl .Louter4x
+ movq 16(%rsp,%r9,8),%rdi
+ movq 0(%rsp),%rax
+ pxor %xmm0,%xmm0
+ movq 8(%rsp),%rdx
+ shrq $2,%r9
+ leaq (%rsp),%rsi
+ xorq %r14,%r14
+
+ subq 0(%rcx),%rax
+ movq 16(%rsi),%rbx
+ movq 24(%rsi),%rbp
+ sbbq 8(%rcx),%rdx
+ leaq -1(%r9),%r15
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ movq %rax,0(%rdi,%r14,8)
+ movq %rdx,8(%rdi,%r14,8)
+ sbbq 16(%rcx,%r14,8),%rbx
+ movq 32(%rsi,%r14,8),%rax
+ movq 40(%rsi,%r14,8),%rdx
+ sbbq 24(%rcx,%r14,8),%rbp
+ movq %rbx,16(%rdi,%r14,8)
+ movq %rbp,24(%rdi,%r14,8)
+ sbbq 32(%rcx,%r14,8),%rax
+ movq 48(%rsi,%r14,8),%rbx
+ movq 56(%rsi,%r14,8),%rbp
+ sbbq 40(%rcx,%r14,8),%rdx
+ leaq 4(%r14),%r14
+ decq %r15
+ jnz .Lsub4x
+
+ movq %rax,0(%rdi,%r14,8)
+ movq 32(%rsi,%r14,8),%rax
+ sbbq 16(%rcx,%r14,8),%rbx
+ movq %rdx,8(%rdi,%r14,8)
+ sbbq 24(%rcx,%r14,8),%rbp
+ movq %rbx,16(%rdi,%r14,8)
+
+ sbbq $0,%rax
+ movq %rbp,24(%rdi,%r14,8)
+ xorq %r14,%r14
+ andq %rax,%rsi
+ notq %rax
+ movq %rdi,%rcx
+ andq %rax,%rcx
+ leaq -1(%r9),%r15
+ orq %rcx,%rsi
+
+ movdqu (%rsi),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,(%rdi)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x:
+ movdqu 16(%rsi,%r14,1),%xmm2
+ movdqu 32(%rsi,%r14,1),%xmm1
+ movdqa %xmm0,16(%rsp,%r14,1)
+ movdqu %xmm2,16(%rdi,%r14,1)
+ movdqa %xmm0,32(%rsp,%r14,1)
+ movdqu %xmm1,32(%rdi,%r14,1)
+ leaq 32(%r14),%r14
+ decq %r15
+ jnz .Lcopy4x
+
+ shlq $2,%r9
+ movdqu 16(%rsi,%r14,1),%xmm2
+ movdqa %xmm0,16(%rsp,%r14,1)
+ movdqu %xmm2,16(%rdi,%r14,1)
+ movq 8(%rsp,%r9,8),%rsi
+ movq $1,%rax
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lmul4x_epilogue:
+ .byte 0xf3,0xc3
+.size bn_mul4x_mont,.-bn_mul4x_mont
+.type bn_sqr4x_mont,@function
+.align 16
+bn_sqr4x_mont:
+.Lsqr4x_enter:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ shll $3,%r9d
+ xorq %r10,%r10
+ movq %rsp,%r11
+ subq %r9,%r10
+ movq (%r8),%r8
+ leaq -72(%rsp,%r10,2),%rsp
+ andq $-1024,%rsp
+
+
+
+
+
+
+
+
+
+
+
+ movq %rdi,32(%rsp)
+ movq %rcx,40(%rsp)
+ movq %r8,48(%rsp)
+ movq %r11,56(%rsp)
+.Lsqr4x_body:
+
+
+
+
+
+
+
+ leaq 32(%r10),%rbp
+ leaq (%rsi,%r9,1),%rsi
+
+ movq %r9,%rcx
+
+
+ movq -32(%rsi,%rbp,1),%r14
+ leaq 64(%rsp,%r9,2),%rdi
+ movq -24(%rsi,%rbp,1),%rax
+ leaq -32(%rdi,%rbp,1),%rdi
+ movq -16(%rsi,%rbp,1),%rbx
+ movq %rax,%r15
+
+ mulq %r14
+ movq %rax,%r10
+ movq %rbx,%rax
+ movq %rdx,%r11
+ movq %r10,-24(%rdi,%rbp,1)
+
+ xorq %r10,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ movq %r11,-16(%rdi,%rbp,1)
+
+ leaq -16(%rbp),%rcx
+
+
+ movq 8(%rsi,%rcx,1),%rbx
+ mulq %r15
+ movq %rax,%r12
+ movq %rbx,%rax
+ movq %rdx,%r13
+
+ xorq %r11,%r11
+ addq %r12,%r10
+ leaq 16(%rcx),%rcx
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,-8(%rdi,%rcx,1)
+ jmp .Lsqr4x_1st
+
+.align 16
+.Lsqr4x_1st:
+ movq (%rsi,%rcx,1),%rbx
+ xorq %r12,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq %rbx,%rax
+ adcq %rdx,%r12
+
+ xorq %r10,%r10
+ addq %r13,%r11
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ movq %r11,(%rdi,%rcx,1)
+
+
+ movq 8(%rsi,%rcx,1),%rbx
+ xorq %r13,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq %rdx,%r13
+
+ xorq %r11,%r11
+ addq %r12,%r10
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,8(%rdi,%rcx,1)
+
+ movq 16(%rsi,%rcx,1),%rbx
+ xorq %r12,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq %rbx,%rax
+ adcq %rdx,%r12
+
+ xorq %r10,%r10
+ addq %r13,%r11
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ movq %r11,16(%rdi,%rcx,1)
+
+
+ movq 24(%rsi,%rcx,1),%rbx
+ xorq %r13,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq %rdx,%r13
+
+ xorq %r11,%r11
+ addq %r12,%r10
+ leaq 32(%rcx),%rcx
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,-8(%rdi,%rcx,1)
+
+ cmpq $0,%rcx
+ jne .Lsqr4x_1st
+
+ xorq %r12,%r12
+ addq %r11,%r13
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ adcq %rdx,%r12
+
+ movq %r13,(%rdi)
+ leaq 16(%rbp),%rbp
+ movq %r12,8(%rdi)
+ jmp .Lsqr4x_outer
+
+.align 16
+.Lsqr4x_outer:
+ movq -32(%rsi,%rbp,1),%r14
+ leaq 64(%rsp,%r9,2),%rdi
+ movq -24(%rsi,%rbp,1),%rax
+ leaq -32(%rdi,%rbp,1),%rdi
+ movq -16(%rsi,%rbp,1),%rbx
+ movq %rax,%r15
+
+ movq -24(%rdi,%rbp,1),%r10
+ xorq %r11,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,-24(%rdi,%rbp,1)
+
+ xorq %r10,%r10
+ addq -16(%rdi,%rbp,1),%r11
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ movq %r11,-16(%rdi,%rbp,1)
+
+ leaq -16(%rbp),%rcx
+ xorq %r12,%r12
+
+
+ movq 8(%rsi,%rcx,1),%rbx
+ xorq %r13,%r13
+ addq 8(%rdi,%rcx,1),%r12
+ adcq $0,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq %rdx,%r13
+
+ xorq %r11,%r11
+ addq %r12,%r10
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,8(%rdi,%rcx,1)
+
+ leaq 16(%rcx),%rcx
+ jmp .Lsqr4x_inner
+
+.align 16
+.Lsqr4x_inner:
+ movq (%rsi,%rcx,1),%rbx
+ xorq %r12,%r12
+ addq (%rdi,%rcx,1),%r13
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq %rbx,%rax
+ adcq %rdx,%r12
+
+ xorq %r10,%r10
+ addq %r13,%r11
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ movq %r11,(%rdi,%rcx,1)
+
+ movq 8(%rsi,%rcx,1),%rbx
+ xorq %r13,%r13
+ addq 8(%rdi,%rcx,1),%r12
+ adcq $0,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq %rdx,%r13
+
+ xorq %r11,%r11
+ addq %r12,%r10
+ leaq 16(%rcx),%rcx
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,-8(%rdi,%rcx,1)
+
+ cmpq $0,%rcx
+ jne .Lsqr4x_inner
+
+ xorq %r12,%r12
+ addq %r11,%r13
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ adcq %rdx,%r12
+
+ movq %r13,(%rdi)
+ movq %r12,8(%rdi)
+
+ addq $16,%rbp
+ jnz .Lsqr4x_outer
+
+
+ movq -32(%rsi),%r14
+ leaq 64(%rsp,%r9,2),%rdi
+ movq -24(%rsi),%rax
+ leaq -32(%rdi,%rbp,1),%rdi
+ movq -16(%rsi),%rbx
+ movq %rax,%r15
+
+ xorq %r11,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,-24(%rdi)
+
+ xorq %r10,%r10
+ addq %r13,%r11
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ movq %r11,-16(%rdi)
+
+ movq -8(%rsi),%rbx
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq $0,%rdx
+
+ xorq %r11,%r11
+ addq %r12,%r10
+ movq %rdx,%r13
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %rbx,%rax
+ adcq %rdx,%r11
+ movq %r10,-8(%rdi)
+
+ xorq %r12,%r12
+ addq %r11,%r13
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq -16(%rsi),%rax
+ adcq %rdx,%r12
+
+ movq %r13,(%rdi)
+ movq %r12,8(%rdi)
+
+ mulq %rbx
+ addq $16,%rbp
+ xorq %r14,%r14
+ subq %r9,%rbp
+ xorq %r15,%r15
+
+ addq %r12,%rax
+ adcq $0,%rdx
+ movq %rax,8(%rdi)
+ movq %rdx,16(%rdi)
+ movq %r15,24(%rdi)
+
+ movq -16(%rsi,%rbp,1),%rax
+ leaq 64(%rsp,%r9,2),%rdi
+ xorq %r10,%r10
+ movq -24(%rdi,%rbp,2),%r11
+
+ leaq (%r14,%r10,2),%r12
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq -16(%rdi,%rbp,2),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq -8(%rdi,%rbp,2),%r11
+ adcq %rax,%r12
+ movq -8(%rsi,%rbp,1),%rax
+ movq %r12,-32(%rdi,%rbp,2)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,-24(%rdi,%rbp,2)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ movq 0(%rdi,%rbp,2),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 8(%rdi,%rbp,2),%r11
+ adcq %rax,%rbx
+ movq 0(%rsi,%rbp,1),%rax
+ movq %rbx,-16(%rdi,%rbp,2)
+ adcq %rdx,%r8
+ leaq 16(%rbp),%rbp
+ movq %r8,-40(%rdi,%rbp,2)
+ sbbq %r15,%r15
+ jmp .Lsqr4x_shift_n_add
+
+.align 16
+.Lsqr4x_shift_n_add:
+ leaq (%r14,%r10,2),%r12
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq -16(%rdi,%rbp,2),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq -8(%rdi,%rbp,2),%r11
+ adcq %rax,%r12
+ movq -8(%rsi,%rbp,1),%rax
+ movq %r12,-32(%rdi,%rbp,2)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,-24(%rdi,%rbp,2)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ movq 0(%rdi,%rbp,2),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 8(%rdi,%rbp,2),%r11
+ adcq %rax,%rbx
+ movq 0(%rsi,%rbp,1),%rax
+ movq %rbx,-16(%rdi,%rbp,2)
+ adcq %rdx,%r8
+
+ leaq (%r14,%r10,2),%r12
+ movq %r8,-8(%rdi,%rbp,2)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq 16(%rdi,%rbp,2),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 24(%rdi,%rbp,2),%r11
+ adcq %rax,%r12
+ movq 8(%rsi,%rbp,1),%rax
+ movq %r12,0(%rdi,%rbp,2)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,8(%rdi,%rbp,2)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ movq 32(%rdi,%rbp,2),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq 40(%rdi,%rbp,2),%r11
+ adcq %rax,%rbx
+ movq 16(%rsi,%rbp,1),%rax
+ movq %rbx,16(%rdi,%rbp,2)
+ adcq %rdx,%r8
+ movq %r8,24(%rdi,%rbp,2)
+ sbbq %r15,%r15
+ addq $32,%rbp
+ jnz .Lsqr4x_shift_n_add
+
+ leaq (%r14,%r10,2),%r12
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r13
+ shrq $63,%r11
+ orq %r10,%r13
+ movq -16(%rdi),%r10
+ movq %r11,%r14
+ mulq %rax
+ negq %r15
+ movq -8(%rdi),%r11
+ adcq %rax,%r12
+ movq -8(%rsi),%rax
+ movq %r12,-32(%rdi)
+ adcq %rdx,%r13
+
+ leaq (%r14,%r10,2),%rbx
+ movq %r13,-24(%rdi)
+ sbbq %r15,%r15
+ shrq $63,%r10
+ leaq (%rcx,%r11,2),%r8
+ shrq $63,%r11
+ orq %r10,%r8
+ mulq %rax
+ negq %r15
+ adcq %rax,%rbx
+ adcq %rdx,%r8
+ movq %rbx,-16(%rdi)
+ movq %r8,-8(%rdi)
+ movq 40(%rsp),%rsi
+ movq 48(%rsp),%r8
+ xorq %rcx,%rcx
+ movq %r9,0(%rsp)
+ subq %r9,%rcx
+ movq 64(%rsp),%r10
+ movq %r8,%r14
+ leaq 64(%rsp,%r9,2),%rax
+ leaq 64(%rsp,%r9,1),%rdi
+ movq %rax,8(%rsp)
+ leaq (%rsi,%r9,1),%rsi
+ xorq %rbp,%rbp
+
+ movq 0(%rsi,%rcx,1),%rax
+ movq 8(%rsi,%rcx,1),%r9
+ imulq %r10,%r14
+ movq %rax,%rbx
+ jmp .Lsqr4x_mont_outer
+
+.align 16
+.Lsqr4x_mont_outer:
+ xorq %r11,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %r9,%rax
+ adcq %rdx,%r11
+ movq %r8,%r15
+
+ xorq %r10,%r10
+ addq 8(%rdi,%rcx,1),%r11
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+
+ imulq %r11,%r15
+
+ movq 16(%rsi,%rcx,1),%rbx
+ xorq %r13,%r13
+ addq %r11,%r12
+ adcq $0,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq %rdx,%r13
+ movq %r12,8(%rdi,%rcx,1)
+
+ xorq %r11,%r11
+ addq 16(%rdi,%rcx,1),%r10
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %r9,%rax
+ adcq %rdx,%r11
+
+ movq 24(%rsi,%rcx,1),%r9
+ xorq %r12,%r12
+ addq %r10,%r13
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq %r9,%rax
+ adcq %rdx,%r12
+ movq %r13,16(%rdi,%rcx,1)
+
+ xorq %r10,%r10
+ addq 24(%rdi,%rcx,1),%r11
+ leaq 32(%rcx),%rcx
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ jmp .Lsqr4x_mont_inner
+
+.align 16
+.Lsqr4x_mont_inner:
+ movq (%rsi,%rcx,1),%rbx
+ xorq %r13,%r13
+ addq %r11,%r12
+ adcq $0,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq %rdx,%r13
+ movq %r12,-8(%rdi,%rcx,1)
+
+ xorq %r11,%r11
+ addq (%rdi,%rcx,1),%r10
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %r9,%rax
+ adcq %rdx,%r11
+
+ movq 8(%rsi,%rcx,1),%r9
+ xorq %r12,%r12
+ addq %r10,%r13
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq %r9,%rax
+ adcq %rdx,%r12
+ movq %r13,(%rdi,%rcx,1)
+
+ xorq %r10,%r10
+ addq 8(%rdi,%rcx,1),%r11
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+
+
+ movq 16(%rsi,%rcx,1),%rbx
+ xorq %r13,%r13
+ addq %r11,%r12
+ adcq $0,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %rbx,%rax
+ adcq %rdx,%r13
+ movq %r12,8(%rdi,%rcx,1)
+
+ xorq %r11,%r11
+ addq 16(%rdi,%rcx,1),%r10
+ adcq $0,%r11
+ mulq %r14
+ addq %rax,%r10
+ movq %r9,%rax
+ adcq %rdx,%r11
+
+ movq 24(%rsi,%rcx,1),%r9
+ xorq %r12,%r12
+ addq %r10,%r13
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq %r9,%rax
+ adcq %rdx,%r12
+ movq %r13,16(%rdi,%rcx,1)
+
+ xorq %r10,%r10
+ addq 24(%rdi,%rcx,1),%r11
+ leaq 32(%rcx),%rcx
+ adcq $0,%r10
+ mulq %r14
+ addq %rax,%r11
+ movq %rbx,%rax
+ adcq %rdx,%r10
+ cmpq $0,%rcx
+ jne .Lsqr4x_mont_inner
+
+ subq 0(%rsp),%rcx
+ movq %r8,%r14
+
+ xorq %r13,%r13
+ addq %r11,%r12
+ adcq $0,%r13
+ mulq %r15
+ addq %rax,%r12
+ movq %r9,%rax
+ adcq %rdx,%r13
+ movq %r12,-8(%rdi)
+
+ xorq %r11,%r11
+ addq (%rdi),%r10
+ adcq $0,%r11
+ movq 0(%rsi,%rcx,1),%rbx
+ addq %rbp,%r10
+ adcq $0,%r11
+
+ imulq 16(%rdi,%rcx,1),%r14
+ xorq %r12,%r12
+ movq 8(%rsi,%rcx,1),%r9
+ addq %r10,%r13
+ movq 16(%rdi,%rcx,1),%r10
+ adcq $0,%r12
+ mulq %r15
+ addq %rax,%r13
+ movq %rbx,%rax
+ adcq %rdx,%r12
+ movq %r13,(%rdi)
+
+ xorq %rbp,%rbp
+ addq 8(%rdi),%r12
+ adcq %rbp,%rbp
+ addq %r11,%r12
+ leaq 16(%rdi),%rdi
+ adcq $0,%rbp
+ movq %r12,-8(%rdi)
+ cmpq 8(%rsp),%rdi
+ jb .Lsqr4x_mont_outer
+
+ movq 0(%rsp),%r9
+ movq %rbp,(%rdi)
+ movq 64(%rsp,%r9,1),%rax
+ leaq 64(%rsp,%r9,1),%rbx
+ movq 40(%rsp),%rsi
+ shrq $5,%r9
+ movq 8(%rbx),%rdx
+ xorq %rbp,%rbp
+
+ movq 32(%rsp),%rdi
+ subq 0(%rsi),%rax
+ movq 16(%rbx),%r10
+ movq 24(%rbx),%r11
+ sbbq 8(%rsi),%rdx
+ leaq -1(%r9),%rcx
+ jmp .Lsqr4x_sub
+.align 16
+.Lsqr4x_sub:
+ movq %rax,0(%rdi,%rbp,8)
+ movq %rdx,8(%rdi,%rbp,8)
+ sbbq 16(%rsi,%rbp,8),%r10
+ movq 32(%rbx,%rbp,8),%rax
+ movq 40(%rbx,%rbp,8),%rdx
+ sbbq 24(%rsi,%rbp,8),%r11
+ movq %r10,16(%rdi,%rbp,8)
+ movq %r11,24(%rdi,%rbp,8)
+ sbbq 32(%rsi,%rbp,8),%rax
+ movq 48(%rbx,%rbp,8),%r10
+ movq 56(%rbx,%rbp,8),%r11
+ sbbq 40(%rsi,%rbp,8),%rdx
+ leaq 4(%rbp),%rbp
+ decq %rcx
+ jnz .Lsqr4x_sub
+
+ movq %rax,0(%rdi,%rbp,8)
+ movq 32(%rbx,%rbp,8),%rax
+ sbbq 16(%rsi,%rbp,8),%r10
+ movq %rdx,8(%rdi,%rbp,8)
+ sbbq 24(%rsi,%rbp,8),%r11
+ movq %r10,16(%rdi,%rbp,8)
+
+ sbbq $0,%rax
+ movq %r11,24(%rdi,%rbp,8)
+ xorq %rbp,%rbp
+ andq %rax,%rbx
+ notq %rax
+ movq %rdi,%rsi
+ andq %rax,%rsi
+ leaq -1(%r9),%rcx
+ orq %rsi,%rbx
+
+ pxor %xmm0,%xmm0
+ leaq 64(%rsp,%r9,8),%rsi
+ movdqu (%rbx),%xmm1
+ leaq (%rsi,%r9,8),%rsi
+ movdqa %xmm0,64(%rsp)
+ movdqa %xmm0,(%rsi)
+ movdqu %xmm1,(%rdi)
+ jmp .Lsqr4x_copy
+.align 16
+.Lsqr4x_copy:
+ movdqu 16(%rbx,%rbp,1),%xmm2
+ movdqu 32(%rbx,%rbp,1),%xmm1
+ movdqa %xmm0,80(%rsp,%rbp,1)
+ movdqa %xmm0,96(%rsp,%rbp,1)
+ movdqa %xmm0,16(%rsi,%rbp,1)
+ movdqa %xmm0,32(%rsi,%rbp,1)
+ movdqu %xmm2,16(%rdi,%rbp,1)
+ movdqu %xmm1,32(%rdi,%rbp,1)
+ leaq 32(%rbp),%rbp
+ decq %rcx
+ jnz .Lsqr4x_copy
+
+ movdqu 16(%rbx,%rbp,1),%xmm2
+ movdqa %xmm0,80(%rsp,%rbp,1)
+ movdqa %xmm0,16(%rsi,%rbp,1)
+ movdqu %xmm2,16(%rdi,%rbp,1)
+ movq 56(%rsp),%rsi
+ movq $1,%rax
+ movq 0(%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lsqr4x_epilogue:
+ .byte 0xf3,0xc3
+.size bn_sqr4x_mont,.-bn_sqr4x_mont
+.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.align 16
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.pl
new file mode 100755
index 00000000..17fb94c8
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont.pl
@@ -0,0 +1,1681 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2005.
+#
+# Montgomery multiplication routine for x86_64. While it gives a modest
+# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
+# than twice (>2x) as fast. The most common case, rsa1024 sign, is
+# improved by a respectable 50%. It remains to be seen whether loop
+# unrolling and a dedicated squaring routine can provide further improvement...
+
+# July 2011.
+#
+# Add dedicated squaring procedure. Performance improvement varies
+# from platform to platform, but on average it's ~5%/15%/25%/33%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
+
+# August 2011.
+#
+# Unroll and modulo-schedule inner loops in such a manner that they
+# are "fallen through" for input lengths of 8, which is critical for
+# 1024-bit RSA *sign*. Average performance improvement in comparison
+# to *initial* version of this module from 2005 is ~0%/30%/40%/45%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
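For reference while reading the generated code, the word-level algorithm that bn_mul_mont implements (Montgomery multiplication with interleaved reduction) can be sketched roughly as follows in portable C. This is an illustrative sketch only, not part of the commit: it assumes 64-bit words, an unsigned __int128 type, inputs already reduced modulo np, and a hypothetical helper name.

    #include <stdint.h>
    #include <string.h>

    /* Sketch of the reference algorithm: tp[] plays the role of the
     * stack scratch the assembly allocates; n0 is -np[0]^-1 mod 2^64. */
    static void mont_mul_sketch(uint64_t *rp, const uint64_t *ap,
                                const uint64_t *bp, const uint64_t *np,
                                uint64_t n0, int num)
    {
        uint64_t tp[num + 2];
        memset(tp, 0, sizeof(tp));

        for (int i = 0; i < num; i++) {
            unsigned __int128 c = 0;
            for (int j = 0; j < num; j++) {      /* tp += ap[] * bp[i] */
                c += (unsigned __int128)ap[j] * bp[i] + tp[j];
                tp[j] = (uint64_t)c;
                c >>= 64;
            }
            c += tp[num];
            tp[num]     = (uint64_t)c;
            tp[num + 1] = (uint64_t)(c >> 64);

            /* pick m so that tp + m*np is divisible by 2^64, then shift down */
            uint64_t m = tp[0] * n0;
            c = (unsigned __int128)np[0] * m + tp[0];
            c >>= 64;                            /* low word is zero by design */
            for (int j = 1; j < num; j++) {
                c += (unsigned __int128)np[j] * m + tp[j];
                tp[j - 1] = (uint64_t)c;
                c >>= 64;
            }
            c += tp[num];
            tp[num - 1] = (uint64_t)c;
            tp[num]     = tp[num + 1] + (uint64_t)(c >> 64);
        }

        /* conditional final subtraction: rp = (tp >= np) ? tp - np : tp */
        uint64_t borrow = 0;
        for (int j = 0; j < num; j++) {
            unsigned __int128 d = (unsigned __int128)tp[j] - np[j] - borrow;
            rp[j] = (uint64_t)d;
            borrow = (uint64_t)(d >> 64) ? 1 : 0;
        }
        if (borrow > tp[num])
            memcpy(rp, tp, num * sizeof(uint64_t));
    }

The .Lsub/.Lcopy tail of the assembly performs the same conditional subtraction, but selects between the temporary vector and the difference with an and/not/or mask instead of a branch.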
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+# int bn_mul_mont(
+$rp="%rdi"; # BN_ULONG *rp,
+$ap="%rsi"; # const BN_ULONG *ap,
+$bp="%rdx"; # const BN_ULONG *bp,
+$np="%rcx"; # const BN_ULONG *np,
+$n0="%r8"; # const BN_ULONG *n0,
+$num="%r9"; # int num);
+$lo0="%r10";
+$hi0="%r11";
+$hi1="%r13";
+$i="%r14";
+$j="%r15";
+$m0="%rbx";
+$m1="%rbp";
+
+$code=<<___;
+.text
+
+.globl bn_mul_mont
+.type bn_mul_mont,\@function,6
+.align 16
+bn_mul_mont:
+ test \$3,${num}d
+ jnz .Lmul_enter
+ cmp \$8,${num}d
+ jb .Lmul_enter
+ cmp $ap,$bp
+ jne .Lmul4x_enter
+ jmp .Lsqr4x_enter
+
+.align 16
+.Lmul_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov ${num}d,${num}d
+ lea 2($num),%r10
+ mov %rsp,%r11
+ neg %r10
+ lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul_body:
+ mov $bp,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$lo0
+ mov ($np),%rax
+
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .L1st
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+ mov $lo0,$hi0
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ jmp .Louter
+.align 16
+.Louter:
+ mov ($bp,$i,8),$m0 # m0=bp[i]
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
+ mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .Linner
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ add $lo0,$hi1 # pull upmost overflow bit
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ cmp $num,$i
+ jl .Louter
+
+ xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
+ jmp .Lsub
+.align 16
+.Lsub: sbb ($np,$i,8),%rax
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 8($ap,$i,8),%rax # tp[i+1]
+ lea 1($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub
+
+ sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
+ and %rax,$ap
+ not %rax
+ mov $rp,$np
+ and %rax,$np
+ mov $num,$j # j=num
+ or $np,$ap # ap=borrow?tp:rp
+.align 16
+.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont,.-bn_mul_mont
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont,\@function,6
+.align 16
+bn_mul4x_mont:
+.Lmul4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov ${num}d,${num}d
+ lea 4($num),%r10
+ mov %rsp,%r11
+ neg %r10
+ lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul4x_body:
+ mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
+ mov %rdx,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4($j),$j # j++
+ adc \$0,%rdx
+ mov $N[1],(%rsp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+.align 4
+.Louter4x:
+ mov ($bp,$i,8),$m0 # m0=bp[i]
+ xor $j,$j # j=0
+ mov (%rsp),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ add 8(%rsp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4($j),$j # j+=2
+ adc \$0,%rdx
+ mov $N[1],(%rsp) # tp[j-1]
+ mov %rdx,$N[0]
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ add 8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 1($i),$i # i++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add (%rsp,$num,8),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ cmp $num,$i
+ jl .Louter4x
+___
+{
+my @ri=("%rax","%rdx",$m0,$m1);
+$code.=<<___;
+ mov 16(%rsp,$num,8),$rp # restore $rp
+ mov 0(%rsp),@ri[0] # tp[0]
+ pxor %xmm0,%xmm0
+ mov 8(%rsp),@ri[1] # tp[1]
+ shr \$2,$num # num/=4
+ lea (%rsp),$ap # borrow ap for tp
+ xor $i,$i # i=0 and clear CF!
+
+ sub 0($np),@ri[0]
+ mov 16($ap),@ri[2] # tp[2]
+ mov 24($ap),@ri[3] # tp[3]
+ sbb 8($np),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($np,$i,8),@ri[2]
+ mov 32($ap,$i,8),@ri[0] # tp[i+1]
+ mov 40($ap,$i,8),@ri[1]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($np,$i,8),@ri[0]
+ mov 48($ap,$i,8),@ri[2]
+ mov 56($ap,$i,8),@ri[3]
+ sbb 40($np,$i,8),@ri[1]
+ lea 4($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub4x
+
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($ap,$i,8),@ri[0] # load overflow bit
+ sbb 16($np,$i,8),@ri[2]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$ap
+ not @ri[0]
+ mov $rp,$np
+ and @ri[0],$np
+ lea -1($num),$j
+ or $np,$ap # ap=borrow?tp:rp
+
+ movdqu ($ap),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,($rp)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x: # copy or in-place refresh
+ movdqu 16($ap,$i),%xmm2
+ movdqu 32($ap,$i),%xmm1
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+ movdqa %xmm0,32(%rsp,$i)
+ movdqu %xmm1,32($rp,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lcopy4x
+
+ shl \$2,$num
+ movdqu 16($ap,$i),%xmm2
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+___
+}
+$code.=<<___;
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul4x_epilogue:
+ ret
+.size bn_mul4x_mont,.-bn_mul4x_mont
+___
+}}}
+ {{{
+######################################################################
+# void bn_sqr4x_mont(
+my $rptr="%rdi"; # const BN_ULONG *rptr,
+my $aptr="%rsi"; # const BN_ULONG *aptr,
+my $bptr="%rdx"; # not used
+my $nptr="%rcx"; # const BN_ULONG *nptr,
+my $n0 ="%r8"; # const BN_ULONG *n0);
+my $num ="%r9"; # int num, has to be divisible by 4 and
+ # not less than 8
+
+my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
+my @A0=("%r10","%r11");
+my @A1=("%r12","%r13");
+my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
+
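+# A minimal Perl reference model (illustrative only; never called and not
+# part of the generated module) of the squaring strategy used below:
+# a) accumulate every cross product a[i]*a[j] with i<j, then
+# b) double the accumulator and fold in the a[i]*a[i] terms.
+# A small word size $bits is assumed so plain Perl integers stay exact.
+sub _ref_sqr_words {
+    my ($bits,@a)=@_;                  # little-endian words of a
+    my $top=scalar(@a);
+    my @t=(0) x (2*$top);
+    for (my $i=0;$i<$top;$i++) {       # a) everything but a[i]*a[i]
+        for (my $j=$i+1;$j<$top;$j++) {
+            $t[$i+$j]+=$a[$i]*$a[$j];
+        }
+    }
+    $_*=2 for (@t);                    # b) shift result left by 1...
+    $t[2*$_]+=$a[$_]*$a[$_] for (0..$top-1);   # ...and add the squares
+    for (my $k=0;$k<2*$top-1;$k++) {   # propagate carries between words
+        $t[$k+1]+=$t[$k]>>$bits;
+        $t[$k]&=(1<<$bits)-1;
+    }
+    return @t;                         # 2*$top words of a*a
+}
+# e.g. _ref_sqr_words(8,0x78,0x56,0x34,0x12) squares 0x12345678 word-wise.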
+$code.=<<___;
+.type bn_sqr4x_mont,\@function,6
+.align 16
+bn_sqr4x_mont:
+.Lsqr4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ shl \$3,${num}d # convert $num to bytes
+ xor %r10,%r10
+ mov %rsp,%r11 # put aside %rsp
+ sub $num,%r10 # -$num
+ mov ($n0),$n0 # *n0
+ lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
+ and \$-1024,%rsp # minimize TLB usage
+ ##############################################################
+ # Stack layout
+ #
+ # +0 saved $num, used in reduction section
+ # +8 &t[2*$num], used in reduction section
+ # +32 saved $rptr
+ # +40 saved $nptr
+ # +48 saved *n0
+ # +56 saved %rsp
+ # +64 t[2*$num]
+ #
+ mov $rptr,32(%rsp) # save $rptr
+ mov $nptr,40(%rsp)
+ mov $n0, 48(%rsp)
+ mov %r11, 56(%rsp) # save original %rsp
+.Lsqr4x_body:
+ ##############################################################
+ # Squaring part:
+ #
+ # a) multiply-n-add everything but a[i]*a[i];
+ # b) shift result of a) by 1 to the left and accumulate
+ # a[i]*a[i] products;
+ #
+ lea 32(%r10),$i # $i=-($num-32)
+ lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
+
+ mov $num,$j # $j=$num
+
+ # comments apply to $num==8 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mul $a0 # a[1]*a[0]
+ mov %rax,$A0[0] # a[1]*a[0]
+ mov $ai,%rax # a[2]
+ mov %rdx,$A0[1]
+ mov $A0[0],-24($tptr,$i) # t[1]
+
+ xor $A0[0],$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr,$i) # t[2]
+
+ lea -16($i),$j # j=-16
+
+
+ mov 8($aptr,$j),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ mov %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ mov %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 16($j),$j
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[3]
+ jmp .Lsqr4x_1st
+
+.align 16
+.Lsqr4x_1st:
+ mov ($aptr,$j),$ai # a[4]
+ xor $A1[0],$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],($tptr,$j) # t[4]
+
+
+ mov 8($aptr,$j),$ai # a[5]
+ xor $A1[1],$A1[1]
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],8($tptr,$j) # t[5]
+
+ mov 16($aptr,$j),$ai # a[6]
+ xor $A1[0],$A1[0]
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1] # a[5]*a[3]+t[6]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[6]*a[2]
+ add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],16($tptr,$j) # t[6]
+
+
+ mov 24($aptr,$j),$ai # a[7]
+ xor $A1[1],$A1[1]
+ mul $a1 # a[6]*a[5]
+ add %rax,$A1[0] # a[6]*a[5]+t[7]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 32($j),$j
+ adc \$0,$A0[1]
+ mul $a0 # a[7]*a[4]
+ add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[7]
+
+ cmp \$0,$j
+ jne .Lsqr4x_1st
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[7]*a[5]
+ add %rax,$A1[1]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[8]
+ lea 16($i),$i
+ mov $A1[0],8($tptr) # t[9]
+ jmp .Lsqr4x_outer
+
+.align 16
+.Lsqr4x_outer: # comments apply to $num==6 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mov -24($tptr,$i),$A0[0] # t[1]
+ xor $A0[1],$A0[1]
+ mul $a0 # a[1]*a[0]
+ add %rax,$A0[0] # a[1]*a[0]+t[1]
+ mov $ai,%rax # a[2]
+ adc %rdx,$A0[1]
+ mov $A0[0],-24($tptr,$i) # t[1]
+
+ xor $A0[0],$A0[0]
+ add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
+ adc \$0,$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr,$i) # t[2]
+
+ lea -16($i),$j # j=-16
+ xor $A1[0],$A1[0]
+
+
+ mov 8($aptr,$j),$ai # a[3]
+ xor $A1[1],$A1[1]
+ add 8($tptr,$j),$A1[0]
+ adc \$0,$A1[1]
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],8($tptr,$j) # t[3]
+
+ lea 16($j),$j
+ jmp .Lsqr4x_inner
+
+.align 16
+.Lsqr4x_inner:
+ mov ($aptr,$j),$ai # a[4]
+ xor $A1[0],$A1[0]
+ add ($tptr,$j),$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ adc %rdx,$A1[0]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ adc %rdx,$A0[0]
+ mov $A0[1],($tptr,$j) # t[4]
+
+ mov 8($aptr,$j),$ai # a[5]
+ xor $A1[1],$A1[1]
+ add 8($tptr,$j),$A1[0]
+ adc \$0,$A1[1]
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A1[1]
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ lea 16($j),$j # j++
+ adc \$0,$A0[1]
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
+
+ cmp \$0,$j
+ jne .Lsqr4x_inner
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
+ mov $A1[0],8($tptr) # t[7], "preloaded t[3]" below
+
+ add \$16,$i
+ jnz .Lsqr4x_outer
+
+ # comments apply to $num==4 case
+ mov -32($aptr),$a0 # a[0]
+ lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr),$ai # a[2]
+ mov %rax,$a1
+
+ xor $A0[1],$A0[1]
+ mul $a0 # a[1]*a[0]
+ add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
+ mov $ai,%rax # a[2]
+ adc %rdx,$A0[1]
+ mov $A0[0],-24($tptr) # t[1]
+
+ xor $A0[0],$A0[0]
+ add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
+ adc \$0,$A0[0]
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc %rdx,$A0[0]
+ mov $A0[1],-16($tptr) # t[2]
+
+ mov -8($aptr),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
+ mov $ai,%rax
+ adc \$0,%rdx
+
+ xor $A0[1],$A0[1]
+ add $A1[0],$A0[0]
+ mov %rdx,$A1[1]
+ adc \$0,$A0[1]
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc %rdx,$A0[1]
+ mov $A0[0],-8($tptr) # t[3]
+
+ xor $A1[0],$A1[0]
+ add $A0[1],$A1[1]
+ adc \$0,$A1[0]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1]
+ mov -16($aptr),%rax # a[2]
+ adc %rdx,$A1[0]
+
+ mov $A1[1],($tptr) # t[4]
+ mov $A1[0],8($tptr) # t[5]
+
+ mul $ai # a[2]*a[3]
+___
+{
+my ($shift,$carry)=($a0,$a1);
+my @S=(@A1,$ai,$n0);
+$code.=<<___;
+ add \$16,$i
+ xor $shift,$shift
+ sub $num,$i # $i=16-$num
+ xor $carry,$carry
+
+ add $A1[0],%rax # t[5]
+ adc \$0,%rdx
+ mov %rax,8($tptr) # t[5]
+ mov %rdx,16($tptr) # t[6]
+ mov $carry,24($tptr) # t[7]
+
+ mov -16($aptr,$i),%rax # a[0]
+ lea 64(%rsp,$num,2),$tptr
+ xor $A0[0],$A0[0] # t[0]
+ mov -24($tptr,$i,2),$A0[1] # t[1]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],-24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],-16($tptr,$i,2)
+ adc %rdx,$S[3]
+ lea 16($i),$i
+ mov $S[3],-40($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ jmp .Lsqr4x_shift_n_add
+
+.align 16
+.Lsqr4x_shift_n_add:
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],-24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],-16($tptr,$i,2)
+ adc %rdx,$S[3]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ mov $S[3],-8($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov 16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 24($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov 8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],0($tptr,$i,2)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],8($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 32($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 40($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 16($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],16($tptr,$i,2)
+ adc %rdx,$S[3]
+ mov $S[3],24($tptr,$i,2)
+ sbb $carry,$carry # mov cf,$carry
+ add \$32,$i
+ jnz .Lsqr4x_shift_n_add
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
+ mov $S[1],-24($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ adc %rax,$S[2]
+ adc %rdx,$S[3]
+ mov $S[2],-16($tptr)
+ mov $S[3],-8($tptr)
+___
+}
+##############################################################
+# Montgomery reduction part, "word-by-word" algorithm.
+#
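+# Illustrative reference model of the reduction below (an assumption of a
+# small word size $bits keeps native Perl integers exact; the sub is never
+# called).  n0 is -n^-1 mod 2^$bits, i.e. what the real code loads from
+# *n0.  Each step picks m = t[i]*n0 mod B and adds m*n*B^i, zeroing t[i];
+# after num steps the low half of t[] is zero and the high half plus the
+# carried-out "topbit" is the result, still subject to one subtraction.
+sub _ref_mont_reduce {
+    my ($bits,$n0,$t,$n)=@_;           # $t: 2*num words, $n: num words
+    my @t=@$t;
+    my $top=scalar(@$n);
+    my $mask=(1<<$bits)-1;
+    my $topbit=0;
+    for (my $i=0;$i<$top;$i++) {
+        my $m=($t[$i]*$n0)&$mask;      # the m0/m1 multipliers below
+        my $carry=0;
+        for (my $j=0;$j<$top;$j++) {   # t[i..i+num-1] += m*n
+            my $v=$t[$i+$j]+$m*$n->[$j]+$carry;
+            $t[$i+$j]=$v&$mask;        # t[i] becomes 0 when j==0
+            $carry=$v>>$bits;
+        }
+        for (my $j=$i+$top;$carry && $j<2*$top;$j++) {
+            my $v=$t[$j]+$carry;       # ripple the carry upwards
+            $t[$j]=$v&$mask;
+            $carry=$v>>$bits;
+        }
+        $topbit+=$carry;               # carry out of t[2*num-1]
+    }
+    return ([@t[$top..2*$top-1]],$topbit);
+}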
+{
+my ($topbit,$nptr)=("%rbp",$aptr);
+my ($m0,$m1)=($a0,$a1);
+my @Ni=("%rbx","%r9");
+$code.=<<___;
+ mov 40(%rsp),$nptr # restore $nptr
+ mov 48(%rsp),$n0 # restore *n0
+ xor $j,$j
+ mov $num,0(%rsp) # save $num
+ sub $num,$j # $j=-$num
+ mov 64(%rsp),$A0[0] # t[0] # modsched #
+ mov $n0,$m0 # # modsched #
+ lea 64(%rsp,$num,2),%rax # end of t[] buffer
+ lea 64(%rsp,$num),$tptr # end of t[] window
+ mov %rax,8(%rsp) # save end of t[] buffer
+ lea ($nptr,$num),$nptr # end of n[] buffer
+ xor $topbit,$topbit # $topbit=0
+
+ mov 0($nptr,$j),%rax # n[0] # modsched #
+ mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
+ imulq $A0[0],$m0 # m0=t[0]*n0 # modsched #
+ mov %rax,$Ni[0] # # modsched #
+ jmp .Lsqr4x_mont_outer
+
+.align 16
+.Lsqr4x_mont_outer:
+ xor $A0[1],$A0[1]
+ mul $m0 # n[0]*m0
+ add %rax,$A0[0] # n[0]*m0+t[0]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+ mov $n0,$m1
+
+ xor $A0[0],$A0[0]
+ add 8($tptr,$j),$A0[1]
+ adc \$0,$A0[0]
+ mul $m0 # n[1]*m0
+ add %rax,$A0[1] # n[1]*m0+t[1]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+
+ imulq $A0[1],$m1
+
+ mov 16($nptr,$j),$Ni[0] # n[2]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[0]*m1
+ add %rax,$A1[0] # n[0]*m1+"t[1]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],8($tptr,$j) # "t[1]"
+
+ xor $A0[1],$A0[1]
+ add 16($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[2]*m0
+ add %rax,$A0[0] # n[2]*m0+t[2]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 24($nptr,$j),$Ni[1] # n[3]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[1]*m1
+ add %rax,$A1[1] # n[1]*m1+"t[2]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],16($tptr,$j) # "t[2]"
+
+ xor $A0[0],$A0[0]
+ add 24($tptr,$j),$A0[1]
+ lea 32($j),$j
+ adc \$0,$A0[0]
+ mul $m0 # n[3]*m0
+ add %rax,$A0[1] # n[3]*m0+t[3]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+ jmp .Lsqr4x_mont_inner
+
+.align 16
+.Lsqr4x_mont_inner:
+ mov ($nptr,$j),$Ni[0] # n[4]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[2]*m1
+ add %rax,$A1[0] # n[2]*m1+"t[3]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],-8($tptr,$j) # "t[3]"
+
+ xor $A0[1],$A0[1]
+ add ($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[4]*m0
+ add %rax,$A0[0] # n[4]*m0+t[4]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 8($nptr,$j),$Ni[1] # n[5]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[3]*m1
+ add %rax,$A1[1] # n[3]*m1+"t[4]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],($tptr,$j) # "t[4]"
+
+ xor $A0[0],$A0[0]
+ add 8($tptr,$j),$A0[1]
+ adc \$0,$A0[0]
+ mul $m0 # n[5]*m0
+ add %rax,$A0[1] # n[5]*m0+t[5]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+
+
+ mov 16($nptr,$j),$Ni[0] # n[6]
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[4]*m1
+ add %rax,$A1[0] # n[4]*m1+"t[5]"
+ mov $Ni[0],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],8($tptr,$j) # "t[5]"
+
+ xor $A0[1],$A0[1]
+ add 16($tptr,$j),$A0[0]
+ adc \$0,$A0[1]
+ mul $m0 # n[6]*m0
+ add %rax,$A0[0] # n[6]*m0+t[6]
+ mov $Ni[1],%rax
+ adc %rdx,$A0[1]
+
+ mov 24($nptr,$j),$Ni[1] # n[7]
+ xor $A1[0],$A1[0]
+ add $A0[0],$A1[1]
+ adc \$0,$A1[0]
+ mul $m1 # n[5]*m1
+ add %rax,$A1[1] # n[5]*m1+"t[6]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[0]
+ mov $A1[1],16($tptr,$j) # "t[6]"
+
+ xor $A0[0],$A0[0]
+ add 24($tptr,$j),$A0[1]
+ lea 32($j),$j
+ adc \$0,$A0[0]
+ mul $m0 # n[7]*m0
+ add %rax,$A0[1] # n[7]*m0+t[7]
+ mov $Ni[0],%rax
+ adc %rdx,$A0[0]
+ cmp \$0,$j
+ jne .Lsqr4x_mont_inner
+
+ sub 0(%rsp),$j # $j=-$num # modsched #
+ mov $n0,$m0 # # modsched #
+
+ xor $A1[1],$A1[1]
+ add $A0[1],$A1[0]
+ adc \$0,$A1[1]
+ mul $m1 # n[6]*m1
+ add %rax,$A1[0] # n[6]*m1+"t[7]"
+ mov $Ni[1],%rax
+ adc %rdx,$A1[1]
+ mov $A1[0],-8($tptr) # "t[7]"
+
+ xor $A0[1],$A0[1]
+ add ($tptr),$A0[0] # +t[8]
+ adc \$0,$A0[1]
+ mov 0($nptr,$j),$Ni[0] # n[0] # modsched #
+ add $topbit,$A0[0]
+ adc \$0,$A0[1]
+
+ imulq 16($tptr,$j),$m0 # m0=t[0]*n0 # modsched #
+ xor $A1[0],$A1[0]
+ mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
+ add $A0[0],$A1[1]
+ mov 16($tptr,$j),$A0[0] # t[0] # modsched #
+ adc \$0,$A1[0]
+ mul $m1 # n[7]*m1
+ add %rax,$A1[1] # n[7]*m1+"t[8]"
+ mov $Ni[0],%rax # # modsched #
+ adc %rdx,$A1[0]
+ mov $A1[1],($tptr) # "t[8]"
+
+ xor $topbit,$topbit
+ add 8($tptr),$A1[0] # +t[9]
+ adc $topbit,$topbit
+ add $A0[1],$A1[0]
+ lea 16($tptr),$tptr # "t[$num]>>128"
+ adc \$0,$topbit
+ mov $A1[0],-8($tptr) # "t[9]"
+ cmp 8(%rsp),$tptr # are we done?
+ jb .Lsqr4x_mont_outer
+
+ mov 0(%rsp),$num # restore $num
+ mov $topbit,($tptr) # save $topbit
+___
+}
+##############################################################
+# Post-condition, 4x unrolled copy from bn_mul_mont
+#
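+# Reference model (illustrative only, small word size assumed) of this
+# post-condition: subtract the modulus once with borrow, then use the
+# borrow as an all-ones/all-zeroes mask to keep either the unreduced or
+# the reduced value, analogous to the and/not/or pointer select below,
+# so no branch depends on the data.
+sub _ref_post_condition {
+    my ($bits,$topbit,$t,$n)=@_;       # $t,$n: num-word array refs
+    my $mask=(1<<$bits)-1;
+    my $top=scalar(@$n);
+    my @r; my $borrow=0;
+    for (my $i=0;$i<$top;$i++) {       # r = t - n
+        my $v=$t->[$i]-$n->[$i]-$borrow;
+        $borrow=$v<0?1:0;
+        push(@r,$v&$mask);
+    }
+    $borrow=($borrow-$topbit)>0?1:0;   # topbit absorbs the borrow
+    my $sel=$borrow?$mask:0;           # all-ones if t-n underflowed
+    return [ map { ($t->[$_]&$sel)|($r[$_]&~$sel&$mask) } 0..$top-1 ];
+}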
+{
+my ($tptr,$nptr)=("%rbx",$aptr);
+my @ri=("%rax","%rdx","%r10","%r11");
+$code.=<<___;
+ mov 64(%rsp,$num),@ri[0] # tp[0]
+ lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result
+ mov 40(%rsp),$nptr # restore $nptr
+ shr \$5,$num # num/4
+ mov 8($tptr),@ri[1] # t[1]
+ xor $i,$i # i=0 and clear CF!
+
+ mov 32(%rsp),$rptr # restore $rptr
+ sub 0($nptr),@ri[0]
+ mov 16($tptr),@ri[2] # t[2]
+ mov 24($tptr),@ri[3] # t[3]
+ sbb 8($nptr),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsqr4x_sub
+.align 16
+.Lsqr4x_sub:
+ mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($nptr,$i,8),@ri[2]
+ mov 32($tptr,$i,8),@ri[0] # tp[i+1]
+ mov 40($tptr,$i,8),@ri[1]
+ sbb 24($nptr,$i,8),@ri[3]
+ mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($nptr,$i,8),@ri[0]
+ mov 48($tptr,$i,8),@ri[2]
+ mov 56($tptr,$i,8),@ri[3]
+ sbb 40($nptr,$i,8),@ri[1]
+ lea 4($i),$i # i++
+ dec $j # doesn't affect CF!
+ jnz .Lsqr4x_sub
+
+ mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($tptr,$i,8),@ri[0] # load overflow bit
+ sbb 16($nptr,$i,8),@ri[2]
+ mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($nptr,$i,8),@ri[3]
+ mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$tptr
+ not @ri[0]
+ mov $rptr,$nptr
+ and @ri[0],$nptr
+ lea -1($num),$j
+ or $nptr,$tptr # tp=borrow?tp:rp
+
+ pxor %xmm0,%xmm0
+ lea 64(%rsp,$num,8),$nptr
+ movdqu ($tptr),%xmm1
+ lea ($nptr,$num,8),$nptr
+ movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
+ movdqa %xmm0,($nptr) # zap upper half of temporary vector
+ movdqu %xmm1,($rptr)
+ jmp .Lsqr4x_copy
+.align 16
+.Lsqr4x_copy: # copy or in-place refresh
+ movdqu 16($tptr,$i),%xmm2
+ movdqu 32($tptr,$i),%xmm1
+ movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
+ movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
+ movdqu %xmm2,16($rptr,$i)
+ movdqu %xmm1,32($rptr,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lsqr4x_copy
+
+ movdqu 16($tptr,$i),%xmm2
+ movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
+ movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
+ movdqu %xmm2,16($rptr,$i)
+___
+}
+$code.=<<___;
+ mov 56(%rsp),%rsi # restore %rsp
+ mov \$1,%rax
+ mov 0(%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lsqr4x_epilogue:
+ ret
+.size bn_sqr4x_mont,.-bn_sqr4x_mont
+___
+}}}
+$code.=<<___;
+.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mul_handler,\@abi-omnipotent
+.align 16
+mul_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 192($context),%r10 # pull $num
+ mov 8(%rax,%r10,8),%rax # pull saved stack pointer
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+ jmp .Lcommon_seh_tail
+.size mul_handler,.-mul_handler
+
+.type sqr_handler,\@abi-omnipotent
+.align 16
+sqr_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lsqr4x_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lsqr4x_body
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lsqr4x_epilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lsqr4x_epilogue
+ jae .Lcommon_seh_tail
+
+ mov 56(%rax),%rax # pull saved stack pointer
+ lea 48(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size sqr_handler,.-sqr_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_bn_mul_mont
+ .rva .LSEH_end_bn_mul_mont
+ .rva .LSEH_info_bn_mul_mont
+
+ .rva .LSEH_begin_bn_mul4x_mont
+ .rva .LSEH_end_bn_mul4x_mont
+ .rva .LSEH_info_bn_mul4x_mont
+
+ .rva .LSEH_begin_bn_sqr4x_mont
+ .rva .LSEH_end_bn_sqr4x_mont
+ .rva .LSEH_info_bn_sqr4x_mont
+
+.section .xdata
+.align 8
+.LSEH_info_bn_mul_mont:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
+.LSEH_info_bn_mul4x_mont:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.LSEH_info_bn_sqr4x_mont:
+ .byte 9,0,0,0
+ .rva sqr_handler
+___
+}
+
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.S b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.S
new file mode 100644
index 00000000..49ec6ac6
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.S
@@ -0,0 +1,784 @@
+.text
+
+.globl bn_mul_mont_gather5
+.type bn_mul_mont_gather5,@function
+.align 64
+bn_mul_mont_gather5:
+ testl $3,%r9d
+ jnz .Lmul_enter
+ cmpl $8,%r9d
+ jb .Lmul_enter
+ jmp .Lmul4x_enter
+
+.align 16
+.Lmul_enter:
+ movl %r9d,%r9d
+ movl 8(%rsp),%r10d
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%rax
+ leaq 2(%r9),%r11
+ negq %r11
+ leaq (%rsp,%r11,8),%rsp
+ andq $-1024,%rsp
+
+ movq %rax,8(%rsp,%r9,8)
+.Lmul_body:
+ movq %rdx,%r12
+ movq %r10,%r11
+ shrq $3,%r10
+ andq $7,%r11
+ notq %r10
+ leaq .Lmagic_masks(%rip),%rax
+ andq $3,%r10
+ leaq 96(%r12,%r11,8),%r12
+ movq 0(%rax,%r10,8),%xmm4
+ movq 8(%rax,%r10,8),%xmm5
+ movq 16(%rax,%r10,8),%xmm6
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq -96(%r12),%xmm0
+ movq -32(%r12),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%r12),%xmm2
+ pand %xmm5,%xmm1
+ movq 96(%r12),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ leaq 256(%r12),%r12
+ por %xmm3,%xmm0
+
+.byte 102,72,15,126,195
+
+ movq (%r8),%r8
+ movq (%rsi),%rax
+
+ xorq %r14,%r14
+ xorq %r15,%r15
+
+ movq -96(%r12),%xmm0
+ movq -32(%r12),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%r12),%xmm2
+ pand %xmm5,%xmm1
+
+ movq %r8,%rbp
+ mulq %rbx
+ movq %rax,%r10
+ movq (%rcx),%rax
+
+ movq 96(%r12),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ por %xmm2,%xmm0
+ leaq 256(%r12),%r12
+ por %xmm3,%xmm0
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%r13
+
+ leaq 1(%r15),%r15
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ addq %rax,%r13
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%r13
+ movq %r10,%r11
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+.L1st_enter:
+ mulq %rbx
+ addq %rax,%r11
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ leaq 1(%r15),%r15
+ movq %rdx,%r10
+
+ mulq %rbp
+ cmpq %r9,%r15
+ jne .L1st
+
+.byte 102,72,15,126,195
+
+ addq %rax,%r13
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+ movq %r10,%r11
+
+ xorq %rdx,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+ movq %r13,-8(%rsp,%r9,8)
+ movq %rdx,(%rsp,%r9,8)
+
+ leaq 1(%r14),%r14
+ jmp .Louter
+.align 16
+.Louter:
+ xorq %r15,%r15
+ movq %r8,%rbp
+ movq (%rsp),%r10
+
+ movq -96(%r12),%xmm0
+ movq -32(%r12),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%r12),%xmm2
+ pand %xmm5,%xmm1
+
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx),%rax
+ adcq $0,%rdx
+
+ movq 96(%r12),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ por %xmm2,%xmm0
+ leaq 256(%r12),%r12
+ por %xmm3,%xmm0
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq 8(%rsp),%r10
+ movq %rdx,%r13
+
+ leaq 1(%r15),%r15
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ addq %rax,%r13
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ movq (%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+.Linner_enter:
+ mulq %rbx
+ addq %rax,%r11
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%r10
+ movq %rdx,%r11
+ adcq $0,%r11
+ leaq 1(%r15),%r15
+
+ mulq %rbp
+ cmpq %r9,%r15
+ jne .Linner
+
+.byte 102,72,15,126,195
+
+ addq %rax,%r13
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ movq (%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %r13,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ xorq %rdx,%rdx
+ addq %r11,%r13
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-8(%rsp,%r9,8)
+ movq %rdx,(%rsp,%r9,8)
+
+ leaq 1(%r14),%r14
+ cmpq %r9,%r14
+ jl .Louter
+
+ xorq %r14,%r14
+ movq (%rsp),%rax
+ leaq (%rsp),%rsi
+ movq %r9,%r15
+ jmp .Lsub
+.align 16
+.Lsub: sbbq (%rcx,%r14,8),%rax
+ movq %rax,(%rdi,%r14,8)
+ movq 8(%rsi,%r14,8),%rax
+ leaq 1(%r14),%r14
+ decq %r15
+ jnz .Lsub
+
+ sbbq $0,%rax
+ xorq %r14,%r14
+ andq %rax,%rsi
+ notq %rax
+ movq %rdi,%rcx
+ andq %rax,%rcx
+ movq %r9,%r15
+ orq %rcx,%rsi
+.align 16
+.Lcopy:
+ movq (%rsi,%r14,8),%rax
+ movq %r14,(%rsp,%r14,8)
+ movq %rax,(%rdi,%r14,8)
+ leaq 1(%r14),%r14
+ subq $1,%r15
+ jnz .Lcopy
+
+ movq 8(%rsp,%r9,8),%rsi
+ movq $1,%rax
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lmul_epilogue:
+ .byte 0xf3,0xc3
+.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
+.type bn_mul4x_mont_gather5,@function
+.align 16
+bn_mul4x_mont_gather5:
+.Lmul4x_enter:
+ movl %r9d,%r9d
+ movl 8(%rsp),%r10d
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%rax
+ leaq 4(%r9),%r11
+ negq %r11
+ leaq (%rsp,%r11,8),%rsp
+ andq $-1024,%rsp
+
+ movq %rax,8(%rsp,%r9,8)
+.Lmul4x_body:
+ movq %rdi,16(%rsp,%r9,8)
+ movq %rdx,%r12
+ movq %r10,%r11
+ shrq $3,%r10
+ andq $7,%r11
+ notq %r10
+ leaq .Lmagic_masks(%rip),%rax
+ andq $3,%r10
+ leaq 96(%r12,%r11,8),%r12
+ movq 0(%rax,%r10,8),%xmm4
+ movq 8(%rax,%r10,8),%xmm5
+ movq 16(%rax,%r10,8),%xmm6
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq -96(%r12),%xmm0
+ movq -32(%r12),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%r12),%xmm2
+ pand %xmm5,%xmm1
+ movq 96(%r12),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ leaq 256(%r12),%r12
+ por %xmm3,%xmm0
+
+.byte 102,72,15,126,195
+ movq (%r8),%r8
+ movq (%rsi),%rax
+
+ xorq %r14,%r14
+ xorq %r15,%r15
+
+ movq -96(%r12),%xmm0
+ movq -32(%r12),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%r12),%xmm2
+ pand %xmm5,%xmm1
+
+ movq %r8,%rbp
+ mulq %rbx
+ movq %rax,%r10
+ movq (%rcx),%rax
+
+ movq 96(%r12),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ por %xmm2,%xmm0
+ leaq 256(%r12),%r12
+ por %xmm3,%xmm0
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx),%rax
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq 16(%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ leaq 4(%r15),%r15
+ adcq $0,%rdx
+ movq %rdi,(%rsp)
+ movq %rdx,%r13
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq 8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-8(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ leaq 4(%r15),%r15
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq -16(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-32(%rsp,%r15,8)
+ movq %rdx,%r13
+ cmpq %r9,%r15
+ jl .L1st4x
+
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %rdi,-16(%rsp,%r15,8)
+ movq %rdx,%r13
+
+.byte 102,72,15,126,195
+
+ xorq %rdi,%rdi
+ addq %r10,%r13
+ adcq $0,%rdi
+ movq %r13,-8(%rsp,%r15,8)
+ movq %rdi,(%rsp,%r15,8)
+
+ leaq 1(%r14),%r14
+.align 4
+.Louter4x:
+ xorq %r15,%r15
+ movq -96(%r12),%xmm0
+ movq -32(%r12),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%r12),%xmm2
+ pand %xmm5,%xmm1
+
+ movq (%rsp),%r10
+ movq %r8,%rbp
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx),%rax
+ adcq $0,%rdx
+
+ movq 96(%r12),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq %r10,%rbp
+ movq %rdx,%r11
+
+ por %xmm2,%xmm0
+ leaq 256(%r12),%r12
+ por %xmm3,%xmm0
+
+ mulq %rbp
+ addq %rax,%r10
+ movq 8(%rsi),%rax
+ adcq $0,%rdx
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx),%rax
+ adcq $0,%rdx
+ addq 8(%rsp),%r11
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq 16(%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ leaq 4(%r15),%r15
+ adcq $0,%rdx
+ movq %rdx,%r13
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -16(%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %rdi,-32(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -8(%rsp,%r15,8),%r11
+ adcq $0,%rdx
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%r13
+
+ mulq %rbx
+ addq %rax,%r10
+ movq (%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq (%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq 8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %rdi,-16(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq 8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq 8(%rsp,%r15,8),%r11
+ adcq $0,%rdx
+ leaq 4(%r15),%r15
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq -16(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %r13,-40(%rsp,%r15,8)
+ movq %rdx,%r13
+ cmpq %r9,%r15
+ jl .Linner4x
+
+ mulq %rbx
+ addq %rax,%r10
+ movq -16(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -16(%rsp,%r15,8),%r10
+ adcq $0,%rdx
+ movq %rdx,%r11
+
+ mulq %rbp
+ addq %rax,%r13
+ movq -8(%rsi,%r15,8),%rax
+ adcq $0,%rdx
+ addq %r10,%r13
+ adcq $0,%rdx
+ movq %rdi,-32(%rsp,%r15,8)
+ movq %rdx,%rdi
+
+ mulq %rbx
+ addq %rax,%r11
+ movq -8(%rcx,%r15,8),%rax
+ adcq $0,%rdx
+ addq -8(%rsp,%r15,8),%r11
+ adcq $0,%rdx
+ leaq 1(%r14),%r14
+ movq %rdx,%r10
+
+ mulq %rbp
+ addq %rax,%rdi
+ movq (%rsi),%rax
+ adcq $0,%rdx
+ addq %r11,%rdi
+ adcq $0,%rdx
+ movq %r13,-24(%rsp,%r15,8)
+ movq %rdx,%r13
+
+.byte 102,72,15,126,195
+ movq %rdi,-16(%rsp,%r15,8)
+
+ xorq %rdi,%rdi
+ addq %r10,%r13
+ adcq $0,%rdi
+ addq (%rsp,%r9,8),%r13
+ adcq $0,%rdi
+ movq %r13,-8(%rsp,%r15,8)
+ movq %rdi,(%rsp,%r15,8)
+
+ cmpq %r9,%r14
+ jl .Louter4x
+ movq 16(%rsp,%r9,8),%rdi
+ movq 0(%rsp),%rax
+ pxor %xmm0,%xmm0
+ movq 8(%rsp),%rdx
+ shrq $2,%r9
+ leaq (%rsp),%rsi
+ xorq %r14,%r14
+
+ subq 0(%rcx),%rax
+ movq 16(%rsi),%rbx
+ movq 24(%rsi),%rbp
+ sbbq 8(%rcx),%rdx
+ leaq -1(%r9),%r15
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ movq %rax,0(%rdi,%r14,8)
+ movq %rdx,8(%rdi,%r14,8)
+ sbbq 16(%rcx,%r14,8),%rbx
+ movq 32(%rsi,%r14,8),%rax
+ movq 40(%rsi,%r14,8),%rdx
+ sbbq 24(%rcx,%r14,8),%rbp
+ movq %rbx,16(%rdi,%r14,8)
+ movq %rbp,24(%rdi,%r14,8)
+ sbbq 32(%rcx,%r14,8),%rax
+ movq 48(%rsi,%r14,8),%rbx
+ movq 56(%rsi,%r14,8),%rbp
+ sbbq 40(%rcx,%r14,8),%rdx
+ leaq 4(%r14),%r14
+ decq %r15
+ jnz .Lsub4x
+
+ movq %rax,0(%rdi,%r14,8)
+ movq 32(%rsi,%r14,8),%rax
+ sbbq 16(%rcx,%r14,8),%rbx
+ movq %rdx,8(%rdi,%r14,8)
+ sbbq 24(%rcx,%r14,8),%rbp
+ movq %rbx,16(%rdi,%r14,8)
+
+ sbbq $0,%rax
+ movq %rbp,24(%rdi,%r14,8)
+ xorq %r14,%r14
+ andq %rax,%rsi
+ notq %rax
+ movq %rdi,%rcx
+ andq %rax,%rcx
+ leaq -1(%r9),%r15
+ orq %rcx,%rsi
+
+ movdqu (%rsi),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,(%rdi)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x:
+ movdqu 16(%rsi,%r14,1),%xmm2
+ movdqu 32(%rsi,%r14,1),%xmm1
+ movdqa %xmm0,16(%rsp,%r14,1)
+ movdqu %xmm2,16(%rdi,%r14,1)
+ movdqa %xmm0,32(%rsp,%r14,1)
+ movdqu %xmm1,32(%rdi,%r14,1)
+ leaq 32(%r14),%r14
+ decq %r15
+ jnz .Lcopy4x
+
+ shlq $2,%r9
+ movdqu 16(%rsi,%r14,1),%xmm2
+ movdqa %xmm0,16(%rsp,%r14,1)
+ movdqu %xmm2,16(%rdi,%r14,1)
+ movq 8(%rsp,%r9,8),%rsi
+ movq $1,%rax
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lmul4x_epilogue:
+ .byte 0xf3,0xc3
+.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
+.globl bn_scatter5
+.type bn_scatter5,@function
+.align 16
+bn_scatter5:
+ cmpq $0,%rsi
+ jz .Lscatter_epilogue
+ leaq (%rdx,%rcx,8),%rdx
+.Lscatter:
+ movq (%rdi),%rax
+ leaq 8(%rdi),%rdi
+ movq %rax,(%rdx)
+ leaq 256(%rdx),%rdx
+ subq $1,%rsi
+ jnz .Lscatter
+.Lscatter_epilogue:
+ .byte 0xf3,0xc3
+.size bn_scatter5,.-bn_scatter5
+
+.globl bn_gather5
+.type bn_gather5,@function
+.align 16
+bn_gather5:
+ movq %rcx,%r11
+ shrq $3,%rcx
+ andq $7,%r11
+ notq %rcx
+ leaq .Lmagic_masks(%rip),%rax
+ andq $3,%rcx
+ leaq 96(%rdx,%r11,8),%rdx
+ movq 0(%rax,%rcx,8),%xmm4
+ movq 8(%rax,%rcx,8),%xmm5
+ movq 16(%rax,%rcx,8),%xmm6
+ movq 24(%rax,%rcx,8),%xmm7
+ jmp .Lgather
+.align 16
+.Lgather:
+ movq -96(%rdx),%xmm0
+ movq -32(%rdx),%xmm1
+ pand %xmm4,%xmm0
+ movq 32(%rdx),%xmm2
+ pand %xmm5,%xmm1
+ movq 96(%rdx),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ leaq 256(%rdx),%rdx
+ por %xmm3,%xmm0
+
+ movq %xmm0,(%rdi)
+ leaq 8(%rdi),%rdi
+ subq $1,%rsi
+ jnz .Lgather
+ .byte 0xf3,0xc3
+.LSEH_end_bn_gather5:
+.size bn_gather5,.-bn_gather5
+.align 64
+.Lmagic_masks:
+.long 0,0, 0,0, 0,0, -1,-1
+.long 0,0, 0,0, 0,0, 0,0
+.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.pl
new file mode 100755
index 00000000..dae0fe24
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/asm/x86_64-mont5.pl
@@ -0,0 +1,1071 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# August 2011.
+#
+# Companion to x86_64-mont.pl that optimizes cache-timing attack
+# countermeasures. The subroutines are produced by replacing bp[i]
+# references in their x86_64-mont.pl counterparts with cache-neutral
+# references to a powers table computed in BN_mod_exp_mont_consttime.
+# In addition, a subroutine that scatters elements of the powers table
+# is implemented, so that scatter/gather can be tuned without modifying
+# bn_exp.c.
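+#
+# A Perl sketch (illustrative reference only, not used when generating the
+# assembly) of the powers-table layout that bn_scatter5/bn_gather5 below
+# implement: power number idx (0..2^5-1) of a num-word value is interlaced
+# so that its word i lives at tbl[i*32 + idx].
+sub _ref_scatter5 {                    # store one power at slot $idx
+    my ($inp,$tbl,$idx)=@_;
+    $tbl->[$_*32+$idx]=$inp->[$_] for (0..$#{$inp});
+}
+sub _ref_gather5 {                     # fetch the power stored at slot $idx
+    my ($tbl,$cnt,$idx)=@_;
+    return [ map { $tbl->[$_*32+$idx] } (0..$cnt-1) ];
+}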
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+# int bn_mul_mont_gather5(
+$rp="%rdi"; # BN_ULONG *rp,
+$ap="%rsi"; # const BN_ULONG *ap,
+$bp="%rdx"; # const BN_ULONG *bp,
+$np="%rcx"; # const BN_ULONG *np,
+$n0="%r8"; # const BN_ULONG *n0,
+$num="%r9"; # int num,
+ # int idx); # 0 to 2^5-1, "index" in $bp holding
+ # pre-computed powers of a', interlaced
+ # in such manner that b[0] is $bp[idx],
+ # # b[1] is $bp[2^5+idx], etc.
+$lo0="%r10";
+$hi0="%r11";
+$hi1="%r13";
+$i="%r14";
+$j="%r15";
+$m0="%rbx";
+$m1="%rbp";
+
+$code=<<___;
+.text
+
+.globl bn_mul_mont_gather5
+.type bn_mul_mont_gather5,\@function,6
+.align 64
+bn_mul_mont_gather5:
+ test \$3,${num}d
+ jnz .Lmul_enter
+ cmp \$8,${num}d
+ jb .Lmul_enter
+ jmp .Lmul4x_enter
+
+.align 16
+.Lmul_enter:
+ mov ${num}d,${num}d
+ mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0x28(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+.Lmul_alloca:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ lea 2($num),%r11
+ neg %r11
+ lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+2))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul_body:
+ mov $bp,%r12 # reassign $bp
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
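+# The gather sequence emitted below never addresses memory with the secret
+# index at cache-line granularity: it reads one candidate from each of the
+# four cache lines spanning a 32-entry row and keeps the right one with
+# AND/OR masks.  A scalar Perl sketch of that select (illustrative only;
+# the -1/0 masks mirror .Lmagic_masks):
+sub _ref_mask_select4 {
+    my ($which,@cand)=@_;              # $which in 0..3, four candidates
+    my $r=0;
+    for my $k (0..3) {
+        my $m=($k==$which)?~0:0;       # all-ones or all-zeroes mask
+        $r|=$cand[$k]&$m;              # pand/por equivalent
+    }
+    return $r;
+}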
+$code.=<<___;
+ mov %r10,%r11
+ shr \$`log($N/8)/log(2)`,%r10
+ and \$`$N/8-1`,%r11
+ not %r10
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
+ lea 96($bp,%r11,8),$bp # pointer within 1st cache line
+ movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,%r10,8),%xmm5 # cache line contains element
+ movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ movq %xmm0,$m0 # m0=bp[0]
+
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$lo0
+ mov ($np),%rax
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .L1st
+
+ movq %xmm0,$m0 # bp[1]
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+ mov $lo0,$hi0
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ jmp .Louter
+.align 16
+.Louter:
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
+ mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .Linner
+
+ movq %xmm0,$m0 # bp[i+1]
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ add $lo0,$hi1 # pull upmost overflow bit
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ cmp $num,$i
+ jl .Louter
+
+ xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
+ jmp .Lsub
+.align 16
+.Lsub: sbb ($np,$i,8),%rax
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 8($ap,$i,8),%rax # tp[i+1]
+ lea 1($i),$i # i++
+ dec $j # doesn't affect CF!
+ jnz .Lsub
+
+ sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
+ and %rax,$ap
+ not %rax
+ mov $rp,$np
+ and %rax,$np
+ mov $num,$j # j=num
+ or $np,$ap # ap=borrow?tp:rp
+.align 16
+.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+___
+$code.=<<___ if ($win64);
+ movaps (%rsi),%xmm6
+ movaps 0x10(%rsi),%xmm7
+ lea 0x28(%rsi),%rsi
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont_gather5,\@function,6
+.align 16
+bn_mul4x_mont_gather5:
+.Lmul4x_enter:
+ mov ${num}d,${num}d
+ mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ lea -0x28(%rsp),%rsp
+ movaps %xmm6,(%rsp)
+ movaps %xmm7,0x10(%rsp)
+.Lmul4x_alloca:
+___
+$code.=<<___;
+ mov %rsp,%rax
+ lea 4($num),%r11
+ neg %r11
+ lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+4))
+ and \$-1024,%rsp # minimize TLB usage
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul4x_body:
+ mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
+ mov %rdx,%r12 # reassign $bp
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
+$code.=<<___;
+ mov %r10,%r11
+ shr \$`log($N/8)/log(2)`,%r10
+ and \$`$N/8-1`,%r11
+ not %r10
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
+ lea 96($bp,%r11,8),$bp # pointer within 1st cache line
+ movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,%r10,8),%xmm5 # cache line contains element
+ movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,%r10,8),%xmm7
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ movq %xmm0,$m0 # m0=bp[0]
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ mov %rdx,$A[1]
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4($j),$j # j++
+ adc \$0,%rdx
+ mov $N[1],(%rsp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ movq %xmm0,$m0 # bp[1]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+.align 4
+.Louter4x:
+ xor $j,$j # j=0
+ movq `0*$STRIDE/4-96`($bp),%xmm0
+ movq `1*$STRIDE/4-96`($bp),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($bp),%xmm2
+ pand %xmm5,%xmm1
+
+ mov (%rsp),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ movq `3*$STRIDE/4-96`($bp),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+
+ por %xmm2,%xmm0
+ lea $STRIDE($bp),$bp
+ por %xmm3,%xmm0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ add 8(%rsp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4($j),$j # j+=2
+ adc \$0,%rdx
+ mov %rdx,$N[0]
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ add 8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-40(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jl .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 1($i),$i # i++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ movq %xmm0,$m0 # bp[i+1]
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add (%rsp,$num,8),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ cmp $num,$i
+ jl .Louter4x
+___
+{
+my @ri=("%rax","%rdx",$m0,$m1);
+$code.=<<___;
+ mov 16(%rsp,$num,8),$rp # restore $rp
+ mov 0(%rsp),@ri[0] # tp[0]
+ pxor %xmm0,%xmm0
+ mov 8(%rsp),@ri[1] # tp[1]
+ shr \$2,$num # num/=4
+ lea (%rsp),$ap # borrow ap for tp
+ xor $i,$i # i=0 and clear CF!
+
+ sub 0($np),@ri[0]
+ mov 16($ap),@ri[2] # tp[2]
+ mov 24($ap),@ri[3] # tp[3]
+ sbb 8($np),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($np,$i,8),@ri[2]
+ mov 32($ap,$i,8),@ri[0] # tp[i+1]
+ mov 40($ap,$i,8),@ri[1]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($np,$i,8),@ri[0]
+ mov 48($ap,$i,8),@ri[2]
+ mov 56($ap,$i,8),@ri[3]
+ sbb 40($np,$i,8),@ri[1]
+ lea 4($i),$i # i++
+ dec $j # doesn't affect CF!
+ jnz .Lsub4x
+
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($ap,$i,8),@ri[0] # load overflow bit
+ sbb 16($np,$i,8),@ri[2]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$ap
+ not @ri[0]
+ mov $rp,$np
+ and @ri[0],$np
+ lea -1($num),$j
+ or $np,$ap # ap=borrow?tp:rp
+
+ movdqu ($ap),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,($rp)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x: # copy or in-place refresh
+ movdqu 16($ap,$i),%xmm2
+ movdqu 32($ap,$i),%xmm1
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+ movdqa %xmm0,32(%rsp,$i)
+ movdqu %xmm1,32($rp,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lcopy4x
+
+ shl \$2,$num
+ movdqu 16($ap,$i),%xmm2
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+___
+}
+$code.=<<___;
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+___
+$code.=<<___ if ($win64);
+ movaps (%rsi),%xmm6
+ movaps 0x10(%rsi),%xmm7
+ lea 0x28(%rsi),%rsi
+___
+$code.=<<___;
+ mov (%rsi),%r15
+ mov 8(%rsi),%r14
+ mov 16(%rsi),%r13
+ mov 24(%rsi),%r12
+ mov 32(%rsi),%rbp
+ mov 40(%rsi),%rbx
+ lea 48(%rsi),%rsp
+.Lmul4x_epilogue:
+ ret
+.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
+___
+}}}
+
+{
+my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx"); # Unix order
+my $out=$inp;
+my $STRIDE=2**5*8;
+my $N=$STRIDE/4;
+
+$code.=<<___;
+.globl bn_scatter5
+.type bn_scatter5,\@abi-omnipotent
+.align 16
+bn_scatter5:
+ cmp \$0, $num
+ jz .Lscatter_epilogue
+ lea ($tbl,$idx,8),$tbl
+.Lscatter:
+ mov ($inp),%rax
+ lea 8($inp),$inp
+ mov %rax,($tbl)
+ lea 32*8($tbl),$tbl
+ sub \$1,$num
+ jnz .Lscatter
+.Lscatter_epilogue:
+ ret
+.size bn_scatter5,.-bn_scatter5
+
+.globl bn_gather5
+.type bn_gather5,\@abi-omnipotent
+.align 16
+bn_gather5:
+___
+$code.=<<___ if ($win64);
+.LSEH_begin_bn_gather5:
+ # I can't trust assembler to use specific encoding:-(
+ .byte 0x48,0x83,0xec,0x28 #sub \$0x28,%rsp
+ .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp)
+ .byte 0x0f,0x29,0x7c,0x24,0x10 #movaps %xmm7,0x10(%rsp)
+___
+$code.=<<___;
+ mov $idx,%r11
+ shr \$`log($N/8)/log(2)`,$idx
+ and \$`$N/8-1`,%r11
+ not $idx
+ lea .Lmagic_masks(%rip),%rax
+ and \$`2**5/($N/8)-1`,$idx # 5 is "window size"
+ lea 96($tbl,%r11,8),$tbl # pointer within 1st cache line
+ movq 0(%rax,$idx,8),%xmm4 # set of masks denoting which
+ movq 8(%rax,$idx,8),%xmm5 # cache line contains element
+ movq 16(%rax,$idx,8),%xmm6 # denoted by 7th argument
+ movq 24(%rax,$idx,8),%xmm7
+ jmp .Lgather
+.align 16
+.Lgather:
+ movq `0*$STRIDE/4-96`($tbl),%xmm0
+ movq `1*$STRIDE/4-96`($tbl),%xmm1
+ pand %xmm4,%xmm0
+ movq `2*$STRIDE/4-96`($tbl),%xmm2
+ pand %xmm5,%xmm1
+ movq `3*$STRIDE/4-96`($tbl),%xmm3
+ pand %xmm6,%xmm2
+ por %xmm1,%xmm0
+ pand %xmm7,%xmm3
+ por %xmm2,%xmm0
+ lea $STRIDE($tbl),$tbl
+ por %xmm3,%xmm0
+
+ movq %xmm0,($out) # m0=bp[0]
+ lea 8($out),$out
+ sub \$1,$num
+ jnz .Lgather
+___
+$code.=<<___ if ($win64);
+ movaps (%rsp),%xmm6
+ movaps 0x10(%rsp),%xmm7
+ lea 0x28(%rsp),%rsp
+___
+$code.=<<___;
+ ret
+.LSEH_end_bn_gather5:
+.size bn_gather5,.-bn_gather5
+___
+}
+$code.=<<___;
+.align 64
+.Lmagic_masks:
+ .long 0,0, 0,0, 0,0, -1,-1
+ .long 0,0, 0,0, 0,0, 0,0
+.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mul_handler,\@abi-omnipotent
+.align 16
+mul_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ lea `40+48`(%rax),%rax
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # end of alloca label
+ cmp %r10,%rbx # context->Rip<end of alloca label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 8(%r11),%r10d # HandlerData[2]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 192($context),%r10 # pull $num
+ mov 8(%rax,%r10,8),%rax # pull saved stack pointer
+
+ movaps (%rax),%xmm0
+ movaps 16(%rax),%xmm1
+ lea `40+48`(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+ movups %xmm0,512($context) # restore context->Xmm6
+ movups %xmm1,528($context) # restore context->Xmm7
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size mul_handler,.-mul_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_bn_mul_mont_gather5
+ .rva .LSEH_end_bn_mul_mont_gather5
+ .rva .LSEH_info_bn_mul_mont_gather5
+
+ .rva .LSEH_begin_bn_mul4x_mont_gather5
+ .rva .LSEH_end_bn_mul4x_mont_gather5
+ .rva .LSEH_info_bn_mul4x_mont_gather5
+
+ .rva .LSEH_begin_bn_gather5
+ .rva .LSEH_end_bn_gather5
+ .rva .LSEH_info_bn_gather5
+
+.section .xdata
+.align 8
+.LSEH_info_bn_mul_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul_alloca,.Lmul_body,.Lmul_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_mul4x_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_alloca,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_gather5:
+ .byte 0x01,0x0d,0x05,0x00
+ .byte 0x0d,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
+ .byte 0x08,0x68,0x00,0x00 #movaps (rsp),xmm6
+ .byte 0x04,0x42,0x00,0x00 #sub rsp,0x28
+.align 8
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+close STDOUT;
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn.h b/ics-openvpn-stripped/main/openssl/crypto/bn/bn.h
new file mode 100644
index 00000000..e776c07a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn.h
@@ -0,0 +1,908 @@
+/* crypto/bn/bn.h */
+/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+/* ====================================================================
+ * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
+ *
+ * Portions of the attached software ("Contribution") are developed by
+ * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
+ *
+ * The Contribution is licensed pursuant to the Eric Young open source
+ * license provided above.
+ *
+ * The binary polynomial arithmetic software is originally written by
+ * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems Laboratories.
+ *
+ */
+
+#ifndef HEADER_BN_H
+#define HEADER_BN_H
+
+#include <openssl/e_os2.h>
+#ifndef OPENSSL_NO_FP_API
+#include <stdio.h> /* FILE */
+#endif
+#include <openssl/ossl_typ.h>
+#include <openssl/crypto.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* These preprocessor symbols control various aspects of the bignum headers and
+ * library code. They're not defined by any "normal" configuration, as they are
+ * intended for development and testing purposes. NB: defining both can be
+ * useful for debugging application code as well as openssl itself.
+ *
+ * BN_DEBUG - turn on various debugging alterations to the bignum code
+ * BN_DEBUG_RAND - uses random poisoning of unused words to trip up
+ * mismanagement of bignum internals. You must also define BN_DEBUG.
+ */
+/* #define BN_DEBUG */
+/* #define BN_DEBUG_RAND */
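+/* Neither symbol is set here; a debug build would normally define them on
+ * the compiler command line instead of editing this header, e.g.
+ * (illustrative invocation only):
+ *
+ *     cc -DBN_DEBUG -DBN_DEBUG_RAND -c bn_lib.c
+ */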
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+#define BN_MUL_COMBA
+#define BN_SQR_COMBA
+#define BN_RECURSION
+#endif
+
+/* This next option uses the C library's (2 word)/(1 word) division function.
+ * If it is not defined, I use my C version (which is slower).
+ * The reason for this flag is that when the particular C compiler
+ * library routine is used, and the library is linked with a different
+ * compiler, the library is missing. This mostly happens when the
+ * library is built with gcc and then linked using normal cc. This would
+ * be a common occurrence because gcc normally produces code that is
+ * 2 times faster than system compilers for the big number stuff.
+ * For machines with only one compiler (or shared libraries), this should
+ * be on. Again, this is only really a problem on machines that
+ * use "long long"s, are 32-bit, and are not using my assembler code. */
+#if defined(OPENSSL_SYS_MSDOS) || defined(OPENSSL_SYS_WINDOWS) || \
+ defined(OPENSSL_SYS_WIN32) || defined(linux)
+# ifndef BN_DIV2W
+# define BN_DIV2W
+# endif
+#endif
+
+/* assuming long is 64 bits - this is the DEC Alpha;
+ * unsigned long long is also only 64 bits :-(, so don't define
+ * BN_LLONG for the DEC Alpha */
+#ifdef SIXTY_FOUR_BIT_LONG
+#define BN_ULLONG unsigned long long
+#define BN_ULONG unsigned long
+#define BN_LONG long
+#define BN_BITS 128
+#define BN_BYTES 8
+#define BN_BITS2 64
+#define BN_BITS4 32
+#define BN_MASK (0xffffffffffffffffffffffffffffffffLL)
+#define BN_MASK2 (0xffffffffffffffffL)
+#define BN_MASK2l (0xffffffffL)
+#define BN_MASK2h (0xffffffff00000000L)
+#define BN_MASK2h1 (0xffffffff80000000L)
+#define BN_TBIT (0x8000000000000000L)
+#define BN_DEC_CONV (10000000000000000000UL)
+#define BN_DEC_FMT1 "%lu"
+#define BN_DEC_FMT2 "%019lu"
+#define BN_DEC_NUM 19
+#define BN_HEX_FMT1 "%lX"
+#define BN_HEX_FMT2 "%016lX"
+#endif
+
+/* This is where the long long data type is 64 bits, but long is 32.
+ * For machines where there are 64bit registers, this is the mode to use.
+ * IRIX, on R4000 and above should use this mode, along with the relevant
+ * assembler code :-). Do NOT define BN_LLONG.
+ */
+#ifdef SIXTY_FOUR_BIT
+#undef BN_LLONG
+#undef BN_ULLONG
+#define BN_ULONG unsigned long long
+#define BN_LONG long long
+#define BN_BITS 128
+#define BN_BYTES 8
+#define BN_BITS2 64
+#define BN_BITS4 32
+#define BN_MASK2 (0xffffffffffffffffLL)
+#define BN_MASK2l (0xffffffffL)
+#define BN_MASK2h (0xffffffff00000000LL)
+#define BN_MASK2h1 (0xffffffff80000000LL)
+#define BN_TBIT (0x8000000000000000LL)
+#define BN_DEC_CONV (10000000000000000000ULL)
+#define BN_DEC_FMT1 "%llu"
+#define BN_DEC_FMT2 "%019llu"
+#define BN_DEC_NUM 19
+#define BN_HEX_FMT1 "%llX"
+#define BN_HEX_FMT2 "%016llX"
+#endif
+
+#ifdef THIRTY_TWO_BIT
+#ifdef BN_LLONG
+# if defined(_WIN32) && !defined(__GNUC__)
+# define BN_ULLONG unsigned __int64
+# define BN_MASK (0xffffffffffffffffI64)
+# else
+# define BN_ULLONG unsigned long long
+# define BN_MASK (0xffffffffffffffffLL)
+# endif
+#endif
+#define BN_ULONG unsigned int
+#define BN_LONG int
+#define BN_BITS 64
+#define BN_BYTES 4
+#define BN_BITS2 32
+#define BN_BITS4 16
+#define BN_MASK2 (0xffffffffL)
+#define BN_MASK2l (0xffff)
+#define BN_MASK2h1 (0xffff8000L)
+#define BN_MASK2h (0xffff0000L)
+#define BN_TBIT (0x80000000L)
+#define BN_DEC_CONV (1000000000L)
+#define BN_DEC_FMT1 "%u"
+#define BN_DEC_FMT2 "%09u"
+#define BN_DEC_NUM 9
+#define BN_HEX_FMT1 "%X"
+#define BN_HEX_FMT2 "%08X"
+#endif
+
+/* 2011-02-22 SMS.
+ * In various places, a size_t variable or a type cast to size_t was
+ * used to perform integer-only operations on pointers. This failed on
+ * VMS with 64-bit pointers (CC /POINTER_SIZE = 64) because size_t is
+ * still only 32 bits. What's needed in these cases is an integer type
+ * with the same size as a pointer, which size_t is not certain to be.
+ * The only fix here is VMS-specific.
+ */
+#if defined(OPENSSL_SYS_VMS)
+# if __INITIAL_POINTER_SIZE == 64
+# define PTR_SIZE_INT long long
+# else /* __INITIAL_POINTER_SIZE == 64 */
+# define PTR_SIZE_INT int
+# endif /* __INITIAL_POINTER_SIZE == 64 [else] */
+#else /* defined(OPENSSL_SYS_VMS) */
+# define PTR_SIZE_INT size_t
+#endif /* defined(OPENSSL_SYS_VMS) [else] */
+
+#define BN_DEFAULT_BITS 1280
+
+#define BN_FLG_MALLOCED 0x01
+#define BN_FLG_STATIC_DATA 0x02
+#define BN_FLG_CONSTTIME 0x04 /* avoid leaking exponent information through timing,
+ * BN_mod_exp_mont() will call BN_mod_exp_mont_consttime,
+ * BN_div() will call BN_div_no_branch,
+ * BN_mod_inverse() will call BN_mod_inverse_no_branch.
+ */
+
+#ifndef OPENSSL_NO_DEPRECATED
+#define BN_FLG_EXP_CONSTTIME BN_FLG_CONSTTIME /* deprecated name for the flag */
+ /* avoid leaking exponent information through timings
+ * (BN_mod_exp_mont() will call BN_mod_exp_mont_consttime) */
+#endif
+
+#ifndef OPENSSL_NO_DEPRECATED
+#define BN_FLG_FREE		0x8000	/* used for debugging */
+#endif
+#define BN_set_flags(b,n) ((b)->flags|=(n))
+#define BN_get_flags(b,n) ((b)->flags&(n))
+
+/* get a clone of a BIGNUM with changed flags, for *temporary* use only
+ * (the two BIGNUMs cannot be used in parallel!) */
+#define BN_with_flags(dest,b,n) ((dest)->d=(b)->d, \
+ (dest)->top=(b)->top, \
+ (dest)->dmax=(b)->dmax, \
+ (dest)->neg=(b)->neg, \
+ (dest)->flags=(((dest)->flags & BN_FLG_MALLOCED) \
+ | ((b)->flags & ~BN_FLG_MALLOCED) \
+ | BN_FLG_STATIC_DATA \
+ | (n)))
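+/* Illustrative use of BN_with_flags (a sketch, not a normative example):
+ * make a temporary constant-time "view" of an existing exponent e without
+ * copying its words; the view shares d with the original and must not be
+ * used concurrently with it or passed to BN_free().
+ *
+ *     BIGNUM local_e;
+ *     BN_init(&local_e);
+ *     BN_with_flags(&local_e, e, BN_FLG_CONSTTIME);
+ *     (now use &local_e wherever a constant-time e is wanted)
+ */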
+
+/* Already declared in ossl_typ.h */
+#if 0
+typedef struct bignum_st BIGNUM;
+/* Used for temp variables (declaration hidden in bn_lcl.h) */
+typedef struct bignum_ctx BN_CTX;
+typedef struct bn_blinding_st BN_BLINDING;
+typedef struct bn_mont_ctx_st BN_MONT_CTX;
+typedef struct bn_recp_ctx_st BN_RECP_CTX;
+typedef struct bn_gencb_st BN_GENCB;
+#endif
+
+struct bignum_st
+ {
+ BN_ULONG *d; /* Pointer to an array of 'BN_BITS2' bit chunks. */
+ int top; /* Index of last used d +1. */
+ /* The next are internal book keeping for bn_expand. */
+ int dmax; /* Size of the d array. */
+ int neg; /* one if the number is negative */
+ int flags;
+ };
+
+/* Used for montgomery multiplication */
+struct bn_mont_ctx_st
+ {
+ int ri; /* number of bits in R */
+ BIGNUM RR; /* used to convert to montgomery form */
+ BIGNUM N; /* The modulus */
+ BIGNUM Ni; /* R*(1/R mod N) - N*Ni = 1
+ * (Ni is only stored for bignum algorithm) */
+ BN_ULONG n0[2];/* least significant word(s) of Ni;
+ (type changed with 0.9.9, was "BN_ULONG n0;" before) */
+ int flags;
+ };
+
+/* Used for reciprocal division/mod functions
+ * It cannot be shared between threads
+ */
+struct bn_recp_ctx_st
+ {
+ BIGNUM N; /* the divisor */
+ BIGNUM Nr; /* the reciprocal */
+ int num_bits;
+ int shift;
+ int flags;
+ };
+
+/* Used for slow "generation" functions. */
+struct bn_gencb_st
+ {
+ unsigned int ver; /* To handle binary (in)compatibility */
+ void *arg; /* callback-specific data */
+ union
+ {
+ /* if(ver==1) - handles old style callbacks */
+ void (*cb_1)(int, int, void *);
+ /* if(ver==2) - new callback style */
+ int (*cb_2)(int, int, BN_GENCB *);
+ } cb;
+ };
+/* Wrapper function to make using BN_GENCB easier. */
+int BN_GENCB_call(BN_GENCB *cb, int a, int b);
+/* Macro to populate a BN_GENCB structure with an "old"-style callback */
+#define BN_GENCB_set_old(gencb, callback, cb_arg) { \
+ BN_GENCB *tmp_gencb = (gencb); \
+ tmp_gencb->ver = 1; \
+ tmp_gencb->arg = (cb_arg); \
+ tmp_gencb->cb.cb_1 = (callback); }
+/* Macro to populate a BN_GENCB structure with a "new"-style callback */
+#define BN_GENCB_set(gencb, callback, cb_arg) { \
+ BN_GENCB *tmp_gencb = (gencb); \
+ tmp_gencb->ver = 2; \
+ tmp_gencb->arg = (cb_arg); \
+ tmp_gencb->cb.cb_2 = (callback); }
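+/* Example of a "new"-style callback (a sketch; my_genprime_cb, my_state and
+ * state are hypothetical names; returning 0 from the callback aborts the
+ * caller):
+ *
+ *     static int my_genprime_cb(int a, int b, BN_GENCB *cb)
+ *         {
+ *         struct my_state *st = cb->arg;
+ *         st->last_event = a;
+ *         st->last_count = b;
+ *         return 1;
+ *         }
+ *
+ *     BN_GENCB cb;
+ *     BN_GENCB_set(&cb, my_genprime_cb, &state);
+ *     BN_generate_prime_ex(prime, 1024, 0, NULL, NULL, &cb);
+ */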
+
+#define BN_prime_checks 0 /* default: select number of iterations
+ based on the size of the number */
+
+/* number of Miller-Rabin iterations for an error rate of less than 2^-80
+ * for random 'b'-bit input, b >= 100 (taken from table 4.4 in the Handbook
+ * of Applied Cryptography [Menezes, van Oorschot, Vanstone; CRC Press 1996];
+ * original paper: Damgaard, Landrock, Pomerance: Average case error estimates
+ * for the strong probable prime test. -- Math. Comp. 61 (1993) 177-194) */
+#define BN_prime_checks_for_size(b) ((b) >= 1300 ? 2 : \
+ (b) >= 850 ? 3 : \
+ (b) >= 650 ? 4 : \
+ (b) >= 550 ? 5 : \
+ (b) >= 450 ? 6 : \
+ (b) >= 400 ? 7 : \
+ (b) >= 350 ? 8 : \
+ (b) >= 300 ? 9 : \
+ (b) >= 250 ? 12 : \
+ (b) >= 200 ? 15 : \
+ (b) >= 150 ? 18 : \
+ /* b >= 100 */ 27)
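+/* For example, the table above gives 2 rounds for a 2048-bit candidate,
+ * 3 rounds for 1024 bits and 6 rounds for 512 bits, each targeting an
+ * error probability below 2^-80 for random input. */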
+
+#define BN_num_bytes(a) ((BN_num_bits(a)+7)/8)
+
+/* Note that BN_abs_is_word didn't work reliably for w == 0 until 0.9.8 */
+#define BN_abs_is_word(a,w) ((((a)->top == 1) && ((a)->d[0] == (BN_ULONG)(w))) || \
+ (((w) == 0) && ((a)->top == 0)))
+#define BN_is_zero(a) ((a)->top == 0)
+#define BN_is_one(a) (BN_abs_is_word((a),1) && !(a)->neg)
+#define BN_is_word(a,w) (BN_abs_is_word((a),(w)) && (!(w) || !(a)->neg))
+#define BN_is_odd(a) (((a)->top > 0) && ((a)->d[0] & 1))
+
+#define BN_one(a) (BN_set_word((a),1))
+#define BN_zero_ex(a) \
+ do { \
+ BIGNUM *_tmp_bn = (a); \
+ _tmp_bn->top = 0; \
+ _tmp_bn->neg = 0; \
+ } while(0)
+#ifdef OPENSSL_NO_DEPRECATED
+#define BN_zero(a) BN_zero_ex(a)
+#else
+#define BN_zero(a) (BN_set_word((a),0))
+#endif
+
+const BIGNUM *BN_value_one(void);
+char * BN_options(void);
+BN_CTX *BN_CTX_new(void);
+#ifndef OPENSSL_NO_DEPRECATED
+void BN_CTX_init(BN_CTX *c);
+#endif
+void BN_CTX_free(BN_CTX *c);
+void BN_CTX_start(BN_CTX *ctx);
+BIGNUM *BN_CTX_get(BN_CTX *ctx);
+void BN_CTX_end(BN_CTX *ctx);
+int BN_rand(BIGNUM *rnd, int bits, int top,int bottom);
+int BN_pseudo_rand(BIGNUM *rnd, int bits, int top,int bottom);
+int BN_rand_range(BIGNUM *rnd, const BIGNUM *range);
+int BN_pseudo_rand_range(BIGNUM *rnd, const BIGNUM *range);
+int BN_num_bits(const BIGNUM *a);
+int BN_num_bits_word(BN_ULONG);
+BIGNUM *BN_new(void);
+void BN_init(BIGNUM *);
+void BN_clear_free(BIGNUM *a);
+BIGNUM *BN_copy(BIGNUM *a, const BIGNUM *b);
+void BN_swap(BIGNUM *a, BIGNUM *b);
+BIGNUM *BN_bin2bn(const unsigned char *s,int len,BIGNUM *ret);
+int BN_bn2bin(const BIGNUM *a, unsigned char *to);
+BIGNUM *BN_mpi2bn(const unsigned char *s,int len,BIGNUM *ret);
+int BN_bn2mpi(const BIGNUM *a, unsigned char *to);
+int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
+int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
+int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
+int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b);
+int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx);
+int BN_sqr(BIGNUM *r, const BIGNUM *a,BN_CTX *ctx);
+/** BN_set_negative sets sign of a BIGNUM
+ * \param b pointer to the BIGNUM object
+ * \param n 0 if the BIGNUM b should be positive and a value != 0 otherwise
+ */
+void BN_set_negative(BIGNUM *b, int n);
+/** BN_is_negative returns 1 if the BIGNUM is negative
+ * \param a pointer to the BIGNUM object
+ * \return 1 if a < 0 and 0 otherwise
+ */
+#define BN_is_negative(a) ((a)->neg != 0)
+
+int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d,
+ BN_CTX *ctx);
+#define BN_mod(rem,m,d,ctx) BN_div(NULL,(rem),(m),(d),(ctx))
+int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx);
+int BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx);
+int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m);
+int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx);
+int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m);
+int BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ const BIGNUM *m, BN_CTX *ctx);
+int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx);
+int BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx);
+int BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m);
+int BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m, BN_CTX *ctx);
+int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m);
+
+BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w);
+BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w);
+int BN_mul_word(BIGNUM *a, BN_ULONG w);
+int BN_add_word(BIGNUM *a, BN_ULONG w);
+int BN_sub_word(BIGNUM *a, BN_ULONG w);
+int BN_set_word(BIGNUM *a, BN_ULONG w);
+BN_ULONG BN_get_word(const BIGNUM *a);
+
+int BN_cmp(const BIGNUM *a, const BIGNUM *b);
+void BN_free(BIGNUM *a);
+int BN_is_bit_set(const BIGNUM *a, int n);
+int BN_lshift(BIGNUM *r, const BIGNUM *a, int n);
+int BN_lshift1(BIGNUM *r, const BIGNUM *a);
+int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,BN_CTX *ctx);
+
+int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m,BN_CTX *ctx);
+int BN_mod_exp_mont(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
+int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont);
+int BN_mod_exp_mont_word(BIGNUM *r, BN_ULONG a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
+int BN_mod_exp2_mont(BIGNUM *r, const BIGNUM *a1, const BIGNUM *p1,
+ const BIGNUM *a2, const BIGNUM *p2,const BIGNUM *m,
+ BN_CTX *ctx,BN_MONT_CTX *m_ctx);
+int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m,BN_CTX *ctx);
+
+int BN_mask_bits(BIGNUM *a,int n);
+#ifndef OPENSSL_NO_FP_API
+int BN_print_fp(FILE *fp, const BIGNUM *a);
+#endif
+#ifdef HEADER_BIO_H
+int BN_print(BIO *fp, const BIGNUM *a);
+#else
+int BN_print(void *fp, const BIGNUM *a);
+#endif
+int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx);
+int BN_rshift(BIGNUM *r, const BIGNUM *a, int n);
+int BN_rshift1(BIGNUM *r, const BIGNUM *a);
+void BN_clear(BIGNUM *a);
+BIGNUM *BN_dup(const BIGNUM *a);
+int BN_ucmp(const BIGNUM *a, const BIGNUM *b);
+int BN_set_bit(BIGNUM *a, int n);
+int BN_clear_bit(BIGNUM *a, int n);
+char * BN_bn2hex(const BIGNUM *a);
+char * BN_bn2dec(const BIGNUM *a);
+int BN_hex2bn(BIGNUM **a, const char *str);
+int BN_dec2bn(BIGNUM **a, const char *str);
+int BN_asc2bn(BIGNUM **a, const char *str);
+int BN_gcd(BIGNUM *r,const BIGNUM *a,const BIGNUM *b,BN_CTX *ctx);
+int BN_kronecker(const BIGNUM *a,const BIGNUM *b,BN_CTX *ctx); /* returns -2 for error */
+BIGNUM *BN_mod_inverse(BIGNUM *ret,
+ const BIGNUM *a, const BIGNUM *n,BN_CTX *ctx);
+BIGNUM *BN_mod_sqrt(BIGNUM *ret,
+ const BIGNUM *a, const BIGNUM *n,BN_CTX *ctx);
+
+void BN_consttime_swap(BN_ULONG swap, BIGNUM *a, BIGNUM *b, int nwords);
+
+/* Deprecated versions */
+#ifndef OPENSSL_NO_DEPRECATED
+BIGNUM *BN_generate_prime(BIGNUM *ret,int bits,int safe,
+ const BIGNUM *add, const BIGNUM *rem,
+ void (*callback)(int,int,void *),void *cb_arg);
+int BN_is_prime(const BIGNUM *p,int nchecks,
+ void (*callback)(int,int,void *),
+ BN_CTX *ctx,void *cb_arg);
+int BN_is_prime_fasttest(const BIGNUM *p,int nchecks,
+ void (*callback)(int,int,void *),BN_CTX *ctx,void *cb_arg,
+ int do_trial_division);
+#endif /* !defined(OPENSSL_NO_DEPRECATED) */
+
+/* Newer versions */
+int BN_generate_prime_ex(BIGNUM *ret,int bits,int safe, const BIGNUM *add,
+ const BIGNUM *rem, BN_GENCB *cb);
+int BN_is_prime_ex(const BIGNUM *p,int nchecks, BN_CTX *ctx, BN_GENCB *cb);
+int BN_is_prime_fasttest_ex(const BIGNUM *p,int nchecks, BN_CTX *ctx,
+ int do_trial_division, BN_GENCB *cb);
+
+int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx);
+
+int BN_X931_derive_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
+ const BIGNUM *Xp, const BIGNUM *Xp1, const BIGNUM *Xp2,
+ const BIGNUM *e, BN_CTX *ctx, BN_GENCB *cb);
+int BN_X931_generate_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2,
+ BIGNUM *Xp1, BIGNUM *Xp2,
+ const BIGNUM *Xp,
+ const BIGNUM *e, BN_CTX *ctx,
+ BN_GENCB *cb);
+
+BN_MONT_CTX *BN_MONT_CTX_new(void );
+void BN_MONT_CTX_init(BN_MONT_CTX *ctx);
+int BN_mod_mul_montgomery(BIGNUM *r,const BIGNUM *a,const BIGNUM *b,
+ BN_MONT_CTX *mont, BN_CTX *ctx);
+#define BN_to_montgomery(r,a,mont,ctx) BN_mod_mul_montgomery(\
+ (r),(a),&((mont)->RR),(mont),(ctx))
+int BN_from_montgomery(BIGNUM *r,const BIGNUM *a,
+ BN_MONT_CTX *mont, BN_CTX *ctx);
+void BN_MONT_CTX_free(BN_MONT_CTX *mont);
+int BN_MONT_CTX_set(BN_MONT_CTX *mont,const BIGNUM *mod,BN_CTX *ctx);
+BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to,BN_MONT_CTX *from);
+BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, int lock,
+ const BIGNUM *mod, BN_CTX *ctx);
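+/* Typical Montgomery usage (a sketch; error checking omitted and the names
+ * a, b, m, r, ar, br, rr, ctx are illustrative): set the context up once
+ * for modulus m, convert into Montgomery form, multiply, convert back.
+ *
+ *     BN_MONT_CTX *mont = BN_MONT_CTX_new();
+ *     BN_MONT_CTX_set(mont, m, ctx);
+ *     BN_to_montgomery(ar, a, mont, ctx);
+ *     BN_to_montgomery(br, b, mont, ctx);
+ *     BN_mod_mul_montgomery(rr, ar, br, mont, ctx);
+ *     BN_from_montgomery(r, rr, mont, ctx);        (r = a*b mod m)
+ *     BN_MONT_CTX_free(mont);
+ */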
+
+/* BN_BLINDING flags */
+#define BN_BLINDING_NO_UPDATE 0x00000001
+#define BN_BLINDING_NO_RECREATE 0x00000002
+
+BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod);
+void BN_BLINDING_free(BN_BLINDING *b);
+int BN_BLINDING_update(BN_BLINDING *b,BN_CTX *ctx);
+int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx);
+int BN_BLINDING_invert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx);
+int BN_BLINDING_convert_ex(BIGNUM *n, BIGNUM *r, BN_BLINDING *b, BN_CTX *);
+int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b, BN_CTX *);
+#ifndef OPENSSL_NO_DEPRECATED
+unsigned long BN_BLINDING_get_thread_id(const BN_BLINDING *);
+void BN_BLINDING_set_thread_id(BN_BLINDING *, unsigned long);
+#endif
+CRYPTO_THREADID *BN_BLINDING_thread_id(BN_BLINDING *);
+unsigned long BN_BLINDING_get_flags(const BN_BLINDING *);
+void BN_BLINDING_set_flags(BN_BLINDING *, unsigned long);
+BN_BLINDING *BN_BLINDING_create_param(BN_BLINDING *b,
+ const BIGNUM *e, BIGNUM *m, BN_CTX *ctx,
+ int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx),
+ BN_MONT_CTX *m_ctx);
+
+#ifndef OPENSSL_NO_DEPRECATED
+void BN_set_params(int mul,int high,int low,int mont);
+int BN_get_params(int which); /* 0, mul, 1 high, 2 low, 3 mont */
+#endif
+
+void BN_RECP_CTX_init(BN_RECP_CTX *recp);
+BN_RECP_CTX *BN_RECP_CTX_new(void);
+void BN_RECP_CTX_free(BN_RECP_CTX *recp);
+int BN_RECP_CTX_set(BN_RECP_CTX *recp,const BIGNUM *rdiv,BN_CTX *ctx);
+int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y,
+ BN_RECP_CTX *recp,BN_CTX *ctx);
+int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx);
+int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
+ BN_RECP_CTX *recp, BN_CTX *ctx);
+
+#ifndef OPENSSL_NO_EC2M
+
+/* Functions for arithmetic over binary polynomials represented by BIGNUMs.
+ *
+ * The BIGNUM::neg property of BIGNUMs representing binary polynomials is
+ * ignored.
+ *
+ * Note that input arguments are not const so that their bit arrays can
+ * be expanded to the appropriate size if needed.
+ */
+
+int BN_GF2m_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); /*r = a + b*/
+#define BN_GF2m_sub(r, a, b) BN_GF2m_add(r, a, b)
+int BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p); /*r=a mod p*/
+int BN_GF2m_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ const BIGNUM *p, BN_CTX *ctx); /* r = (a * b) mod p */
+int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ BN_CTX *ctx); /* r = (a * a) mod p */
+int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *b, const BIGNUM *p,
+ BN_CTX *ctx); /* r = (1 / b) mod p */
+int BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ const BIGNUM *p, BN_CTX *ctx); /* r = (a / b) mod p */
+int BN_GF2m_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ const BIGNUM *p, BN_CTX *ctx); /* r = (a ^ b) mod p */
+int BN_GF2m_mod_sqrt(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ BN_CTX *ctx); /* r = sqrt(a) mod p */
+int BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ BN_CTX *ctx); /* r^2 + r = a mod p */
+#define BN_GF2m_cmp(a, b) BN_ucmp((a), (b))
+/* Some functions allow for representation of the irreducible polynomials
+ * as an unsigned int[], say p. The irreducible f(t) is then of the form:
+ * t^p[0] + t^p[1] + ... + t^p[k]
+ * where m = p[0] > p[1] > ... > p[k] = 0.
+ */
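+/* For example, the pentanomial t^163 + t^7 + t^6 + t^3 + 1 (the reduction
+ * polynomial commonly used for the binary field GF(2^163)) would be passed
+ * as the array {163, 7, 6, 3, 0}; BN_GF2m_arr2poly() below converts such an
+ * array back into a BIGNUM. */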
+int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[]);
+ /* r = a mod p */
+int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ const int p[], BN_CTX *ctx); /* r = (a * b) mod p */
+int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[],
+ BN_CTX *ctx); /* r = (a * a) mod p */
+int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *b, const int p[],
+ BN_CTX *ctx); /* r = (1 / b) mod p */
+int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ const int p[], BN_CTX *ctx); /* r = (a / b) mod p */
+int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ const int p[], BN_CTX *ctx); /* r = (a ^ b) mod p */
+int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a,
+ const int p[], BN_CTX *ctx); /* r = sqrt(a) mod p */
+int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a,
+ const int p[], BN_CTX *ctx); /* r^2 + r = a mod p */
+int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max);
+int BN_GF2m_arr2poly(const int p[], BIGNUM *a);
+
+#endif
+
+/* faster mod functions for the 'NIST primes'
+ * 0 <= a < p^2 */
+int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
+int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
+int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
+int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
+int BN_nist_mod_521(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx);
+
+const BIGNUM *BN_get0_nist_prime_192(void);
+const BIGNUM *BN_get0_nist_prime_224(void);
+const BIGNUM *BN_get0_nist_prime_256(void);
+const BIGNUM *BN_get0_nist_prime_384(void);
+const BIGNUM *BN_get0_nist_prime_521(void);
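+/* Sketch of intended use (t, r, a, b and ctx are illustrative names):
+ * reduce a product modulo P-256 with the specialised routine rather than
+ * the generic BN_nnmod(), relying on 0 <= t < p^2.
+ *
+ *     const BIGNUM *p256 = BN_get0_nist_prime_256();
+ *     BN_mul(t, a, b, ctx);
+ *     BN_nist_mod_256(r, t, p256, ctx);        (r = a*b mod P-256)
+ */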
+
+int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM *priv,
+ const unsigned char *message, size_t message_len,
+ BN_CTX *ctx);
+
+/* library internal functions */
+
+#define bn_expand(a,bits) ((((((bits+BN_BITS2-1))/BN_BITS2)) <= (a)->dmax)?\
+ (a):bn_expand2((a),(bits+BN_BITS2-1)/BN_BITS2))
+#define bn_wexpand(a,words) (((words) <= (a)->dmax)?(a):bn_expand2((a),(words)))
+BIGNUM *bn_expand2(BIGNUM *a, int words);
+#ifndef OPENSSL_NO_DEPRECATED
+BIGNUM *bn_dup_expand(const BIGNUM *a, int words); /* unused */
+#endif
+
+/* Bignum consistency macros
+ * There is one "API" macro, bn_fix_top(), for stripping leading zeroes from
+ * bignum data after direct manipulations on the data. There is also an
+ * "internal" macro, bn_check_top(), for verifying that there are no leading
+ * zeroes. Unfortunately, some auditing is required due to the fact that
+ * bn_fix_top() has become an overabused duct-tape because bignum data is
+ * occasionally passed around in an inconsistent state. So the following
+ * changes have been made to sort this out:
+ * - bn_fix_top()'s implementation has been moved to bn_correct_top()
+ * - if BN_DEBUG isn't defined, bn_fix_top() maps to bn_correct_top(), and
+ * bn_check_top() is as before.
+ * - if BN_DEBUG *is* defined:
+ * - bn_check_top() tries to pollute unused words even if the bignum 'top' is
+ * consistent. (ed: only if BN_DEBUG_RAND is defined)
+ * - bn_fix_top() maps to bn_check_top() rather than "fixing" anything.
+ * The idea is to have debug builds flag up inconsistent bignums when they
+ * occur. If that occurs in a bn_fix_top(), we examine the code in question; if
+ * the use of bn_fix_top() was appropriate (i.e. it follows directly after code
+ * that manipulates the bignum) it is converted to bn_correct_top(), and if it
+ * was not appropriate, we convert it permanently to bn_check_top() and track
+ * down the cause of the bug. Eventually, no internal code should be using the
+ * bn_fix_top() macro. External applications and libraries should try this with
+ * their own code too, both in terms of building against the openssl headers
+ * with BN_DEBUG defined *and* linking with a version of OpenSSL built with it
+ * defined. This not only improves external code, it provides more test
+ * coverage for openssl's own code.
+ */
+
+#ifdef BN_DEBUG
+
+/* We only need assert() when debugging */
+#include <assert.h>
+
+#ifdef BN_DEBUG_RAND
+/* To avoid "make update" cvs wars due to BN_DEBUG, use some tricks */
+#ifndef RAND_pseudo_bytes
+int RAND_pseudo_bytes(unsigned char *buf,int num);
+#define BN_DEBUG_TRIX
+#endif
+#define bn_pollute(a) \
+ do { \
+ const BIGNUM *_bnum1 = (a); \
+ if(_bnum1->top < _bnum1->dmax) { \
+ unsigned char _tmp_char; \
+ /* We cast away const without the compiler knowing, any \
+ * *genuinely* constant variables that aren't mutable \
+ * wouldn't be constructed with top!=dmax. */ \
+ BN_ULONG *_not_const; \
+ memcpy(&_not_const, &_bnum1->d, sizeof(BN_ULONG*)); \
+ RAND_pseudo_bytes(&_tmp_char, 1); \
+ memset((unsigned char *)(_not_const + _bnum1->top), _tmp_char, \
+ (_bnum1->dmax - _bnum1->top) * sizeof(BN_ULONG)); \
+ } \
+ } while(0)
+#ifdef BN_DEBUG_TRIX
+#undef RAND_pseudo_bytes
+#endif
+#else
+#define bn_pollute(a)
+#endif
+#define bn_check_top(a) \
+ do { \
+ const BIGNUM *_bnum2 = (a); \
+ if (_bnum2 != NULL) { \
+ assert((_bnum2->top == 0) || \
+ (_bnum2->d[_bnum2->top - 1] != 0)); \
+ bn_pollute(_bnum2); \
+ } \
+ } while(0)
+
+#define bn_fix_top(a) bn_check_top(a)
+
+#define bn_check_size(bn, bits) bn_wcheck_size(bn, ((bits+BN_BITS2-1))/BN_BITS2)
+#define bn_wcheck_size(bn, words) \
+ do { \
+ const BIGNUM *_bnum2 = (bn); \
+ assert(words <= (_bnum2)->dmax && words >= (_bnum2)->top); \
+ } while(0)
+
+#else /* !BN_DEBUG */
+
+#define bn_pollute(a)
+#define bn_check_top(a)
+#define bn_fix_top(a) bn_correct_top(a)
+#define bn_check_size(bn, bits)
+#define bn_wcheck_size(bn, words)
+
+#endif
+
+#define bn_correct_top(a) \
+ { \
+ BN_ULONG *ftl; \
+ int tmp_top = (a)->top; \
+ if (tmp_top > 0) \
+ { \
+ for (ftl= &((a)->d[tmp_top-1]); tmp_top > 0; tmp_top--) \
+ if (*(ftl--)) break; \
+ (a)->top = tmp_top; \
+ } \
+ bn_pollute(a); \
+ }
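+/* Intended pattern for internal code (a sketch; assumes a has already been
+ * sized with bn_wexpand() and that n words were written directly to a->d):
+ *
+ *     a->top = n;
+ *     bn_correct_top(a);        (strip leading zero words)
+ *     bn_check_top(a);          (no-op unless BN_DEBUG is defined)
+ */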
+
+BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w);
+BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w);
+void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num);
+BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d);
+BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int num);
+BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int num);
+
+/* Primes from RFC 2409 */
+BIGNUM *get_rfc2409_prime_768(BIGNUM *bn);
+BIGNUM *get_rfc2409_prime_1024(BIGNUM *bn);
+
+/* Primes from RFC 3526 */
+BIGNUM *get_rfc3526_prime_1536(BIGNUM *bn);
+BIGNUM *get_rfc3526_prime_2048(BIGNUM *bn);
+BIGNUM *get_rfc3526_prime_3072(BIGNUM *bn);
+BIGNUM *get_rfc3526_prime_4096(BIGNUM *bn);
+BIGNUM *get_rfc3526_prime_6144(BIGNUM *bn);
+BIGNUM *get_rfc3526_prime_8192(BIGNUM *bn);
+
+int BN_bntest_rand(BIGNUM *rnd, int bits, int top,int bottom);
+
+/* BEGIN ERROR CODES */
+/* The following lines are auto generated by the script mkerr.pl. Any changes
+ * made after this point may be overwritten when the script is next run.
+ */
+void ERR_load_BN_strings(void);
+
+/* Error codes for the BN functions. */
+
+/* Function codes. */
+#define BN_F_BNRAND 127
+#define BN_F_BN_BLINDING_CONVERT_EX 100
+#define BN_F_BN_BLINDING_CREATE_PARAM 128
+#define BN_F_BN_BLINDING_INVERT_EX 101
+#define BN_F_BN_BLINDING_NEW 102
+#define BN_F_BN_BLINDING_UPDATE 103
+#define BN_F_BN_BN2DEC 104
+#define BN_F_BN_BN2HEX 105
+#define BN_F_BN_CTX_GET 116
+#define BN_F_BN_CTX_NEW 106
+#define BN_F_BN_CTX_START 129
+#define BN_F_BN_DIV 107
+#define BN_F_BN_DIV_NO_BRANCH 138
+#define BN_F_BN_DIV_RECP 130
+#define BN_F_BN_EXP 123
+#define BN_F_BN_EXPAND2 108
+#define BN_F_BN_EXPAND_INTERNAL 120
+#define BN_F_BN_GENERATE_DSA_NONCE 140
+#define BN_F_BN_GF2M_MOD 131
+#define BN_F_BN_GF2M_MOD_EXP 132
+#define BN_F_BN_GF2M_MOD_MUL 133
+#define BN_F_BN_GF2M_MOD_SOLVE_QUAD 134
+#define BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR 135
+#define BN_F_BN_GF2M_MOD_SQR 136
+#define BN_F_BN_GF2M_MOD_SQRT 137
+#define BN_F_BN_MOD_EXP2_MONT 118
+#define BN_F_BN_MOD_EXP_MONT 109
+#define BN_F_BN_MOD_EXP_MONT_CONSTTIME 124
+#define BN_F_BN_MOD_EXP_MONT_WORD 117
+#define BN_F_BN_MOD_EXP_RECP 125
+#define BN_F_BN_MOD_EXP_SIMPLE 126
+#define BN_F_BN_MOD_INVERSE 110
+#define BN_F_BN_MOD_INVERSE_NO_BRANCH 139
+#define BN_F_BN_MOD_LSHIFT_QUICK 119
+#define BN_F_BN_MOD_MUL_RECIPROCAL 111
+#define BN_F_BN_MOD_SQRT 121
+#define BN_F_BN_MPI2BN 112
+#define BN_F_BN_NEW 113
+#define BN_F_BN_RAND 114
+#define BN_F_BN_RAND_RANGE 122
+#define BN_F_BN_USUB 115
+
+/* Reason codes. */
+#define BN_R_ARG2_LT_ARG3 100
+#define BN_R_BAD_RECIPROCAL 101
+#define BN_R_BIGNUM_TOO_LONG 114
+#define BN_R_CALLED_WITH_EVEN_MODULUS 102
+#define BN_R_DIV_BY_ZERO 103
+#define BN_R_ENCODING_ERROR 104
+#define BN_R_EXPAND_ON_STATIC_BIGNUM_DATA 105
+#define BN_R_INPUT_NOT_REDUCED 110
+#define BN_R_INVALID_LENGTH 106
+#define BN_R_INVALID_RANGE 115
+#define BN_R_NOT_A_SQUARE 111
+#define BN_R_NOT_INITIALIZED 107
+#define BN_R_NO_INVERSE 108
+#define BN_R_NO_SOLUTION 116
+#define BN_R_PRIVATE_KEY_TOO_LARGE 117
+#define BN_R_P_IS_NOT_PRIME 112
+#define BN_R_TOO_MANY_ITERATIONS 113
+#define BN_R_TOO_MANY_TEMPORARY_VARIABLES 109
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn.mul b/ics-openvpn-stripped/main/openssl/crypto/bn/bn.mul
new file mode 100644
index 00000000..9728870d
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn.mul
@@ -0,0 +1,19 @@
+We need
+
+* bn_mul_comba8
+* bn_mul_comba4
+* bn_mul_normal
+* bn_mul_recursive
+
+* bn_sqr_comba8
+* bn_sqr_comba4
+bn_sqr_normal -> BN_sqr
+* bn_sqr_recursive
+
+* bn_mul_low_recursive
+* bn_mul_low_normal
+* bn_mul_high
+
+* bn_mul_part_recursive	# symmetric but not a power of 2
+
+bn_mul_asymetric_recursive # uneven, but do the chop up.
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_add.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_add.c
new file mode 100644
index 00000000..94051637
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_add.c
@@ -0,0 +1,313 @@
+/* crypto/bn/bn_add.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+/* r can == a or b */
+int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
+ {
+ const BIGNUM *tmp;
+ int a_neg = a->neg, ret;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ /* a + b a+b
+ * a + -b a-b
+ * -a + b b-a
+ * -a + -b -(a+b)
+ */
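+	/* e.g. (-5) + 3: the signs differ, the operands are swapped to give
+	 * 3 - 5, and since |3| < |5| the result is -(5 - 3) = -2 with
+	 * r->neg set below. */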
+ if (a_neg ^ b->neg)
+ {
+ /* only one is negative */
+ if (a_neg)
+ { tmp=a; a=b; b=tmp; }
+
+ /* we are now a - b */
+
+ if (BN_ucmp(a,b) < 0)
+ {
+ if (!BN_usub(r,b,a)) return(0);
+ r->neg=1;
+ }
+ else
+ {
+ if (!BN_usub(r,a,b)) return(0);
+ r->neg=0;
+ }
+ return(1);
+ }
+
+ ret = BN_uadd(r,a,b);
+ r->neg = a_neg;
+ bn_check_top(r);
+ return ret;
+ }
+
+/* unsigned add of b to a */
+int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
+ {
+ int max,min,dif;
+ BN_ULONG *ap,*bp,*rp,carry,t1,t2;
+ const BIGNUM *tmp;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ if (a->top < b->top)
+ { tmp=a; a=b; b=tmp; }
+ max = a->top;
+ min = b->top;
+ dif = max - min;
+
+ if (bn_wexpand(r,max+1) == NULL)
+ return 0;
+
+ r->top=max;
+
+
+ ap=a->d;
+ bp=b->d;
+ rp=r->d;
+
+ carry=bn_add_words(rp,ap,bp,min);
+ rp+=min;
+ ap+=min;
+ bp+=min;
+
+ if (carry)
+ {
+ while (dif)
+ {
+ dif--;
+ t1 = *(ap++);
+ t2 = (t1+1) & BN_MASK2;
+ *(rp++) = t2;
+ if (t2)
+ {
+ carry=0;
+ break;
+ }
+ }
+ if (carry)
+ {
+ /* carry != 0 => dif == 0 */
+ *rp = 1;
+ r->top++;
+ }
+ }
+ if (dif && rp != ap)
+ while (dif--)
+ /* copy remaining words if ap != rp */
+ *(rp++) = *(ap++);
+ r->neg = 0;
+ bn_check_top(r);
+ return 1;
+ }
+
+/* unsigned subtraction of b from a, a must be larger than b. */
+int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
+ {
+ int max,min,dif;
+ register BN_ULONG t1,t2,*ap,*bp,*rp;
+ int i,carry;
+#if defined(IRIX_CC_BUG) && !defined(LINT)
+ int dummy;
+#endif
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ max = a->top;
+ min = b->top;
+ dif = max - min;
+
+ if (dif < 0) /* hmm... should not be happening */
+ {
+ BNerr(BN_F_BN_USUB,BN_R_ARG2_LT_ARG3);
+ return(0);
+ }
+
+ if (bn_wexpand(r,max) == NULL) return(0);
+
+ ap=a->d;
+ bp=b->d;
+ rp=r->d;
+
+#if 1
+ carry=0;
+ for (i = min; i != 0; i--)
+ {
+ t1= *(ap++);
+ t2= *(bp++);
+ if (carry)
+ {
+ carry=(t1 <= t2);
+ t1=(t1-t2-1)&BN_MASK2;
+ }
+ else
+ {
+ carry=(t1 < t2);
+ t1=(t1-t2)&BN_MASK2;
+ }
+#if defined(IRIX_CC_BUG) && !defined(LINT)
+ dummy=t1;
+#endif
+ *(rp++)=t1&BN_MASK2;
+ }
+#else
+ carry=bn_sub_words(rp,ap,bp,min);
+ ap+=min;
+ bp+=min;
+ rp+=min;
+#endif
+ if (carry) /* subtracted */
+ {
+ if (!dif)
+ /* error: a < b */
+ return 0;
+ while (dif)
+ {
+ dif--;
+ t1 = *(ap++);
+ t2 = (t1-1)&BN_MASK2;
+ *(rp++) = t2;
+ if (t1)
+ break;
+ }
+ }
+#if 0
+ memcpy(rp,ap,sizeof(*rp)*(max-i));
+#else
+ if (rp != ap)
+ {
+ for (;;)
+ {
+ if (!dif--) break;
+ rp[0]=ap[0];
+ if (!dif--) break;
+ rp[1]=ap[1];
+ if (!dif--) break;
+ rp[2]=ap[2];
+ if (!dif--) break;
+ rp[3]=ap[3];
+ rp+=4;
+ ap+=4;
+ }
+ }
+#endif
+
+ r->top=max;
+ r->neg=0;
+ bn_correct_top(r);
+ return(1);
+ }
+
+int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
+ {
+ int max;
+ int add=0,neg=0;
+ const BIGNUM *tmp;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ /* a - b a-b
+ * a - -b a+b
+ * -a - b -(a+b)
+ * -a - -b b-a
+ */
+ if (a->neg)
+ {
+ if (b->neg)
+ { tmp=a; a=b; b=tmp; }
+ else
+ { add=1; neg=1; }
+ }
+ else
+ {
+ if (b->neg) { add=1; neg=0; }
+ }
+
+ if (add)
+ {
+ if (!BN_uadd(r,a,b)) return(0);
+ r->neg=neg;
+ return(1);
+ }
+
+ /* We are actually doing a - b :-) */
+
+ max=(a->top > b->top)?a->top:b->top;
+ if (bn_wexpand(r,max) == NULL) return(0);
+ if (BN_ucmp(a,b) < 0)
+ {
+ if (!BN_usub(r,b,a)) return(0);
+ r->neg=1;
+ }
+ else
+ {
+ if (!BN_usub(r,a,b)) return(0);
+ r->neg=0;
+ }
+ bn_check_top(r);
+ return(1);
+ }
+
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_asm.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_asm.c
new file mode 100644
index 00000000..c43c91cc
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_asm.c
@@ -0,0 +1,1030 @@
+/* crypto/bn/bn_asm.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef BN_DEBUG
+# undef NDEBUG /* avoid conflicting definitions */
+# define NDEBUG
+#endif
+
+#include <stdio.h>
+#include <assert.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+#if defined(BN_LLONG) || defined(BN_UMULT_HIGH)
+
+BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG c1=0;
+
+ assert(num >= 0);
+ if (num <= 0) return(c1);
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (num&~3)
+ {
+ mul_add(rp[0],ap[0],w,c1);
+ mul_add(rp[1],ap[1],w,c1);
+ mul_add(rp[2],ap[2],w,c1);
+ mul_add(rp[3],ap[3],w,c1);
+ ap+=4; rp+=4; num-=4;
+ }
+#endif
+ while (num)
+ {
+ mul_add(rp[0],ap[0],w,c1);
+ ap++; rp++; num--;
+ }
+
+ return(c1);
+ }
+
+BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG c1=0;
+
+ assert(num >= 0);
+ if (num <= 0) return(c1);
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (num&~3)
+ {
+ mul(rp[0],ap[0],w,c1);
+ mul(rp[1],ap[1],w,c1);
+ mul(rp[2],ap[2],w,c1);
+ mul(rp[3],ap[3],w,c1);
+ ap+=4; rp+=4; num-=4;
+ }
+#endif
+ while (num)
+ {
+ mul(rp[0],ap[0],w,c1);
+ ap++; rp++; num--;
+ }
+ return(c1);
+ }
+
+void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
+ {
+ assert(n >= 0);
+ if (n <= 0) return;
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
+ {
+ sqr(r[0],r[1],a[0]);
+ sqr(r[2],r[3],a[1]);
+ sqr(r[4],r[5],a[2]);
+ sqr(r[6],r[7],a[3]);
+ a+=4; r+=8; n-=4;
+ }
+#endif
+ while (n)
+ {
+ sqr(r[0],r[1],a[0]);
+ a++; r+=2; n--;
+ }
+ }
+
+#else /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */
+
+BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG c=0;
+ BN_ULONG bl,bh;
+
+ assert(num >= 0);
+ if (num <= 0) return((BN_ULONG)0);
+
+ bl=LBITS(w);
+ bh=HBITS(w);
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (num&~3)
+ {
+ mul_add(rp[0],ap[0],bl,bh,c);
+ mul_add(rp[1],ap[1],bl,bh,c);
+ mul_add(rp[2],ap[2],bl,bh,c);
+ mul_add(rp[3],ap[3],bl,bh,c);
+ ap+=4; rp+=4; num-=4;
+ }
+#endif
+ while (num)
+ {
+ mul_add(rp[0],ap[0],bl,bh,c);
+ ap++; rp++; num--;
+ }
+ return(c);
+ }
+
+BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
+ {
+ BN_ULONG carry=0;
+ BN_ULONG bl,bh;
+
+ assert(num >= 0);
+ if (num <= 0) return((BN_ULONG)0);
+
+ bl=LBITS(w);
+ bh=HBITS(w);
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (num&~3)
+ {
+ mul(rp[0],ap[0],bl,bh,carry);
+ mul(rp[1],ap[1],bl,bh,carry);
+ mul(rp[2],ap[2],bl,bh,carry);
+ mul(rp[3],ap[3],bl,bh,carry);
+ ap+=4; rp+=4; num-=4;
+ }
+#endif
+ while (num)
+ {
+ mul(rp[0],ap[0],bl,bh,carry);
+ ap++; rp++; num--;
+ }
+ return(carry);
+ }
+
+void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
+ {
+ assert(n >= 0);
+ if (n <= 0) return;
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
+ {
+ sqr64(r[0],r[1],a[0]);
+ sqr64(r[2],r[3],a[1]);
+ sqr64(r[4],r[5],a[2]);
+ sqr64(r[6],r[7],a[3]);
+ a+=4; r+=8; n-=4;
+ }
+#endif
+ while (n)
+ {
+ sqr64(r[0],r[1],a[0]);
+ a++; r+=2; n--;
+ }
+ }
+
+#endif /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */
+
+#if defined(BN_LLONG) && defined(BN_DIV2W)
+
+BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+ {
+ return((BN_ULONG)(((((BN_ULLONG)h)<<BN_BITS2)|l)/(BN_ULLONG)d));
+ }
+
+#else
+
+/* Divide h,l by d and return the result. */
+/* I need to test this some more :-( */
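+/* Worked example (assuming BN_BITS2 == 32 and h < d): bn_div_words(1,0,2)
+ * divides the two-word value 2^32 by 2 and returns 0x80000000. */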
+BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+ {
+ BN_ULONG dh,dl,q,ret=0,th,tl,t;
+ int i,count=2;
+
+ if (d == 0) return(BN_MASK2);
+
+ i=BN_num_bits_word(d);
+ assert((i == BN_BITS2) || (h <= (BN_ULONG)1<<i));
+
+ i=BN_BITS2-i;
+ if (h >= d) h-=d;
+
+ if (i)
+ {
+ d<<=i;
+ h=(h<<i)|(l>>(BN_BITS2-i));
+ l<<=i;
+ }
+ dh=(d&BN_MASK2h)>>BN_BITS4;
+ dl=(d&BN_MASK2l);
+ for (;;)
+ {
+ if ((h>>BN_BITS4) == dh)
+ q=BN_MASK2l;
+ else
+ q=h/dh;
+
+ th=q*dh;
+ tl=dl*q;
+ for (;;)
+ {
+ t=h-th;
+ if ((t&BN_MASK2h) ||
+ ((tl) <= (
+ (t<<BN_BITS4)|
+ ((l&BN_MASK2h)>>BN_BITS4))))
+ break;
+ q--;
+ th-=dh;
+ tl-=dl;
+ }
+ t=(tl>>BN_BITS4);
+ tl=(tl<<BN_BITS4)&BN_MASK2h;
+ th+=t;
+
+ if (l < tl) th++;
+ l-=tl;
+ if (h < th)
+ {
+ h+=d;
+ q--;
+ }
+ h-=th;
+
+ if (--count == 0) break;
+
+ ret=q<<BN_BITS4;
+ h=((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2;
+ l=(l&BN_MASK2l)<<BN_BITS4;
+ }
+ ret|=q;
+ return(ret);
+ }
+#endif /* !defined(BN_LLONG) && defined(BN_DIV2W) */
+
+#ifdef BN_LLONG
+BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
+ {
+ BN_ULLONG ll=0;
+
+ assert(n >= 0);
+ if (n <= 0) return((BN_ULONG)0);
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
+ {
+ ll+=(BN_ULLONG)a[0]+b[0];
+ r[0]=(BN_ULONG)ll&BN_MASK2;
+ ll>>=BN_BITS2;
+ ll+=(BN_ULLONG)a[1]+b[1];
+ r[1]=(BN_ULONG)ll&BN_MASK2;
+ ll>>=BN_BITS2;
+ ll+=(BN_ULLONG)a[2]+b[2];
+ r[2]=(BN_ULONG)ll&BN_MASK2;
+ ll>>=BN_BITS2;
+ ll+=(BN_ULLONG)a[3]+b[3];
+ r[3]=(BN_ULONG)ll&BN_MASK2;
+ ll>>=BN_BITS2;
+ a+=4; b+=4; r+=4; n-=4;
+ }
+#endif
+ while (n)
+ {
+ ll+=(BN_ULLONG)a[0]+b[0];
+ r[0]=(BN_ULONG)ll&BN_MASK2;
+ ll>>=BN_BITS2;
+ a++; b++; r++; n--;
+ }
+ return((BN_ULONG)ll);
+ }
+#else /* !BN_LLONG */
+BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
+ {
+ BN_ULONG c,l,t;
+
+ assert(n >= 0);
+ if (n <= 0) return((BN_ULONG)0);
+
+ c=0;
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
+ {
+ t=a[0];
+ t=(t+c)&BN_MASK2;
+ c=(t < c);
+ l=(t+b[0])&BN_MASK2;
+ c+=(l < t);
+ r[0]=l;
+ t=a[1];
+ t=(t+c)&BN_MASK2;
+ c=(t < c);
+ l=(t+b[1])&BN_MASK2;
+ c+=(l < t);
+ r[1]=l;
+ t=a[2];
+ t=(t+c)&BN_MASK2;
+ c=(t < c);
+ l=(t+b[2])&BN_MASK2;
+ c+=(l < t);
+ r[2]=l;
+ t=a[3];
+ t=(t+c)&BN_MASK2;
+ c=(t < c);
+ l=(t+b[3])&BN_MASK2;
+ c+=(l < t);
+ r[3]=l;
+ a+=4; b+=4; r+=4; n-=4;
+ }
+#endif
+ while(n)
+ {
+ t=a[0];
+ t=(t+c)&BN_MASK2;
+ c=(t < c);
+ l=(t+b[0])&BN_MASK2;
+ c+=(l < t);
+ r[0]=l;
+ a++; b++; r++; n--;
+ }
+ return((BN_ULONG)c);
+ }
+#endif /* !BN_LLONG */
+
+BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
+ {
+ BN_ULONG t1,t2;
+ int c=0;
+
+ assert(n >= 0);
+ if (n <= 0) return((BN_ULONG)0);
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+ while (n&~3)
+ {
+ t1=a[0]; t2=b[0];
+ r[0]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ t1=a[1]; t2=b[1];
+ r[1]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ t1=a[2]; t2=b[2];
+ r[2]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ t1=a[3]; t2=b[3];
+ r[3]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ a+=4; b+=4; r+=4; n-=4;
+ }
+#endif
+ while (n)
+ {
+ t1=a[0]; t2=b[0];
+ r[0]=(t1-t2-c)&BN_MASK2;
+ if (t1 != t2) c=(t1 < t2);
+ a++; b++; r++; n--;
+ }
+ return(c);
+ }
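+
+/*
+ * A note on the borrow update in bn_sub_words(): when t1 == t2 the
+ * statement "if (t1 != t2) c=(t1 < t2);" deliberately leaves c unchanged,
+ * because the result word is then (0-c)&BN_MASK2 and the subtraction
+ * borrows exactly when the incoming borrow c was already 1.
+ */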
+
+#if defined(BN_MUL_COMBA) && !defined(OPENSSL_SMALL_FOOTPRINT)
+
+#undef bn_mul_comba8
+#undef bn_mul_comba4
+#undef bn_sqr_comba8
+#undef bn_sqr_comba4
+
+/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for the three-word number c=(c2,c1,c0) */
+/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for the three-word number c=(c2,c1,c0) */
+/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for the three-word number c=(c2,c1,c0) */
+/* sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for the three-word number c=(c2,c1,c0) */
+
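+/*
+ * A worked illustration of the three-word accumulator, assuming 32-bit
+ * words (BN_BITS2 == 32): for a == b == 0xffffffff, a*b equals
+ * 0xfffffffe00000001, so mul_add_c() adds 0x00000001 to c0 and
+ * 0xfffffffe (plus any carry out of c0) to c1, with a final carry, if
+ * any, propagating into c2.
+ */
+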
+#ifdef BN_LLONG
+#define mul_add_c(a,b,c0,c1,c2) \
+ t=(BN_ULLONG)a*b; \
+ t1=(BN_ULONG)Lw(t); \
+ t2=(BN_ULONG)Hw(t); \
+ c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
+ c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+
+#define mul_add_c2(a,b,c0,c1,c2) \
+ t=(BN_ULLONG)a*b; \
+ tt=(t+t)&BN_MASK; \
+ if (tt < t) c2++; \
+ t1=(BN_ULONG)Lw(tt); \
+ t2=(BN_ULONG)Hw(tt); \
+ c0=(c0+t1)&BN_MASK2; \
+ if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
+ c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+
+#define sqr_add_c(a,i,c0,c1,c2) \
+ t=(BN_ULLONG)a[i]*a[i]; \
+ t1=(BN_ULONG)Lw(t); \
+ t2=(BN_ULONG)Hw(t); \
+ c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
+ c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+
+#define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+
+#elif defined(BN_UMULT_LOHI)
+
+#define mul_add_c(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b); \
+ BN_UMULT_LOHI(t1,t2,ta,tb); \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define mul_add_c2(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b),t0; \
+ BN_UMULT_LOHI(t0,t1,ta,tb); \
+ t2 = t1+t1; c2 += (t2<t1)?1:0; \
+ t1 = t0+t0; t2 += (t1<t0)?1:0; \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define sqr_add_c(a,i,c0,c1,c2) { \
+ BN_ULONG ta=(a)[i]; \
+ BN_UMULT_LOHI(t1,t2,ta,ta); \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+
+#elif defined(BN_UMULT_HIGH)
+
+#define mul_add_c(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b); \
+ t1 = ta * tb; \
+ t2 = BN_UMULT_HIGH(ta,tb); \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define mul_add_c2(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b),t0; \
+ t1 = BN_UMULT_HIGH(ta,tb); \
+ t0 = ta * tb; \
+ t2 = t1+t1; c2 += (t2<t1)?1:0; \
+ t1 = t0+t0; t2 += (t1<t0)?1:0; \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define sqr_add_c(a,i,c0,c1,c2) { \
+ BN_ULONG ta=(a)[i]; \
+ t1 = ta * ta; \
+ t2 = BN_UMULT_HIGH(ta,ta); \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+#define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+
+#else /* !BN_LLONG */
+#define mul_add_c(a,b,c0,c1,c2) \
+ t1=LBITS(a); t2=HBITS(a); \
+ bl=LBITS(b); bh=HBITS(b); \
+ mul64(t1,t2,bl,bh); \
+ c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
+ c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+
+#define mul_add_c2(a,b,c0,c1,c2) \
+ t1=LBITS(a); t2=HBITS(a); \
+ bl=LBITS(b); bh=HBITS(b); \
+ mul64(t1,t2,bl,bh); \
+ if (t2 & BN_TBIT) c2++; \
+ t2=(t2+t2)&BN_MASK2; \
+ if (t1 & BN_TBIT) t2++; \
+ t1=(t1+t1)&BN_MASK2; \
+ c0=(c0+t1)&BN_MASK2; \
+ if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
+ c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+
+#define sqr_add_c(a,i,c0,c1,c2) \
+ sqr64(t1,t2,(a)[i]); \
+ c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
+ c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+
+#define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+#endif /* !BN_LLONG */
+
+void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+#ifdef BN_LLONG
+ BN_ULLONG t;
+#else
+ BN_ULONG bl,bh;
+#endif
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ mul_add_c(a[0],b[0],c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ mul_add_c(a[0],b[1],c2,c3,c1);
+ mul_add_c(a[1],b[0],c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ mul_add_c(a[2],b[0],c3,c1,c2);
+ mul_add_c(a[1],b[1],c3,c1,c2);
+ mul_add_c(a[0],b[2],c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ mul_add_c(a[0],b[3],c1,c2,c3);
+ mul_add_c(a[1],b[2],c1,c2,c3);
+ mul_add_c(a[2],b[1],c1,c2,c3);
+ mul_add_c(a[3],b[0],c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ mul_add_c(a[4],b[0],c2,c3,c1);
+ mul_add_c(a[3],b[1],c2,c3,c1);
+ mul_add_c(a[2],b[2],c2,c3,c1);
+ mul_add_c(a[1],b[3],c2,c3,c1);
+ mul_add_c(a[0],b[4],c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ mul_add_c(a[0],b[5],c3,c1,c2);
+ mul_add_c(a[1],b[4],c3,c1,c2);
+ mul_add_c(a[2],b[3],c3,c1,c2);
+ mul_add_c(a[3],b[2],c3,c1,c2);
+ mul_add_c(a[4],b[1],c3,c1,c2);
+ mul_add_c(a[5],b[0],c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ mul_add_c(a[6],b[0],c1,c2,c3);
+ mul_add_c(a[5],b[1],c1,c2,c3);
+ mul_add_c(a[4],b[2],c1,c2,c3);
+ mul_add_c(a[3],b[3],c1,c2,c3);
+ mul_add_c(a[2],b[4],c1,c2,c3);
+ mul_add_c(a[1],b[5],c1,c2,c3);
+ mul_add_c(a[0],b[6],c1,c2,c3);
+ r[6]=c1;
+ c1=0;
+ mul_add_c(a[0],b[7],c2,c3,c1);
+ mul_add_c(a[1],b[6],c2,c3,c1);
+ mul_add_c(a[2],b[5],c2,c3,c1);
+ mul_add_c(a[3],b[4],c2,c3,c1);
+ mul_add_c(a[4],b[3],c2,c3,c1);
+ mul_add_c(a[5],b[2],c2,c3,c1);
+ mul_add_c(a[6],b[1],c2,c3,c1);
+ mul_add_c(a[7],b[0],c2,c3,c1);
+ r[7]=c2;
+ c2=0;
+ mul_add_c(a[7],b[1],c3,c1,c2);
+ mul_add_c(a[6],b[2],c3,c1,c2);
+ mul_add_c(a[5],b[3],c3,c1,c2);
+ mul_add_c(a[4],b[4],c3,c1,c2);
+ mul_add_c(a[3],b[5],c3,c1,c2);
+ mul_add_c(a[2],b[6],c3,c1,c2);
+ mul_add_c(a[1],b[7],c3,c1,c2);
+ r[8]=c3;
+ c3=0;
+ mul_add_c(a[2],b[7],c1,c2,c3);
+ mul_add_c(a[3],b[6],c1,c2,c3);
+ mul_add_c(a[4],b[5],c1,c2,c3);
+ mul_add_c(a[5],b[4],c1,c2,c3);
+ mul_add_c(a[6],b[3],c1,c2,c3);
+ mul_add_c(a[7],b[2],c1,c2,c3);
+ r[9]=c1;
+ c1=0;
+ mul_add_c(a[7],b[3],c2,c3,c1);
+ mul_add_c(a[6],b[4],c2,c3,c1);
+ mul_add_c(a[5],b[5],c2,c3,c1);
+ mul_add_c(a[4],b[6],c2,c3,c1);
+ mul_add_c(a[3],b[7],c2,c3,c1);
+ r[10]=c2;
+ c2=0;
+ mul_add_c(a[4],b[7],c3,c1,c2);
+ mul_add_c(a[5],b[6],c3,c1,c2);
+ mul_add_c(a[6],b[5],c3,c1,c2);
+ mul_add_c(a[7],b[4],c3,c1,c2);
+ r[11]=c3;
+ c3=0;
+ mul_add_c(a[7],b[5],c1,c2,c3);
+ mul_add_c(a[6],b[6],c1,c2,c3);
+ mul_add_c(a[5],b[7],c1,c2,c3);
+ r[12]=c1;
+ c1=0;
+ mul_add_c(a[6],b[7],c2,c3,c1);
+ mul_add_c(a[7],b[6],c2,c3,c1);
+ r[13]=c2;
+ c2=0;
+ mul_add_c(a[7],b[7],c3,c1,c2);
+ r[14]=c3;
+ r[15]=c1;
+ }
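+
+/*
+ * The unrolled schedule above is the column-by-column (Comba) evaluation
+ * of r[k] = sum over i+j==k of a[i]*b[j], k = 0..14, with the three-word
+ * accumulator (c1,c2,c3 in rotating roles) carrying between columns and
+ * r[15] receiving the final high word.
+ */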
+
+void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+#ifdef BN_LLONG
+ BN_ULLONG t;
+#else
+ BN_ULONG bl,bh;
+#endif
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ mul_add_c(a[0],b[0],c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ mul_add_c(a[0],b[1],c2,c3,c1);
+ mul_add_c(a[1],b[0],c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ mul_add_c(a[2],b[0],c3,c1,c2);
+ mul_add_c(a[1],b[1],c3,c1,c2);
+ mul_add_c(a[0],b[2],c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ mul_add_c(a[0],b[3],c1,c2,c3);
+ mul_add_c(a[1],b[2],c1,c2,c3);
+ mul_add_c(a[2],b[1],c1,c2,c3);
+ mul_add_c(a[3],b[0],c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ mul_add_c(a[3],b[1],c2,c3,c1);
+ mul_add_c(a[2],b[2],c2,c3,c1);
+ mul_add_c(a[1],b[3],c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ mul_add_c(a[2],b[3],c3,c1,c2);
+ mul_add_c(a[3],b[2],c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ mul_add_c(a[3],b[3],c1,c2,c3);
+ r[6]=c1;
+ r[7]=c2;
+ }
+
+void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
+ {
+#ifdef BN_LLONG
+ BN_ULLONG t,tt;
+#else
+ BN_ULONG bl,bh;
+#endif
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ sqr_add_c(a,0,c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ sqr_add_c2(a,1,0,c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ sqr_add_c(a,1,c3,c1,c2);
+ sqr_add_c2(a,2,0,c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ sqr_add_c2(a,3,0,c1,c2,c3);
+ sqr_add_c2(a,2,1,c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ sqr_add_c(a,2,c2,c3,c1);
+ sqr_add_c2(a,3,1,c2,c3,c1);
+ sqr_add_c2(a,4,0,c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ sqr_add_c2(a,5,0,c3,c1,c2);
+ sqr_add_c2(a,4,1,c3,c1,c2);
+ sqr_add_c2(a,3,2,c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ sqr_add_c(a,3,c1,c2,c3);
+ sqr_add_c2(a,4,2,c1,c2,c3);
+ sqr_add_c2(a,5,1,c1,c2,c3);
+ sqr_add_c2(a,6,0,c1,c2,c3);
+ r[6]=c1;
+ c1=0;
+ sqr_add_c2(a,7,0,c2,c3,c1);
+ sqr_add_c2(a,6,1,c2,c3,c1);
+ sqr_add_c2(a,5,2,c2,c3,c1);
+ sqr_add_c2(a,4,3,c2,c3,c1);
+ r[7]=c2;
+ c2=0;
+ sqr_add_c(a,4,c3,c1,c2);
+ sqr_add_c2(a,5,3,c3,c1,c2);
+ sqr_add_c2(a,6,2,c3,c1,c2);
+ sqr_add_c2(a,7,1,c3,c1,c2);
+ r[8]=c3;
+ c3=0;
+ sqr_add_c2(a,7,2,c1,c2,c3);
+ sqr_add_c2(a,6,3,c1,c2,c3);
+ sqr_add_c2(a,5,4,c1,c2,c3);
+ r[9]=c1;
+ c1=0;
+ sqr_add_c(a,5,c2,c3,c1);
+ sqr_add_c2(a,6,4,c2,c3,c1);
+ sqr_add_c2(a,7,3,c2,c3,c1);
+ r[10]=c2;
+ c2=0;
+ sqr_add_c2(a,7,4,c3,c1,c2);
+ sqr_add_c2(a,6,5,c3,c1,c2);
+ r[11]=c3;
+ c3=0;
+ sqr_add_c(a,6,c1,c2,c3);
+ sqr_add_c2(a,7,5,c1,c2,c3);
+ r[12]=c1;
+ c1=0;
+ sqr_add_c2(a,7,6,c2,c3,c1);
+ r[13]=c2;
+ c2=0;
+ sqr_add_c(a,7,c3,c1,c2);
+ r[14]=c3;
+ r[15]=c1;
+ }
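+
+/*
+ * Squaring relies on the identity
+ *
+ *     (SUM a[i]*2^(i*BN_BITS2))^2 =
+ *         SUM a[i]^2 * 2^(2*i*BN_BITS2) + 2*SUM_{i>j} a[i]*a[j]*2^((i+j)*BN_BITS2),
+ *
+ * which is why diagonal terms go through sqr_add_c() and cross terms
+ * through the doubling sqr_add_c2().
+ */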
+
+void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
+ {
+#ifdef BN_LLONG
+ BN_ULLONG t,tt;
+#else
+ BN_ULONG bl,bh;
+#endif
+ BN_ULONG t1,t2;
+ BN_ULONG c1,c2,c3;
+
+ c1=0;
+ c2=0;
+ c3=0;
+ sqr_add_c(a,0,c1,c2,c3);
+ r[0]=c1;
+ c1=0;
+ sqr_add_c2(a,1,0,c2,c3,c1);
+ r[1]=c2;
+ c2=0;
+ sqr_add_c(a,1,c3,c1,c2);
+ sqr_add_c2(a,2,0,c3,c1,c2);
+ r[2]=c3;
+ c3=0;
+ sqr_add_c2(a,3,0,c1,c2,c3);
+ sqr_add_c2(a,2,1,c1,c2,c3);
+ r[3]=c1;
+ c1=0;
+ sqr_add_c(a,2,c2,c3,c1);
+ sqr_add_c2(a,3,1,c2,c3,c1);
+ r[4]=c2;
+ c2=0;
+ sqr_add_c2(a,3,2,c3,c1,c2);
+ r[5]=c3;
+ c3=0;
+ sqr_add_c(a,3,c1,c2,c3);
+ r[6]=c1;
+ r[7]=c2;
+ }
+
+#ifdef OPENSSL_NO_ASM
+#ifdef OPENSSL_BN_ASM_MONT
+#include <alloca.h>
+/*
+ * This is essentially a reference implementation, which may or may not
+ * result in a performance improvement. E.g. on IA-32 this routine was
+ * observed to give 40% faster rsa1024 private key operations and 10%
+ * faster rsa4096 ones, while on AMD64 it improves rsa1024 sign only
+ * by 10% and *worsens* rsa4096 sign by 15%. Once again, it's a
+ * reference implementation, one to be used as a starting point for
+ * platform-specific assembler. The numbers mentioned apply to
+ * compiler-generated code compiled with and without -DOPENSSL_BN_ASM_MONT
+ * and can vary not only from platform to platform, but even between
+ * compiler versions. Assembler-vs-assembler improvement coefficients can
+ * [and are known to] differ and are to be documented elsewhere.
+ */
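+/*
+ * A sketch of the word-wise Montgomery step implemented below, assuming
+ * b == 2^BN_BITS2, R == b^num and n0 == -np^-1 mod b: for each word bp[i]
+ *
+ *     t  = tp + bp[i]*ap
+ *     m  = ((t mod b) * n0) mod b
+ *     tp = (t + m*np) / b              (exact; the low word cancels)
+ *
+ * so after num iterations tp == ap*bp*R^-1 (mod np), up to one final
+ * conditional subtraction of np.
+ */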
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
+ {
+ BN_ULONG c0,c1,ml,*tp,n0;
+#ifdef mul64
+ BN_ULONG mh;
+#endif
+ volatile BN_ULONG *vp;
+ int i=0,j;
+
+#if 0 /* template for platform-specific implementation */
+ if (ap==bp) return bn_sqr_mont(rp,ap,np,n0p,num);
+#endif
+ vp = tp = alloca((num+2)*sizeof(BN_ULONG));
+
+ n0 = *n0p;
+
+ c0 = 0;
+ ml = bp[0];
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ for (j=0;j<num;++j)
+ mul(tp[j],ap[j],ml,mh,c0);
+#else
+ for (j=0;j<num;++j)
+ mul(tp[j],ap[j],ml,c0);
+#endif
+
+ tp[num] = c0;
+ tp[num+1] = 0;
+ goto enter;
+
+ for(i=0;i<num;i++)
+ {
+ c0 = 0;
+ ml = bp[i];
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ for (j=0;j<num;++j)
+ mul_add(tp[j],ap[j],ml,mh,c0);
+#else
+ for (j=0;j<num;++j)
+ mul_add(tp[j],ap[j],ml,c0);
+#endif
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num] = c1;
+ tp[num+1] = (c1<c0?1:0);
+ enter:
+ c1 = tp[0];
+ ml = (c1*n0)&BN_MASK2;
+ c0 = 0;
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ mul_add(c1,np[0],ml,mh,c0);
+#else
+ mul_add(c1,ml,np[0],c0);
+#endif
+ for(j=1;j<num;j++)
+ {
+ c1 = tp[j];
+#ifdef mul64
+ mul_add(c1,np[j],ml,mh,c0);
+#else
+ mul_add(c1,ml,np[j],c0);
+#endif
+ tp[j-1] = c1&BN_MASK2;
+ }
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num-1] = c1;
+ tp[num] = tp[num+1] + (c1<c0?1:0);
+ }
+
+ if (tp[num]!=0 || tp[num-1]>=np[num-1])
+ {
+ c0 = bn_sub_words(rp,tp,np,num);
+ if (tp[num]!=0 || c0==0)
+ {
+ for(i=0;i<num+2;i++) vp[i] = 0;
+ return 1;
+ }
+ }
+ for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
+ vp[num] = 0;
+ vp[num+1] = 0;
+ return 1;
+ }
+#else
+/*
+ * A return value of 0 indicates that the multiplication/convolution was
+ * not performed, signalling the caller to fall back to the
+ * alternative/original code path.
+ */
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
+{ return 0; }
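+/*
+ * A minimal caller-side sketch of the protocol described above
+ * (illustrative only; the real dispatch lives in the Montgomery code,
+ * not here):
+ *
+ *     if (!bn_mul_mont(rp, ap, bp, np, n0, num))
+ *         (fall back to a plain multiply followed by a Montgomery
+ *          reduction)
+ */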
+#endif /* OPENSSL_BN_ASM_MONT */
+#endif
+
+#else /* !BN_MUL_COMBA */
+
+/* hmm... is it faster just to do a multiply? */
+#undef bn_sqr_comba4
+void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
+ {
+ BN_ULONG t[8];
+ bn_sqr_normal(r,a,4,t);
+ }
+
+#undef bn_sqr_comba8
+void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
+ {
+ BN_ULONG t[16];
+ bn_sqr_normal(r,a,8,t);
+ }
+
+void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+ r[4]=bn_mul_words( &(r[0]),a,4,b[0]);
+ r[5]=bn_mul_add_words(&(r[1]),a,4,b[1]);
+ r[6]=bn_mul_add_words(&(r[2]),a,4,b[2]);
+ r[7]=bn_mul_add_words(&(r[3]),a,4,b[3]);
+ }
+
+void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+ {
+ r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]);
+ r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
+ r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
+ r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
+ r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
+ r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
+ r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
+ r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
+ }
+
+#ifdef OPENSSL_NO_ASM
+#ifdef OPENSSL_BN_ASM_MONT
+#include <alloca.h>
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
+ {
+ BN_ULONG c0,c1,*tp,n0=*n0p;
+ volatile BN_ULONG *vp;
+ int i=0,j;
+
+ vp = tp = alloca((num+2)*sizeof(BN_ULONG));
+
+ for(i=0;i<=num;i++) tp[i]=0;
+
+ for(i=0;i<num;i++)
+ {
+ c0 = bn_mul_add_words(tp,ap,num,bp[i]);
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num] = c1;
+ tp[num+1] = (c1<c0?1:0);
+
+ c0 = bn_mul_add_words(tp,np,num,tp[0]*n0);
+ c1 = (tp[num] + c0)&BN_MASK2;
+ tp[num] = c1;
+ tp[num+1] += (c1<c0?1:0);
+ for(j=0;j<=num;j++) tp[j]=tp[j+1];
+ }
+
+ if (tp[num]!=0 || tp[num-1]>=np[num-1])
+ {
+ c0 = bn_sub_words(rp,tp,np,num);
+ if (tp[num]!=0 || c0==0)
+ {
+ for(i=0;i<num+2;i++) vp[i] = 0;
+ return 1;
+ }
+ }
+ for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
+ vp[num] = 0;
+ vp[num+1] = 0;
+ return 1;
+ }
+#else
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
+{ return 0; }
+#endif /* OPENSSL_BN_ASM_MONT */
+#endif
+
+#endif /* !BN_MUL_COMBA */
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_blind.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_blind.c
new file mode 100644
index 00000000..9ed8bc2b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_blind.c
@@ -0,0 +1,385 @@
+/* crypto/bn/bn_blind.c */
+/* ====================================================================
+ * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+#define BN_BLINDING_COUNTER 32
+
+struct bn_blinding_st
+ {
+ BIGNUM *A;
+ BIGNUM *Ai;
+ BIGNUM *e;
+ BIGNUM *mod; /* just a reference */
+#ifndef OPENSSL_NO_DEPRECATED
+ unsigned long thread_id; /* added in OpenSSL 0.9.6j and 0.9.7b;
+ * used only by crypto/rsa/rsa_eay.c, rsa_lib.c */
+#endif
+ CRYPTO_THREADID tid;
+ int counter;
+ unsigned long flags;
+ BN_MONT_CTX *m_ctx;
+ int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx,
+ BN_MONT_CTX *m_ctx);
+ };
+
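+/*
+ * How the fields are used for RSA blinding (a sketch; see
+ * BN_BLINDING_create_param() below): for a random r, A holds r^e mod m
+ * and Ai holds r^-1 mod m, so BN_BLINDING_convert() multiplies the input
+ * by r^e, the private-key operation turns that factor into r, and
+ * BN_BLINDING_invert() removes it again by multiplying with r^-1.
+ */
+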
+BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod)
+ {
+ BN_BLINDING *ret=NULL;
+
+ bn_check_top(mod);
+
+ if ((ret=(BN_BLINDING *)OPENSSL_malloc(sizeof(BN_BLINDING))) == NULL)
+ {
+ BNerr(BN_F_BN_BLINDING_NEW,ERR_R_MALLOC_FAILURE);
+ return(NULL);
+ }
+ memset(ret,0,sizeof(BN_BLINDING));
+ if (A != NULL)
+ {
+ if ((ret->A = BN_dup(A)) == NULL) goto err;
+ }
+ if (Ai != NULL)
+ {
+ if ((ret->Ai = BN_dup(Ai)) == NULL) goto err;
+ }
+
+ /* save a copy of mod in the BN_BLINDING structure */
+ if ((ret->mod = BN_dup(mod)) == NULL) goto err;
+ if (BN_get_flags(mod, BN_FLG_CONSTTIME) != 0)
+ BN_set_flags(ret->mod, BN_FLG_CONSTTIME);
+
+ /* Set the counter to the special value -1
+ * to indicate that this is never-used fresh blinding
+ * that does not need updating before first use. */
+ ret->counter = -1;
+ CRYPTO_THREADID_current(&ret->tid);
+ return(ret);
+err:
+ if (ret != NULL) BN_BLINDING_free(ret);
+ return(NULL);
+ }
+
+void BN_BLINDING_free(BN_BLINDING *r)
+ {
+ if(r == NULL)
+ return;
+
+ if (r->A != NULL) BN_free(r->A );
+ if (r->Ai != NULL) BN_free(r->Ai);
+ if (r->e != NULL) BN_free(r->e );
+ if (r->mod != NULL) BN_free(r->mod);
+ OPENSSL_free(r);
+ }
+
+int BN_BLINDING_update(BN_BLINDING *b, BN_CTX *ctx)
+ {
+ int ret=0;
+
+ if ((b->A == NULL) || (b->Ai == NULL))
+ {
+ BNerr(BN_F_BN_BLINDING_UPDATE,BN_R_NOT_INITIALIZED);
+ goto err;
+ }
+
+ if (b->counter == -1)
+ b->counter = 0;
+
+ if (++b->counter == BN_BLINDING_COUNTER && b->e != NULL &&
+ !(b->flags & BN_BLINDING_NO_RECREATE))
+ {
+ /* re-create blinding parameters */
+ if (!BN_BLINDING_create_param(b, NULL, NULL, ctx, NULL, NULL))
+ goto err;
+ }
+ else if (!(b->flags & BN_BLINDING_NO_UPDATE))
+ {
+ if (!BN_mod_mul(b->A,b->A,b->A,b->mod,ctx)) goto err;
+ if (!BN_mod_mul(b->Ai,b->Ai,b->Ai,b->mod,ctx)) goto err;
+ }
+
+ ret=1;
+err:
+ if (b->counter == BN_BLINDING_COUNTER)
+ b->counter = 0;
+ return(ret);
+ }
+
+int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx)
+ {
+ return BN_BLINDING_convert_ex(n, NULL, b, ctx);
+ }
+
+int BN_BLINDING_convert_ex(BIGNUM *n, BIGNUM *r, BN_BLINDING *b, BN_CTX *ctx)
+ {
+ int ret = 1;
+
+ bn_check_top(n);
+
+ if ((b->A == NULL) || (b->Ai == NULL))
+ {
+ BNerr(BN_F_BN_BLINDING_CONVERT_EX,BN_R_NOT_INITIALIZED);
+ return(0);
+ }
+
+ if (b->counter == -1)
+ /* Fresh blinding, doesn't need updating. */
+ b->counter = 0;
+ else if (!BN_BLINDING_update(b,ctx))
+ return(0);
+
+ if (r != NULL)
+ {
+ if (!BN_copy(r, b->Ai)) ret=0;
+ }
+
+ if (!BN_mod_mul(n,n,b->A,b->mod,ctx)) ret=0;
+
+ return ret;
+ }
+
+int BN_BLINDING_invert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx)
+ {
+ return BN_BLINDING_invert_ex(n, NULL, b, ctx);
+ }
+
+int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b, BN_CTX *ctx)
+ {
+ int ret;
+
+ bn_check_top(n);
+
+ if (r != NULL)
+ ret = BN_mod_mul(n, n, r, b->mod, ctx);
+ else
+ {
+ if (b->Ai == NULL)
+ {
+ BNerr(BN_F_BN_BLINDING_INVERT_EX,BN_R_NOT_INITIALIZED);
+ return(0);
+ }
+ ret = BN_mod_mul(n, n, b->Ai, b->mod, ctx);
+ }
+
+ bn_check_top(n);
+ return(ret);
+ }
+
+#ifndef OPENSSL_NO_DEPRECATED
+unsigned long BN_BLINDING_get_thread_id(const BN_BLINDING *b)
+ {
+ return b->thread_id;
+ }
+
+void BN_BLINDING_set_thread_id(BN_BLINDING *b, unsigned long n)
+ {
+ b->thread_id = n;
+ }
+#endif
+
+CRYPTO_THREADID *BN_BLINDING_thread_id(BN_BLINDING *b)
+ {
+ return &b->tid;
+ }
+
+unsigned long BN_BLINDING_get_flags(const BN_BLINDING *b)
+ {
+ return b->flags;
+ }
+
+void BN_BLINDING_set_flags(BN_BLINDING *b, unsigned long flags)
+ {
+ b->flags = flags;
+ }
+
+BN_BLINDING *BN_BLINDING_create_param(BN_BLINDING *b,
+ const BIGNUM *e, BIGNUM *m, BN_CTX *ctx,
+ int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx),
+ BN_MONT_CTX *m_ctx)
+{
+ int retry_counter = 32;
+ BN_BLINDING *ret = NULL;
+
+ if (b == NULL)
+ ret = BN_BLINDING_new(NULL, NULL, m);
+ else
+ ret = b;
+
+ if (ret == NULL)
+ goto err;
+
+ if (ret->A == NULL && (ret->A = BN_new()) == NULL)
+ goto err;
+ if (ret->Ai == NULL && (ret->Ai = BN_new()) == NULL)
+ goto err;
+
+ if (e != NULL)
+ {
+ if (ret->e != NULL)
+ BN_free(ret->e);
+ ret->e = BN_dup(e);
+ }
+ if (ret->e == NULL)
+ goto err;
+
+ if (bn_mod_exp != NULL)
+ ret->bn_mod_exp = bn_mod_exp;
+ if (m_ctx != NULL)
+ ret->m_ctx = m_ctx;
+
+ do {
+ if (!BN_rand_range(ret->A, ret->mod)) goto err;
+ if (BN_mod_inverse(ret->Ai, ret->A, ret->mod, ctx) == NULL)
+ {
+ /* this should almost never happen for good RSA keys */
+ unsigned long error = ERR_peek_last_error();
+ if (ERR_GET_REASON(error) == BN_R_NO_INVERSE)
+ {
+ if (retry_counter-- == 0)
+ {
+ BNerr(BN_F_BN_BLINDING_CREATE_PARAM,
+ BN_R_TOO_MANY_ITERATIONS);
+ goto err;
+ }
+ ERR_clear_error();
+ }
+ else
+ goto err;
+ }
+ else
+ break;
+ } while (1);
+
+ if (ret->bn_mod_exp != NULL && ret->m_ctx != NULL)
+ {
+ if (!ret->bn_mod_exp(ret->A, ret->A, ret->e, ret->mod, ctx, ret->m_ctx))
+ goto err;
+ }
+ else
+ {
+ if (!BN_mod_exp(ret->A, ret->A, ret->e, ret->mod, ctx))
+ goto err;
+ }
+
+ return ret;
+err:
+ if (b == NULL && ret != NULL)
+ {
+ BN_BLINDING_free(ret);
+ ret = NULL;
+ }
+
+ return ret;
+}
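+
+/*
+ * A minimal usage sketch (illustrative only; error handling omitted and
+ * the variable names e, n, ctx, input and result are placeholders):
+ *
+ *     BN_BLINDING *bl = BN_BLINDING_create_param(NULL, e, n, ctx, NULL, NULL);
+ *     BN_BLINDING_convert(input, bl, ctx);
+ *     (private-key exponentiation on input, giving result)
+ *     BN_BLINDING_invert(result, bl, ctx);
+ *     BN_BLINDING_free(bl);
+ */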
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_const.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_const.c
new file mode 100755
index 00000000..eb60a25b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_const.c
@@ -0,0 +1,402 @@
+/* crypto/bn/knownprimes.c */
+/* Insert boilerplate */
+
+#include "bn.h"
+
+/* "First Oakley Default Group" from RFC2409, section 6.1.
+ *
+ * The prime is: 2^768 - 2 ^704 - 1 + 2^64 * { [2^638 pi] + 149686 }
+ *
+ * RFC2409 specifies a generator of 2.
+ * RFC2412 specifies a generator of of 22.
+ */
+
+BIGNUM *get_rfc2409_prime_768(BIGNUM *bn)
+ {
+ static const unsigned char RFC2409_PRIME_768[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x3A,0x36,0x20,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC2409_PRIME_768,sizeof(RFC2409_PRIME_768),bn);
+ }
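+
+/*
+ * A minimal usage sketch for these getters (illustrative; error handling
+ * omitted): passing NULL lets BN_bin2bn() allocate a fresh BIGNUM,
+ * otherwise the supplied one is reused.
+ *
+ *     BIGNUM *p = get_rfc2409_prime_768(NULL);
+ *     ...
+ *     BN_free(p);
+ */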
+
+/* "Second Oakley Default Group" from RFC2409, section 6.2.
+ *
+ * The prime is: 2^1024 - 2^960 - 1 + 2^64 * { [2^894 pi] + 129093 }.
+ *
+ * RFC2409 specifies a generator of 2.
+ * RFC2412 specifies a generator of 22.
+ */
+
+BIGNUM *get_rfc2409_prime_1024(BIGNUM *bn)
+ {
+ static const unsigned char RFC2409_PRIME_1024[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x37,0xED,0x6B,0x0B,0xFF,0x5C,0xB6,0xF4,0x06,0xB7,0xED,
+ 0xEE,0x38,0x6B,0xFB,0x5A,0x89,0x9F,0xA5,0xAE,0x9F,0x24,0x11,
+ 0x7C,0x4B,0x1F,0xE6,0x49,0x28,0x66,0x51,0xEC,0xE6,0x53,0x81,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC2409_PRIME_1024,sizeof(RFC2409_PRIME_1024),bn);
+ }
+
+/* "1536-bit MODP Group" from RFC3526, Section 2.
+ *
+ * The prime is: 2^1536 - 2^1472 - 1 + 2^64 * { [2^1406 pi] + 741804 }
+ *
+ * RFC3526 specifies a generator of 2.
+ * RFC2412 specifies a generator of 22.
+ */
+
+BIGNUM *get_rfc3526_prime_1536(BIGNUM *bn)
+ {
+ static const unsigned char RFC3526_PRIME_1536[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x37,0xED,0x6B,0x0B,0xFF,0x5C,0xB6,0xF4,0x06,0xB7,0xED,
+ 0xEE,0x38,0x6B,0xFB,0x5A,0x89,0x9F,0xA5,0xAE,0x9F,0x24,0x11,
+ 0x7C,0x4B,0x1F,0xE6,0x49,0x28,0x66,0x51,0xEC,0xE4,0x5B,0x3D,
+ 0xC2,0x00,0x7C,0xB8,0xA1,0x63,0xBF,0x05,0x98,0xDA,0x48,0x36,
+ 0x1C,0x55,0xD3,0x9A,0x69,0x16,0x3F,0xA8,0xFD,0x24,0xCF,0x5F,
+ 0x83,0x65,0x5D,0x23,0xDC,0xA3,0xAD,0x96,0x1C,0x62,0xF3,0x56,
+ 0x20,0x85,0x52,0xBB,0x9E,0xD5,0x29,0x07,0x70,0x96,0x96,0x6D,
+ 0x67,0x0C,0x35,0x4E,0x4A,0xBC,0x98,0x04,0xF1,0x74,0x6C,0x08,
+ 0xCA,0x23,0x73,0x27,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC3526_PRIME_1536,sizeof(RFC3526_PRIME_1536),bn);
+ }
+
+/* "2048-bit MODP Group" from RFC3526, Section 3.
+ *
+ * The prime is: 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }
+ *
+ * RFC3526 specifies a generator of 2.
+ */
+
+BIGNUM *get_rfc3526_prime_2048(BIGNUM *bn)
+ {
+ static const unsigned char RFC3526_PRIME_2048[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x37,0xED,0x6B,0x0B,0xFF,0x5C,0xB6,0xF4,0x06,0xB7,0xED,
+ 0xEE,0x38,0x6B,0xFB,0x5A,0x89,0x9F,0xA5,0xAE,0x9F,0x24,0x11,
+ 0x7C,0x4B,0x1F,0xE6,0x49,0x28,0x66,0x51,0xEC,0xE4,0x5B,0x3D,
+ 0xC2,0x00,0x7C,0xB8,0xA1,0x63,0xBF,0x05,0x98,0xDA,0x48,0x36,
+ 0x1C,0x55,0xD3,0x9A,0x69,0x16,0x3F,0xA8,0xFD,0x24,0xCF,0x5F,
+ 0x83,0x65,0x5D,0x23,0xDC,0xA3,0xAD,0x96,0x1C,0x62,0xF3,0x56,
+ 0x20,0x85,0x52,0xBB,0x9E,0xD5,0x29,0x07,0x70,0x96,0x96,0x6D,
+ 0x67,0x0C,0x35,0x4E,0x4A,0xBC,0x98,0x04,0xF1,0x74,0x6C,0x08,
+ 0xCA,0x18,0x21,0x7C,0x32,0x90,0x5E,0x46,0x2E,0x36,0xCE,0x3B,
+ 0xE3,0x9E,0x77,0x2C,0x18,0x0E,0x86,0x03,0x9B,0x27,0x83,0xA2,
+ 0xEC,0x07,0xA2,0x8F,0xB5,0xC5,0x5D,0xF0,0x6F,0x4C,0x52,0xC9,
+ 0xDE,0x2B,0xCB,0xF6,0x95,0x58,0x17,0x18,0x39,0x95,0x49,0x7C,
+ 0xEA,0x95,0x6A,0xE5,0x15,0xD2,0x26,0x18,0x98,0xFA,0x05,0x10,
+ 0x15,0x72,0x8E,0x5A,0x8A,0xAC,0xAA,0x68,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC3526_PRIME_2048,sizeof(RFC3526_PRIME_2048),bn);
+ }
+
+/* "3072-bit MODP Group" from RFC3526, Section 4.
+ *
+ * The prime is: 2^3072 - 2^3008 - 1 + 2^64 * { [2^2942 pi] + 1690314 }
+ *
+ * RFC3526 specifies a generator of 2.
+ */
+
+BIGNUM *get_rfc3526_prime_3072(BIGNUM *bn)
+ {
+ static const unsigned char RFC3526_PRIME_3072[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x37,0xED,0x6B,0x0B,0xFF,0x5C,0xB6,0xF4,0x06,0xB7,0xED,
+ 0xEE,0x38,0x6B,0xFB,0x5A,0x89,0x9F,0xA5,0xAE,0x9F,0x24,0x11,
+ 0x7C,0x4B,0x1F,0xE6,0x49,0x28,0x66,0x51,0xEC,0xE4,0x5B,0x3D,
+ 0xC2,0x00,0x7C,0xB8,0xA1,0x63,0xBF,0x05,0x98,0xDA,0x48,0x36,
+ 0x1C,0x55,0xD3,0x9A,0x69,0x16,0x3F,0xA8,0xFD,0x24,0xCF,0x5F,
+ 0x83,0x65,0x5D,0x23,0xDC,0xA3,0xAD,0x96,0x1C,0x62,0xF3,0x56,
+ 0x20,0x85,0x52,0xBB,0x9E,0xD5,0x29,0x07,0x70,0x96,0x96,0x6D,
+ 0x67,0x0C,0x35,0x4E,0x4A,0xBC,0x98,0x04,0xF1,0x74,0x6C,0x08,
+ 0xCA,0x18,0x21,0x7C,0x32,0x90,0x5E,0x46,0x2E,0x36,0xCE,0x3B,
+ 0xE3,0x9E,0x77,0x2C,0x18,0x0E,0x86,0x03,0x9B,0x27,0x83,0xA2,
+ 0xEC,0x07,0xA2,0x8F,0xB5,0xC5,0x5D,0xF0,0x6F,0x4C,0x52,0xC9,
+ 0xDE,0x2B,0xCB,0xF6,0x95,0x58,0x17,0x18,0x39,0x95,0x49,0x7C,
+ 0xEA,0x95,0x6A,0xE5,0x15,0xD2,0x26,0x18,0x98,0xFA,0x05,0x10,
+ 0x15,0x72,0x8E,0x5A,0x8A,0xAA,0xC4,0x2D,0xAD,0x33,0x17,0x0D,
+ 0x04,0x50,0x7A,0x33,0xA8,0x55,0x21,0xAB,0xDF,0x1C,0xBA,0x64,
+ 0xEC,0xFB,0x85,0x04,0x58,0xDB,0xEF,0x0A,0x8A,0xEA,0x71,0x57,
+ 0x5D,0x06,0x0C,0x7D,0xB3,0x97,0x0F,0x85,0xA6,0xE1,0xE4,0xC7,
+ 0xAB,0xF5,0xAE,0x8C,0xDB,0x09,0x33,0xD7,0x1E,0x8C,0x94,0xE0,
+ 0x4A,0x25,0x61,0x9D,0xCE,0xE3,0xD2,0x26,0x1A,0xD2,0xEE,0x6B,
+ 0xF1,0x2F,0xFA,0x06,0xD9,0x8A,0x08,0x64,0xD8,0x76,0x02,0x73,
+ 0x3E,0xC8,0x6A,0x64,0x52,0x1F,0x2B,0x18,0x17,0x7B,0x20,0x0C,
+ 0xBB,0xE1,0x17,0x57,0x7A,0x61,0x5D,0x6C,0x77,0x09,0x88,0xC0,
+ 0xBA,0xD9,0x46,0xE2,0x08,0xE2,0x4F,0xA0,0x74,0xE5,0xAB,0x31,
+ 0x43,0xDB,0x5B,0xFC,0xE0,0xFD,0x10,0x8E,0x4B,0x82,0xD1,0x20,
+ 0xA9,0x3A,0xD2,0xCA,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC3526_PRIME_3072,sizeof(RFC3526_PRIME_3072),bn);
+ }
+
+/* "4096-bit MODP Group" from RFC3526, Section 5.
+ *
+ * The prime is: 2^4096 - 2^4032 - 1 + 2^64 * { [2^3966 pi] + 240904 }
+ *
+ * RFC3526 specifies a generator of 2.
+ */
+
+BIGNUM *get_rfc3526_prime_4096(BIGNUM *bn)
+ {
+ static const unsigned char RFC3526_PRIME_4096[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x37,0xED,0x6B,0x0B,0xFF,0x5C,0xB6,0xF4,0x06,0xB7,0xED,
+ 0xEE,0x38,0x6B,0xFB,0x5A,0x89,0x9F,0xA5,0xAE,0x9F,0x24,0x11,
+ 0x7C,0x4B,0x1F,0xE6,0x49,0x28,0x66,0x51,0xEC,0xE4,0x5B,0x3D,
+ 0xC2,0x00,0x7C,0xB8,0xA1,0x63,0xBF,0x05,0x98,0xDA,0x48,0x36,
+ 0x1C,0x55,0xD3,0x9A,0x69,0x16,0x3F,0xA8,0xFD,0x24,0xCF,0x5F,
+ 0x83,0x65,0x5D,0x23,0xDC,0xA3,0xAD,0x96,0x1C,0x62,0xF3,0x56,
+ 0x20,0x85,0x52,0xBB,0x9E,0xD5,0x29,0x07,0x70,0x96,0x96,0x6D,
+ 0x67,0x0C,0x35,0x4E,0x4A,0xBC,0x98,0x04,0xF1,0x74,0x6C,0x08,
+ 0xCA,0x18,0x21,0x7C,0x32,0x90,0x5E,0x46,0x2E,0x36,0xCE,0x3B,
+ 0xE3,0x9E,0x77,0x2C,0x18,0x0E,0x86,0x03,0x9B,0x27,0x83,0xA2,
+ 0xEC,0x07,0xA2,0x8F,0xB5,0xC5,0x5D,0xF0,0x6F,0x4C,0x52,0xC9,
+ 0xDE,0x2B,0xCB,0xF6,0x95,0x58,0x17,0x18,0x39,0x95,0x49,0x7C,
+ 0xEA,0x95,0x6A,0xE5,0x15,0xD2,0x26,0x18,0x98,0xFA,0x05,0x10,
+ 0x15,0x72,0x8E,0x5A,0x8A,0xAA,0xC4,0x2D,0xAD,0x33,0x17,0x0D,
+ 0x04,0x50,0x7A,0x33,0xA8,0x55,0x21,0xAB,0xDF,0x1C,0xBA,0x64,
+ 0xEC,0xFB,0x85,0x04,0x58,0xDB,0xEF,0x0A,0x8A,0xEA,0x71,0x57,
+ 0x5D,0x06,0x0C,0x7D,0xB3,0x97,0x0F,0x85,0xA6,0xE1,0xE4,0xC7,
+ 0xAB,0xF5,0xAE,0x8C,0xDB,0x09,0x33,0xD7,0x1E,0x8C,0x94,0xE0,
+ 0x4A,0x25,0x61,0x9D,0xCE,0xE3,0xD2,0x26,0x1A,0xD2,0xEE,0x6B,
+ 0xF1,0x2F,0xFA,0x06,0xD9,0x8A,0x08,0x64,0xD8,0x76,0x02,0x73,
+ 0x3E,0xC8,0x6A,0x64,0x52,0x1F,0x2B,0x18,0x17,0x7B,0x20,0x0C,
+ 0xBB,0xE1,0x17,0x57,0x7A,0x61,0x5D,0x6C,0x77,0x09,0x88,0xC0,
+ 0xBA,0xD9,0x46,0xE2,0x08,0xE2,0x4F,0xA0,0x74,0xE5,0xAB,0x31,
+ 0x43,0xDB,0x5B,0xFC,0xE0,0xFD,0x10,0x8E,0x4B,0x82,0xD1,0x20,
+ 0xA9,0x21,0x08,0x01,0x1A,0x72,0x3C,0x12,0xA7,0x87,0xE6,0xD7,
+ 0x88,0x71,0x9A,0x10,0xBD,0xBA,0x5B,0x26,0x99,0xC3,0x27,0x18,
+ 0x6A,0xF4,0xE2,0x3C,0x1A,0x94,0x68,0x34,0xB6,0x15,0x0B,0xDA,
+ 0x25,0x83,0xE9,0xCA,0x2A,0xD4,0x4C,0xE8,0xDB,0xBB,0xC2,0xDB,
+ 0x04,0xDE,0x8E,0xF9,0x2E,0x8E,0xFC,0x14,0x1F,0xBE,0xCA,0xA6,
+ 0x28,0x7C,0x59,0x47,0x4E,0x6B,0xC0,0x5D,0x99,0xB2,0x96,0x4F,
+ 0xA0,0x90,0xC3,0xA2,0x23,0x3B,0xA1,0x86,0x51,0x5B,0xE7,0xED,
+ 0x1F,0x61,0x29,0x70,0xCE,0xE2,0xD7,0xAF,0xB8,0x1B,0xDD,0x76,
+ 0x21,0x70,0x48,0x1C,0xD0,0x06,0x91,0x27,0xD5,0xB0,0x5A,0xA9,
+ 0x93,0xB4,0xEA,0x98,0x8D,0x8F,0xDD,0xC1,0x86,0xFF,0xB7,0xDC,
+ 0x90,0xA6,0xC0,0x8F,0x4D,0xF4,0x35,0xC9,0x34,0x06,0x31,0x99,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC3526_PRIME_4096,sizeof(RFC3526_PRIME_4096),bn);
+ }
+
+/* "6144-bit MODP Group" from RFC3526, Section 6.
+ *
+ * The prime is: 2^6144 - 2^6080 - 1 + 2^64 * { [2^6014 pi] + 929484 }
+ *
+ * RFC3526 specifies a generator of 2.
+ */
+
+BIGNUM *get_rfc3526_prime_6144(BIGNUM *bn)
+ {
+ static const unsigned char RFC3526_PRIME_6144[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x37,0xED,0x6B,0x0B,0xFF,0x5C,0xB6,0xF4,0x06,0xB7,0xED,
+ 0xEE,0x38,0x6B,0xFB,0x5A,0x89,0x9F,0xA5,0xAE,0x9F,0x24,0x11,
+ 0x7C,0x4B,0x1F,0xE6,0x49,0x28,0x66,0x51,0xEC,0xE4,0x5B,0x3D,
+ 0xC2,0x00,0x7C,0xB8,0xA1,0x63,0xBF,0x05,0x98,0xDA,0x48,0x36,
+ 0x1C,0x55,0xD3,0x9A,0x69,0x16,0x3F,0xA8,0xFD,0x24,0xCF,0x5F,
+ 0x83,0x65,0x5D,0x23,0xDC,0xA3,0xAD,0x96,0x1C,0x62,0xF3,0x56,
+ 0x20,0x85,0x52,0xBB,0x9E,0xD5,0x29,0x07,0x70,0x96,0x96,0x6D,
+ 0x67,0x0C,0x35,0x4E,0x4A,0xBC,0x98,0x04,0xF1,0x74,0x6C,0x08,
+ 0xCA,0x18,0x21,0x7C,0x32,0x90,0x5E,0x46,0x2E,0x36,0xCE,0x3B,
+ 0xE3,0x9E,0x77,0x2C,0x18,0x0E,0x86,0x03,0x9B,0x27,0x83,0xA2,
+ 0xEC,0x07,0xA2,0x8F,0xB5,0xC5,0x5D,0xF0,0x6F,0x4C,0x52,0xC9,
+ 0xDE,0x2B,0xCB,0xF6,0x95,0x58,0x17,0x18,0x39,0x95,0x49,0x7C,
+ 0xEA,0x95,0x6A,0xE5,0x15,0xD2,0x26,0x18,0x98,0xFA,0x05,0x10,
+ 0x15,0x72,0x8E,0x5A,0x8A,0xAA,0xC4,0x2D,0xAD,0x33,0x17,0x0D,
+ 0x04,0x50,0x7A,0x33,0xA8,0x55,0x21,0xAB,0xDF,0x1C,0xBA,0x64,
+ 0xEC,0xFB,0x85,0x04,0x58,0xDB,0xEF,0x0A,0x8A,0xEA,0x71,0x57,
+ 0x5D,0x06,0x0C,0x7D,0xB3,0x97,0x0F,0x85,0xA6,0xE1,0xE4,0xC7,
+ 0xAB,0xF5,0xAE,0x8C,0xDB,0x09,0x33,0xD7,0x1E,0x8C,0x94,0xE0,
+ 0x4A,0x25,0x61,0x9D,0xCE,0xE3,0xD2,0x26,0x1A,0xD2,0xEE,0x6B,
+ 0xF1,0x2F,0xFA,0x06,0xD9,0x8A,0x08,0x64,0xD8,0x76,0x02,0x73,
+ 0x3E,0xC8,0x6A,0x64,0x52,0x1F,0x2B,0x18,0x17,0x7B,0x20,0x0C,
+ 0xBB,0xE1,0x17,0x57,0x7A,0x61,0x5D,0x6C,0x77,0x09,0x88,0xC0,
+ 0xBA,0xD9,0x46,0xE2,0x08,0xE2,0x4F,0xA0,0x74,0xE5,0xAB,0x31,
+ 0x43,0xDB,0x5B,0xFC,0xE0,0xFD,0x10,0x8E,0x4B,0x82,0xD1,0x20,
+ 0xA9,0x21,0x08,0x01,0x1A,0x72,0x3C,0x12,0xA7,0x87,0xE6,0xD7,
+ 0x88,0x71,0x9A,0x10,0xBD,0xBA,0x5B,0x26,0x99,0xC3,0x27,0x18,
+ 0x6A,0xF4,0xE2,0x3C,0x1A,0x94,0x68,0x34,0xB6,0x15,0x0B,0xDA,
+ 0x25,0x83,0xE9,0xCA,0x2A,0xD4,0x4C,0xE8,0xDB,0xBB,0xC2,0xDB,
+ 0x04,0xDE,0x8E,0xF9,0x2E,0x8E,0xFC,0x14,0x1F,0xBE,0xCA,0xA6,
+ 0x28,0x7C,0x59,0x47,0x4E,0x6B,0xC0,0x5D,0x99,0xB2,0x96,0x4F,
+ 0xA0,0x90,0xC3,0xA2,0x23,0x3B,0xA1,0x86,0x51,0x5B,0xE7,0xED,
+ 0x1F,0x61,0x29,0x70,0xCE,0xE2,0xD7,0xAF,0xB8,0x1B,0xDD,0x76,
+ 0x21,0x70,0x48,0x1C,0xD0,0x06,0x91,0x27,0xD5,0xB0,0x5A,0xA9,
+ 0x93,0xB4,0xEA,0x98,0x8D,0x8F,0xDD,0xC1,0x86,0xFF,0xB7,0xDC,
+ 0x90,0xA6,0xC0,0x8F,0x4D,0xF4,0x35,0xC9,0x34,0x02,0x84,0x92,
+ 0x36,0xC3,0xFA,0xB4,0xD2,0x7C,0x70,0x26,0xC1,0xD4,0xDC,0xB2,
+ 0x60,0x26,0x46,0xDE,0xC9,0x75,0x1E,0x76,0x3D,0xBA,0x37,0xBD,
+ 0xF8,0xFF,0x94,0x06,0xAD,0x9E,0x53,0x0E,0xE5,0xDB,0x38,0x2F,
+ 0x41,0x30,0x01,0xAE,0xB0,0x6A,0x53,0xED,0x90,0x27,0xD8,0x31,
+ 0x17,0x97,0x27,0xB0,0x86,0x5A,0x89,0x18,0xDA,0x3E,0xDB,0xEB,
+ 0xCF,0x9B,0x14,0xED,0x44,0xCE,0x6C,0xBA,0xCE,0xD4,0xBB,0x1B,
+ 0xDB,0x7F,0x14,0x47,0xE6,0xCC,0x25,0x4B,0x33,0x20,0x51,0x51,
+ 0x2B,0xD7,0xAF,0x42,0x6F,0xB8,0xF4,0x01,0x37,0x8C,0xD2,0xBF,
+ 0x59,0x83,0xCA,0x01,0xC6,0x4B,0x92,0xEC,0xF0,0x32,0xEA,0x15,
+ 0xD1,0x72,0x1D,0x03,0xF4,0x82,0xD7,0xCE,0x6E,0x74,0xFE,0xF6,
+ 0xD5,0x5E,0x70,0x2F,0x46,0x98,0x0C,0x82,0xB5,0xA8,0x40,0x31,
+ 0x90,0x0B,0x1C,0x9E,0x59,0xE7,0xC9,0x7F,0xBE,0xC7,0xE8,0xF3,
+ 0x23,0xA9,0x7A,0x7E,0x36,0xCC,0x88,0xBE,0x0F,0x1D,0x45,0xB7,
+ 0xFF,0x58,0x5A,0xC5,0x4B,0xD4,0x07,0xB2,0x2B,0x41,0x54,0xAA,
+ 0xCC,0x8F,0x6D,0x7E,0xBF,0x48,0xE1,0xD8,0x14,0xCC,0x5E,0xD2,
+ 0x0F,0x80,0x37,0xE0,0xA7,0x97,0x15,0xEE,0xF2,0x9B,0xE3,0x28,
+ 0x06,0xA1,0xD5,0x8B,0xB7,0xC5,0xDA,0x76,0xF5,0x50,0xAA,0x3D,
+ 0x8A,0x1F,0xBF,0xF0,0xEB,0x19,0xCC,0xB1,0xA3,0x13,0xD5,0x5C,
+ 0xDA,0x56,0xC9,0xEC,0x2E,0xF2,0x96,0x32,0x38,0x7F,0xE8,0xD7,
+ 0x6E,0x3C,0x04,0x68,0x04,0x3E,0x8F,0x66,0x3F,0x48,0x60,0xEE,
+ 0x12,0xBF,0x2D,0x5B,0x0B,0x74,0x74,0xD6,0xE6,0x94,0xF9,0x1E,
+ 0x6D,0xCC,0x40,0x24,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC3526_PRIME_6144,sizeof(RFC3526_PRIME_6144),bn);
+ }
+
+/* "8192-bit MODP Group" from RFC3526, Section 7.
+ *
+ * The prime is: 2^8192 - 2^8128 - 1 + 2^64 * { [2^8062 pi] + 4743158 }
+ *
+ * RFC3526 specifies a generator of 2.
+ */
+
+BIGNUM *get_rfc3526_prime_8192(BIGNUM *bn)
+ {
+ static const unsigned char RFC3526_PRIME_8192[]={
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xC9,0x0F,0xDA,0xA2,
+ 0x21,0x68,0xC2,0x34,0xC4,0xC6,0x62,0x8B,0x80,0xDC,0x1C,0xD1,
+ 0x29,0x02,0x4E,0x08,0x8A,0x67,0xCC,0x74,0x02,0x0B,0xBE,0xA6,
+ 0x3B,0x13,0x9B,0x22,0x51,0x4A,0x08,0x79,0x8E,0x34,0x04,0xDD,
+ 0xEF,0x95,0x19,0xB3,0xCD,0x3A,0x43,0x1B,0x30,0x2B,0x0A,0x6D,
+ 0xF2,0x5F,0x14,0x37,0x4F,0xE1,0x35,0x6D,0x6D,0x51,0xC2,0x45,
+ 0xE4,0x85,0xB5,0x76,0x62,0x5E,0x7E,0xC6,0xF4,0x4C,0x42,0xE9,
+ 0xA6,0x37,0xED,0x6B,0x0B,0xFF,0x5C,0xB6,0xF4,0x06,0xB7,0xED,
+ 0xEE,0x38,0x6B,0xFB,0x5A,0x89,0x9F,0xA5,0xAE,0x9F,0x24,0x11,
+ 0x7C,0x4B,0x1F,0xE6,0x49,0x28,0x66,0x51,0xEC,0xE4,0x5B,0x3D,
+ 0xC2,0x00,0x7C,0xB8,0xA1,0x63,0xBF,0x05,0x98,0xDA,0x48,0x36,
+ 0x1C,0x55,0xD3,0x9A,0x69,0x16,0x3F,0xA8,0xFD,0x24,0xCF,0x5F,
+ 0x83,0x65,0x5D,0x23,0xDC,0xA3,0xAD,0x96,0x1C,0x62,0xF3,0x56,
+ 0x20,0x85,0x52,0xBB,0x9E,0xD5,0x29,0x07,0x70,0x96,0x96,0x6D,
+ 0x67,0x0C,0x35,0x4E,0x4A,0xBC,0x98,0x04,0xF1,0x74,0x6C,0x08,
+ 0xCA,0x18,0x21,0x7C,0x32,0x90,0x5E,0x46,0x2E,0x36,0xCE,0x3B,
+ 0xE3,0x9E,0x77,0x2C,0x18,0x0E,0x86,0x03,0x9B,0x27,0x83,0xA2,
+ 0xEC,0x07,0xA2,0x8F,0xB5,0xC5,0x5D,0xF0,0x6F,0x4C,0x52,0xC9,
+ 0xDE,0x2B,0xCB,0xF6,0x95,0x58,0x17,0x18,0x39,0x95,0x49,0x7C,
+ 0xEA,0x95,0x6A,0xE5,0x15,0xD2,0x26,0x18,0x98,0xFA,0x05,0x10,
+ 0x15,0x72,0x8E,0x5A,0x8A,0xAA,0xC4,0x2D,0xAD,0x33,0x17,0x0D,
+ 0x04,0x50,0x7A,0x33,0xA8,0x55,0x21,0xAB,0xDF,0x1C,0xBA,0x64,
+ 0xEC,0xFB,0x85,0x04,0x58,0xDB,0xEF,0x0A,0x8A,0xEA,0x71,0x57,
+ 0x5D,0x06,0x0C,0x7D,0xB3,0x97,0x0F,0x85,0xA6,0xE1,0xE4,0xC7,
+ 0xAB,0xF5,0xAE,0x8C,0xDB,0x09,0x33,0xD7,0x1E,0x8C,0x94,0xE0,
+ 0x4A,0x25,0x61,0x9D,0xCE,0xE3,0xD2,0x26,0x1A,0xD2,0xEE,0x6B,
+ 0xF1,0x2F,0xFA,0x06,0xD9,0x8A,0x08,0x64,0xD8,0x76,0x02,0x73,
+ 0x3E,0xC8,0x6A,0x64,0x52,0x1F,0x2B,0x18,0x17,0x7B,0x20,0x0C,
+ 0xBB,0xE1,0x17,0x57,0x7A,0x61,0x5D,0x6C,0x77,0x09,0x88,0xC0,
+ 0xBA,0xD9,0x46,0xE2,0x08,0xE2,0x4F,0xA0,0x74,0xE5,0xAB,0x31,
+ 0x43,0xDB,0x5B,0xFC,0xE0,0xFD,0x10,0x8E,0x4B,0x82,0xD1,0x20,
+ 0xA9,0x21,0x08,0x01,0x1A,0x72,0x3C,0x12,0xA7,0x87,0xE6,0xD7,
+ 0x88,0x71,0x9A,0x10,0xBD,0xBA,0x5B,0x26,0x99,0xC3,0x27,0x18,
+ 0x6A,0xF4,0xE2,0x3C,0x1A,0x94,0x68,0x34,0xB6,0x15,0x0B,0xDA,
+ 0x25,0x83,0xE9,0xCA,0x2A,0xD4,0x4C,0xE8,0xDB,0xBB,0xC2,0xDB,
+ 0x04,0xDE,0x8E,0xF9,0x2E,0x8E,0xFC,0x14,0x1F,0xBE,0xCA,0xA6,
+ 0x28,0x7C,0x59,0x47,0x4E,0x6B,0xC0,0x5D,0x99,0xB2,0x96,0x4F,
+ 0xA0,0x90,0xC3,0xA2,0x23,0x3B,0xA1,0x86,0x51,0x5B,0xE7,0xED,
+ 0x1F,0x61,0x29,0x70,0xCE,0xE2,0xD7,0xAF,0xB8,0x1B,0xDD,0x76,
+ 0x21,0x70,0x48,0x1C,0xD0,0x06,0x91,0x27,0xD5,0xB0,0x5A,0xA9,
+ 0x93,0xB4,0xEA,0x98,0x8D,0x8F,0xDD,0xC1,0x86,0xFF,0xB7,0xDC,
+ 0x90,0xA6,0xC0,0x8F,0x4D,0xF4,0x35,0xC9,0x34,0x02,0x84,0x92,
+ 0x36,0xC3,0xFA,0xB4,0xD2,0x7C,0x70,0x26,0xC1,0xD4,0xDC,0xB2,
+ 0x60,0x26,0x46,0xDE,0xC9,0x75,0x1E,0x76,0x3D,0xBA,0x37,0xBD,
+ 0xF8,0xFF,0x94,0x06,0xAD,0x9E,0x53,0x0E,0xE5,0xDB,0x38,0x2F,
+ 0x41,0x30,0x01,0xAE,0xB0,0x6A,0x53,0xED,0x90,0x27,0xD8,0x31,
+ 0x17,0x97,0x27,0xB0,0x86,0x5A,0x89,0x18,0xDA,0x3E,0xDB,0xEB,
+ 0xCF,0x9B,0x14,0xED,0x44,0xCE,0x6C,0xBA,0xCE,0xD4,0xBB,0x1B,
+ 0xDB,0x7F,0x14,0x47,0xE6,0xCC,0x25,0x4B,0x33,0x20,0x51,0x51,
+ 0x2B,0xD7,0xAF,0x42,0x6F,0xB8,0xF4,0x01,0x37,0x8C,0xD2,0xBF,
+ 0x59,0x83,0xCA,0x01,0xC6,0x4B,0x92,0xEC,0xF0,0x32,0xEA,0x15,
+ 0xD1,0x72,0x1D,0x03,0xF4,0x82,0xD7,0xCE,0x6E,0x74,0xFE,0xF6,
+ 0xD5,0x5E,0x70,0x2F,0x46,0x98,0x0C,0x82,0xB5,0xA8,0x40,0x31,
+ 0x90,0x0B,0x1C,0x9E,0x59,0xE7,0xC9,0x7F,0xBE,0xC7,0xE8,0xF3,
+ 0x23,0xA9,0x7A,0x7E,0x36,0xCC,0x88,0xBE,0x0F,0x1D,0x45,0xB7,
+ 0xFF,0x58,0x5A,0xC5,0x4B,0xD4,0x07,0xB2,0x2B,0x41,0x54,0xAA,
+ 0xCC,0x8F,0x6D,0x7E,0xBF,0x48,0xE1,0xD8,0x14,0xCC,0x5E,0xD2,
+ 0x0F,0x80,0x37,0xE0,0xA7,0x97,0x15,0xEE,0xF2,0x9B,0xE3,0x28,
+ 0x06,0xA1,0xD5,0x8B,0xB7,0xC5,0xDA,0x76,0xF5,0x50,0xAA,0x3D,
+ 0x8A,0x1F,0xBF,0xF0,0xEB,0x19,0xCC,0xB1,0xA3,0x13,0xD5,0x5C,
+ 0xDA,0x56,0xC9,0xEC,0x2E,0xF2,0x96,0x32,0x38,0x7F,0xE8,0xD7,
+ 0x6E,0x3C,0x04,0x68,0x04,0x3E,0x8F,0x66,0x3F,0x48,0x60,0xEE,
+ 0x12,0xBF,0x2D,0x5B,0x0B,0x74,0x74,0xD6,0xE6,0x94,0xF9,0x1E,
+ 0x6D,0xBE,0x11,0x59,0x74,0xA3,0x92,0x6F,0x12,0xFE,0xE5,0xE4,
+ 0x38,0x77,0x7C,0xB6,0xA9,0x32,0xDF,0x8C,0xD8,0xBE,0xC4,0xD0,
+ 0x73,0xB9,0x31,0xBA,0x3B,0xC8,0x32,0xB6,0x8D,0x9D,0xD3,0x00,
+ 0x74,0x1F,0xA7,0xBF,0x8A,0xFC,0x47,0xED,0x25,0x76,0xF6,0x93,
+ 0x6B,0xA4,0x24,0x66,0x3A,0xAB,0x63,0x9C,0x5A,0xE4,0xF5,0x68,
+ 0x34,0x23,0xB4,0x74,0x2B,0xF1,0xC9,0x78,0x23,0x8F,0x16,0xCB,
+ 0xE3,0x9D,0x65,0x2D,0xE3,0xFD,0xB8,0xBE,0xFC,0x84,0x8A,0xD9,
+ 0x22,0x22,0x2E,0x04,0xA4,0x03,0x7C,0x07,0x13,0xEB,0x57,0xA8,
+ 0x1A,0x23,0xF0,0xC7,0x34,0x73,0xFC,0x64,0x6C,0xEA,0x30,0x6B,
+ 0x4B,0xCB,0xC8,0x86,0x2F,0x83,0x85,0xDD,0xFA,0x9D,0x4B,0x7F,
+ 0xA2,0xC0,0x87,0xE8,0x79,0x68,0x33,0x03,0xED,0x5B,0xDD,0x3A,
+ 0x06,0x2B,0x3C,0xF5,0xB3,0xA2,0x78,0xA6,0x6D,0x2A,0x13,0xF8,
+ 0x3F,0x44,0xF8,0x2D,0xDF,0x31,0x0E,0xE0,0x74,0xAB,0x6A,0x36,
+ 0x45,0x97,0xE8,0x99,0xA0,0x25,0x5D,0xC1,0x64,0xF3,0x1C,0xC5,
+ 0x08,0x46,0x85,0x1D,0xF9,0xAB,0x48,0x19,0x5D,0xED,0x7E,0xA1,
+ 0xB1,0xD5,0x10,0xBD,0x7E,0xE7,0x4D,0x73,0xFA,0xF3,0x6B,0xC3,
+ 0x1E,0xCF,0xA2,0x68,0x35,0x90,0x46,0xF4,0xEB,0x87,0x9F,0x92,
+ 0x40,0x09,0x43,0x8B,0x48,0x1C,0x6C,0xD7,0x88,0x9A,0x00,0x2E,
+ 0xD5,0xEE,0x38,0x2B,0xC9,0x19,0x0D,0xA6,0xFC,0x02,0x6E,0x47,
+ 0x95,0x58,0xE4,0x47,0x56,0x77,0xE9,0xAA,0x9E,0x30,0x50,0xE2,
+ 0x76,0x56,0x94,0xDF,0xC8,0x1F,0x56,0xE8,0x80,0xB9,0x6E,0x71,
+ 0x60,0xC9,0x80,0xDD,0x98,0xED,0xD3,0xDF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,
+ };
+ return BN_bin2bn(RFC3526_PRIME_8192,sizeof(RFC3526_PRIME_8192),bn);
+ }
+
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_ctx.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_ctx.c
new file mode 100644
index 00000000..3f2256f6
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_ctx.c
@@ -0,0 +1,454 @@
+/* crypto/bn/bn_ctx.c */
+/* Written by Ulf Moeller for the OpenSSL project. */
+/* ====================================================================
+ * Copyright (c) 1998-2004 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#if !defined(BN_CTX_DEBUG) && !defined(BN_DEBUG)
+#ifndef NDEBUG
+#define NDEBUG
+#endif
+#endif
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+/* TODO list
+ *
+ * 1. Check a bunch of "(words+1)" type hacks in various bignum functions and
+ * check they can be safely removed.
+ * - Check +1 and other ugliness in BN_from_montgomery()
+ *
+ * 2. Consider allowing a BN_new_ex() that, at least, lets you specify an
+ * appropriate 'block' size that will be honoured by bn_expand_internal() to
+ * prevent piddly little reallocations. OTOH, profiling bignum expansions in
+ * BN_CTX doesn't show this to be a big issue.
+ */
+
+/* How many bignums are in each "pool item" */
+#define BN_CTX_POOL_SIZE 16
+/* The stack frame info is resizable; set a first-time expansion size */
+#define BN_CTX_START_FRAMES 32
+
+/***********/
+/* BN_POOL */
+/***********/
+
+/* A bundle of bignums that can be linked with other bundles */
+typedef struct bignum_pool_item
+ {
+ /* The bignum values */
+ BIGNUM vals[BN_CTX_POOL_SIZE];
+ /* Linked-list admin */
+ struct bignum_pool_item *prev, *next;
+ } BN_POOL_ITEM;
+/* A linked-list of bignums grouped in bundles */
+typedef struct bignum_pool
+ {
+ /* Linked-list admin */
+ BN_POOL_ITEM *head, *current, *tail;
+ /* Stack depth and allocation size */
+ unsigned used, size;
+ } BN_POOL;
+static void BN_POOL_init(BN_POOL *);
+static void BN_POOL_finish(BN_POOL *);
+#ifndef OPENSSL_NO_DEPRECATED
+static void BN_POOL_reset(BN_POOL *);
+#endif
+static BIGNUM * BN_POOL_get(BN_POOL *);
+static void BN_POOL_release(BN_POOL *, unsigned int);
+
+/************/
+/* BN_STACK */
+/************/
+
+/* A wrapper to manage the "stack frames" */
+typedef struct bignum_ctx_stack
+ {
+ /* Array of indexes into the bignum stack */
+ unsigned int *indexes;
+ /* Number of stack frames, and the size of the allocated array */
+ unsigned int depth, size;
+ } BN_STACK;
+static void BN_STACK_init(BN_STACK *);
+static void BN_STACK_finish(BN_STACK *);
+#ifndef OPENSSL_NO_DEPRECATED
+static void BN_STACK_reset(BN_STACK *);
+#endif
+static int BN_STACK_push(BN_STACK *, unsigned int);
+static unsigned int BN_STACK_pop(BN_STACK *);
+
+/**********/
+/* BN_CTX */
+/**********/
+
+/* The opaque BN_CTX type */
+struct bignum_ctx
+ {
+ /* The bignum bundles */
+ BN_POOL pool;
+ /* The "stack frames", if you will */
+ BN_STACK stack;
+ /* The number of bignums currently assigned */
+ unsigned int used;
+ /* Depth of stack overflow */
+ int err_stack;
+ /* Block "gets" until an "end" (compatibility behaviour) */
+ int too_many;
+ };
+
+/* Enable this to find BN_CTX bugs */
+#ifdef BN_CTX_DEBUG
+static const char *ctxdbg_cur = NULL;
+static void ctxdbg(BN_CTX *ctx)
+ {
+ unsigned int bnidx = 0, fpidx = 0;
+ BN_POOL_ITEM *item = ctx->pool.head;
+ BN_STACK *stack = &ctx->stack;
+ fprintf(stderr,"(%08x): ", (unsigned int)ctx);
+ while(bnidx < ctx->used)
+ {
+ fprintf(stderr,"%03x ", item->vals[bnidx++ % BN_CTX_POOL_SIZE].dmax);
+ if(!(bnidx % BN_CTX_POOL_SIZE))
+ item = item->next;
+ }
+ fprintf(stderr,"\n");
+ bnidx = 0;
+ fprintf(stderr," : ");
+ while(fpidx < stack->depth)
+ {
+ while(bnidx++ < stack->indexes[fpidx])
+ fprintf(stderr," ");
+ fprintf(stderr,"^^^ ");
+ bnidx++;
+ fpidx++;
+ }
+ fprintf(stderr,"\n");
+ }
+#define CTXDBG_ENTRY(str, ctx) do { \
+ ctxdbg_cur = (str); \
+ fprintf(stderr,"Starting %s\n", ctxdbg_cur); \
+ ctxdbg(ctx); \
+ } while(0)
+#define CTXDBG_EXIT(ctx) do { \
+ fprintf(stderr,"Ending %s\n", ctxdbg_cur); \
+ ctxdbg(ctx); \
+ } while(0)
+#define CTXDBG_RET(ctx,ret)
+#else
+#define CTXDBG_ENTRY(str, ctx)
+#define CTXDBG_EXIT(ctx)
+#define CTXDBG_RET(ctx,ret)
+#endif
+
+/* This function is an evil legacy and should not be used. This implementation
+ * is WYSIWYG, though I've done my best. */
+#ifndef OPENSSL_NO_DEPRECATED
+void BN_CTX_init(BN_CTX *ctx)
+ {
+ /* Assume the caller obtained the context via BN_CTX_new() and so is
+ * trying to reset it for use. Nothing else makes sense, least of all
+ * binary compatibility from a time when they could declare a static
+ * variable. */
+ BN_POOL_reset(&ctx->pool);
+ BN_STACK_reset(&ctx->stack);
+ ctx->used = 0;
+ ctx->err_stack = 0;
+ ctx->too_many = 0;
+ }
+#endif
+
+BN_CTX *BN_CTX_new(void)
+ {
+ BN_CTX *ret = OPENSSL_malloc(sizeof(BN_CTX));
+ if(!ret)
+ {
+ BNerr(BN_F_BN_CTX_NEW,ERR_R_MALLOC_FAILURE);
+ return NULL;
+ }
+ /* Initialise the structure */
+ BN_POOL_init(&ret->pool);
+ BN_STACK_init(&ret->stack);
+ ret->used = 0;
+ ret->err_stack = 0;
+ ret->too_many = 0;
+ return ret;
+ }
+
+void BN_CTX_free(BN_CTX *ctx)
+ {
+ if (ctx == NULL)
+ return;
+#ifdef BN_CTX_DEBUG
+ {
+ BN_POOL_ITEM *pool = ctx->pool.head;
+ fprintf(stderr,"BN_CTX_free, stack-size=%d, pool-bignums=%d\n",
+ ctx->stack.size, ctx->pool.size);
+ fprintf(stderr,"dmaxs: ");
+ while(pool) {
+ unsigned loop = 0;
+ while(loop < BN_CTX_POOL_SIZE)
+ fprintf(stderr,"%02x ", pool->vals[loop++].dmax);
+ pool = pool->next;
+ }
+ fprintf(stderr,"\n");
+ }
+#endif
+ BN_STACK_finish(&ctx->stack);
+ BN_POOL_finish(&ctx->pool);
+ OPENSSL_free(ctx);
+ }
+
+void BN_CTX_start(BN_CTX *ctx)
+ {
+ CTXDBG_ENTRY("BN_CTX_start", ctx);
+ /* If we're already overflowing ... */
+ if(ctx->err_stack || ctx->too_many)
+ ctx->err_stack++;
+ /* (Try to) get a new frame pointer */
+ else if(!BN_STACK_push(&ctx->stack, ctx->used))
+ {
+ BNerr(BN_F_BN_CTX_START,BN_R_TOO_MANY_TEMPORARY_VARIABLES);
+ ctx->err_stack++;
+ }
+ CTXDBG_EXIT(ctx);
+ }
+
+void BN_CTX_end(BN_CTX *ctx)
+ {
+ CTXDBG_ENTRY("BN_CTX_end", ctx);
+ if(ctx->err_stack)
+ ctx->err_stack--;
+ else
+ {
+ unsigned int fp = BN_STACK_pop(&ctx->stack);
+ /* Does this stack frame have anything to release? */
+ if(fp < ctx->used)
+ BN_POOL_release(&ctx->pool, ctx->used - fp);
+ ctx->used = fp;
+ /* Unjam "too_many" in case "get" had failed */
+ ctx->too_many = 0;
+ }
+ CTXDBG_EXIT(ctx);
+ }
+
+BIGNUM *BN_CTX_get(BN_CTX *ctx)
+ {
+ BIGNUM *ret;
+ CTXDBG_ENTRY("BN_CTX_get", ctx);
+ if(ctx->err_stack || ctx->too_many) return NULL;
+ if((ret = BN_POOL_get(&ctx->pool)) == NULL)
+ {
+ /* Setting too_many prevents repeated "get" attempts from
+ * cluttering the error stack. */
+ ctx->too_many = 1;
+ BNerr(BN_F_BN_CTX_GET,BN_R_TOO_MANY_TEMPORARY_VARIABLES);
+ return NULL;
+ }
+ /* OK, make sure the returned bignum is "zero" */
+ BN_zero(ret);
+ ctx->used++;
+ CTXDBG_RET(ctx, ret);
+ return ret;
+ }
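Taken together, BN_CTX_start(), BN_CTX_get() and BN_CTX_end() define the whole calling pattern: open a frame, fetch temporaries (only the last one needs a NULL check, because once one "get" fails every later "get" fails too), then close the frame to hand them back to the pool. A minimal sketch of that pattern follows; the function name and the arithmetic are illustrative, not part of this file:

/* Illustrative sketch (not from bn_ctx.c): typical BN_CTX frame usage. */
static int sum_of_squares(BIGNUM *out, const BIGNUM *a, const BIGNUM *b,
	BN_CTX *ctx)
	{
	BIGNUM *t1, *t2;
	int ok = 0;

	BN_CTX_start(ctx);
	t1 = BN_CTX_get(ctx);
	t2 = BN_CTX_get(ctx);
	if (t2 == NULL) goto err;	/* covers the earlier "get" as well */
	if (!BN_sqr(t1, a, ctx)) goto err;
	if (!BN_sqr(t2, b, ctx)) goto err;
	if (!BN_add(out, t1, t2)) goto err;
	ok = 1;
err:
	BN_CTX_end(ctx);		/* returns t1 and t2 to the pool */
	return ok;
	}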
+
+/************/
+/* BN_STACK */
+/************/
+
+static void BN_STACK_init(BN_STACK *st)
+ {
+ st->indexes = NULL;
+ st->depth = st->size = 0;
+ }
+
+static void BN_STACK_finish(BN_STACK *st)
+ {
+ if(st->size) OPENSSL_free(st->indexes);
+ }
+
+#ifndef OPENSSL_NO_DEPRECATED
+static void BN_STACK_reset(BN_STACK *st)
+ {
+ st->depth = 0;
+ }
+#endif
+
+static int BN_STACK_push(BN_STACK *st, unsigned int idx)
+ {
+ if(st->depth == st->size)
+ /* Need to expand */
+ {
+ unsigned int newsize = (st->size ?
+ (st->size * 3 / 2) : BN_CTX_START_FRAMES);
+ unsigned int *newitems = OPENSSL_malloc(newsize *
+ sizeof(unsigned int));
+ if(!newitems) return 0;
+ if(st->depth)
+ memcpy(newitems, st->indexes, st->depth *
+ sizeof(unsigned int));
+ if(st->size) OPENSSL_free(st->indexes);
+ st->indexes = newitems;
+ st->size = newsize;
+ }
+ st->indexes[(st->depth)++] = idx;
+ return 1;
+ }
+
+static unsigned int BN_STACK_pop(BN_STACK *st)
+ {
+ return st->indexes[--(st->depth)];
+ }
+
+/***********/
+/* BN_POOL */
+/***********/
+
+static void BN_POOL_init(BN_POOL *p)
+ {
+ p->head = p->current = p->tail = NULL;
+ p->used = p->size = 0;
+ }
+
+static void BN_POOL_finish(BN_POOL *p)
+ {
+ while(p->head)
+ {
+ unsigned int loop = 0;
+ BIGNUM *bn = p->head->vals;
+ while(loop++ < BN_CTX_POOL_SIZE)
+ {
+ if(bn->d) BN_clear_free(bn);
+ bn++;
+ }
+ p->current = p->head->next;
+ OPENSSL_free(p->head);
+ p->head = p->current;
+ }
+ }
+
+#ifndef OPENSSL_NO_DEPRECATED
+static void BN_POOL_reset(BN_POOL *p)
+ {
+ BN_POOL_ITEM *item = p->head;
+ while(item)
+ {
+ unsigned int loop = 0;
+ BIGNUM *bn = item->vals;
+ while(loop++ < BN_CTX_POOL_SIZE)
+ {
+ if(bn->d) BN_clear(bn);
+ bn++;
+ }
+ item = item->next;
+ }
+ p->current = p->head;
+ p->used = 0;
+ }
+#endif
+
+static BIGNUM *BN_POOL_get(BN_POOL *p)
+ {
+ if(p->used == p->size)
+ {
+ BIGNUM *bn;
+ unsigned int loop = 0;
+ BN_POOL_ITEM *item = OPENSSL_malloc(sizeof(BN_POOL_ITEM));
+ if(!item) return NULL;
+ /* Initialise the structure */
+ bn = item->vals;
+ while(loop++ < BN_CTX_POOL_SIZE)
+ BN_init(bn++);
+ item->prev = p->tail;
+ item->next = NULL;
+ /* Link it in */
+ if(!p->head)
+ p->head = p->current = p->tail = item;
+ else
+ {
+ p->tail->next = item;
+ p->tail = item;
+ p->current = item;
+ }
+ p->size += BN_CTX_POOL_SIZE;
+ p->used++;
+ /* Return the first bignum from the new pool */
+ return item->vals;
+ }
+ if(!p->used)
+ p->current = p->head;
+ else if((p->used % BN_CTX_POOL_SIZE) == 0)
+ p->current = p->current->next;
+ return p->current->vals + ((p->used++) % BN_CTX_POOL_SIZE);
+ }
+
+static void BN_POOL_release(BN_POOL *p, unsigned int num)
+ {
+ unsigned int offset = (p->used - 1) % BN_CTX_POOL_SIZE;
+ p->used -= num;
+ while(num--)
+ {
+ bn_check_top(p->current->vals + offset);
+ if(!offset)
+ {
+ offset = BN_CTX_POOL_SIZE - 1;
+ p->current = p->current->prev;
+ }
+ else
+ offset--;
+ }
+ }
+
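BN_POOL_get() and BN_POOL_release() never store a flat index; they recover the chunk and the offset from p->used on the fly. A toy restatement of that arithmetic (names are illustrative, not from this file):

/* Illustrative sketch (not from bn_ctx.c): bignum number k (0-based) lives in
 * linked-list item k / BN_CTX_POOL_SIZE, at offset k % BN_CTX_POOL_SIZE within
 * that item's vals[] array - the same arithmetic the pool walkers above use. */
static unsigned int pool_item_of(unsigned int k)   { return k / BN_CTX_POOL_SIZE; }
static unsigned int pool_offset_of(unsigned int k) { return k % BN_CTX_POOL_SIZE; }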
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_depr.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_depr.c
new file mode 100644
index 00000000..27535e4f
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_depr.c
@@ -0,0 +1,112 @@
+/* crypto/bn/bn_depr.c */
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+/* Support for deprecated functions goes here - static linkage will only slurp
+ * this code if applications are using them directly. */
+
+#include <stdio.h>
+#include <time.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+#include <openssl/rand.h>
+
+static void *dummy=&dummy;
+
+#ifndef OPENSSL_NO_DEPRECATED
+BIGNUM *BN_generate_prime(BIGNUM *ret, int bits, int safe,
+ const BIGNUM *add, const BIGNUM *rem,
+ void (*callback)(int,int,void *), void *cb_arg)
+ {
+ BN_GENCB cb;
+ BIGNUM *rnd=NULL;
+ int found = 0;
+
+ BN_GENCB_set_old(&cb, callback, cb_arg);
+
+ if (ret == NULL)
+ {
+ if ((rnd=BN_new()) == NULL) goto err;
+ }
+ else
+ rnd=ret;
+ if(!BN_generate_prime_ex(rnd, bits, safe, add, rem, &cb))
+ goto err;
+
+ /* we have a prime :-) */
+ found = 1;
+err:
+ if (!found && (ret == NULL) && (rnd != NULL)) BN_free(rnd);
+ return(found ? rnd : NULL);
+ }
+
+int BN_is_prime(const BIGNUM *a, int checks, void (*callback)(int,int,void *),
+ BN_CTX *ctx_passed, void *cb_arg)
+ {
+ BN_GENCB cb;
+ BN_GENCB_set_old(&cb, callback, cb_arg);
+ return BN_is_prime_ex(a, checks, ctx_passed, &cb);
+ }
+
+int BN_is_prime_fasttest(const BIGNUM *a, int checks,
+ void (*callback)(int,int,void *),
+ BN_CTX *ctx_passed, void *cb_arg,
+ int do_trial_division)
+ {
+ BN_GENCB cb;
+ BN_GENCB_set_old(&cb, callback, cb_arg);
+ return BN_is_prime_fasttest_ex(a, checks, ctx_passed,
+ do_trial_division, &cb);
+ }
+#endif
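All three wrappers follow the same recipe: wrap the old function-pointer callback in a BN_GENCB via BN_GENCB_set_old() and forward to the corresponding _ex function. A hedged sketch of the two calling styles (the bit length and the callback are illustrative, not from this file):

/* Illustrative sketch (not from bn_depr.c): the deprecated call and the
 * equivalent _ex call it forwards to. */
#include <openssl/bn.h>

static void old_cb(int type, int num, void *arg)
	{
	(void)type; (void)num; (void)arg;	/* progress reporting only */
	}

static int generate_both_ways(void)
	{
	BN_GENCB cb;
	BIGNUM *p1, *p2 = BN_new();
	int ok = 0;

	/* deprecated style: a NULL 'ret' makes the function allocate the result */
	p1 = BN_generate_prime(NULL, 512, 0, NULL, NULL, old_cb, NULL);

	/* current style, exactly what the wrapper above does internally */
	BN_GENCB_set_old(&cb, old_cb, NULL);
	if (p1 != NULL && p2 != NULL &&
	    BN_generate_prime_ex(p2, 512, 0, NULL, NULL, &cb))
		ok = 1;

	BN_free(p1);
	BN_free(p2);
	return ok;
	}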
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_div.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_div.c
new file mode 100644
index 00000000..7b240318
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_div.c
@@ -0,0 +1,448 @@
+/* crypto/bn/bn_div.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <openssl/bn.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+
+/* The old slow way */
+#if 0
+int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d,
+ BN_CTX *ctx)
+ {
+ int i,nm,nd;
+ int ret = 0;
+ BIGNUM *D;
+
+ bn_check_top(m);
+ bn_check_top(d);
+ if (BN_is_zero(d))
+ {
+ BNerr(BN_F_BN_DIV,BN_R_DIV_BY_ZERO);
+ return(0);
+ }
+
+ if (BN_ucmp(m,d) < 0)
+ {
+ if (rem != NULL)
+ { if (BN_copy(rem,m) == NULL) return(0); }
+ if (dv != NULL) BN_zero(dv);
+ return(1);
+ }
+
+ BN_CTX_start(ctx);
+ D = BN_CTX_get(ctx);
+ if (dv == NULL) dv = BN_CTX_get(ctx);
+ if (rem == NULL) rem = BN_CTX_get(ctx);
+ if (D == NULL || dv == NULL || rem == NULL)
+ goto end;
+
+ nd=BN_num_bits(d);
+ nm=BN_num_bits(m);
+ if (BN_copy(D,d) == NULL) goto end;
+ if (BN_copy(rem,m) == NULL) goto end;
+
+ /* The next 2 are needed so we can do a dv->d[0]|=1 later
+ * since BN_lshift1 will only work once there is a value :-) */
+ BN_zero(dv);
+ if(bn_wexpand(dv,1) == NULL) goto end;
+ dv->top=1;
+
+ if (!BN_lshift(D,D,nm-nd)) goto end;
+ for (i=nm-nd; i>=0; i--)
+ {
+ if (!BN_lshift1(dv,dv)) goto end;
+ if (BN_ucmp(rem,D) >= 0)
+ {
+ dv->d[0]|=1;
+ if (!BN_usub(rem,rem,D)) goto end;
+ }
+/* CAN IMPROVE (and have now :=) */
+ if (!BN_rshift1(D,D)) goto end;
+ }
+ rem->neg=BN_is_zero(rem)?0:m->neg;
+ dv->neg=m->neg^d->neg;
+ ret = 1;
+ end:
+ BN_CTX_end(ctx);
+ return(ret);
+ }
+
+#else
+
+#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) \
+ && !defined(PEDANTIC) && !defined(BN_DIV3W)
+# if defined(__GNUC__) && __GNUC__>=2
+# if defined(__i386) || defined (__i386__)
+ /*
+ * There were two reasons for implementing this template:
+ * - GNU C generates a call to a function (__udivdi3 to be exact)
+ * in reply to ((((BN_ULLONG)n0)<<BN_BITS2)|n1)/d0 (I fail to
+ * understand why...);
+ * - divl doesn't only calculate quotient, but also leaves
+ * remainder in %edx which we can definitely use here:-)
+ *
+ * <appro@fy.chalmers.se>
+ */
+#undef bn_div_words
+# define bn_div_words(n0,n1,d0) \
+ ({ asm volatile ( \
+ "divl %4" \
+ : "=a"(q), "=d"(rem) \
+ : "a"(n1), "d"(n0), "g"(d0) \
+ : "cc"); \
+ q; \
+ })
+# define REMAINDER_IS_ALREADY_CALCULATED
+# elif defined(__x86_64) && defined(SIXTY_FOUR_BIT_LONG)
+ /*
+ * Same story here, but it's 128-bit by 64-bit division. Wow!
+ * <appro@fy.chalmers.se>
+ */
+# undef bn_div_words
+# define bn_div_words(n0,n1,d0) \
+ ({ asm volatile ( \
+ "divq %4" \
+ : "=a"(q), "=d"(rem) \
+ : "a"(n1), "d"(n0), "g"(d0) \
+ : "cc"); \
+ q; \
+ })
+# define REMAINDER_IS_ALREADY_CALCULATED
+# endif /* __<cpu> */
+# endif /* __GNUC__ */
+#endif /* OPENSSL_NO_ASM */
+
+
+/* BN_div computes dv := num / divisor, rounding towards
+ * zero, and sets up rm such that dv*divisor + rm = num holds.
+ * Thus:
+ * dv->neg == num->neg ^ divisor->neg (unless the result is zero)
+ * rm->neg == num->neg (unless the remainder is zero)
+ * If 'dv' or 'rm' is NULL, the respective value is not returned.
+ */
+int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
+ BN_CTX *ctx)
+ {
+ int norm_shift,i,loop;
+ BIGNUM *tmp,wnum,*snum,*sdiv,*res;
+ BN_ULONG *resp,*wnump;
+ BN_ULONG d0,d1;
+ int num_n,div_n;
+ int no_branch=0;
+
+ /* Invalid zero-padding would have particularly bad consequences
+ * in the case of 'num', so don't just rely on bn_check_top() for this one
+ * (bn_check_top() works only for BN_DEBUG builds) */
+ if (num->top > 0 && num->d[num->top - 1] == 0)
+ {
+ BNerr(BN_F_BN_DIV,BN_R_NOT_INITIALIZED);
+ return 0;
+ }
+
+ bn_check_top(num);
+
+ if ((BN_get_flags(num, BN_FLG_CONSTTIME) != 0) || (BN_get_flags(divisor, BN_FLG_CONSTTIME) != 0))
+ {
+ no_branch=1;
+ }
+
+ bn_check_top(dv);
+ bn_check_top(rm);
+ /* bn_check_top(num); */ /* 'num' has been checked already */
+ bn_check_top(divisor);
+
+ if (BN_is_zero(divisor))
+ {
+ BNerr(BN_F_BN_DIV,BN_R_DIV_BY_ZERO);
+ return(0);
+ }
+
+ if (!no_branch && BN_ucmp(num,divisor) < 0)
+ {
+ if (rm != NULL)
+ { if (BN_copy(rm,num) == NULL) return(0); }
+ if (dv != NULL) BN_zero(dv);
+ return(1);
+ }
+
+ BN_CTX_start(ctx);
+ tmp=BN_CTX_get(ctx);
+ snum=BN_CTX_get(ctx);
+ sdiv=BN_CTX_get(ctx);
+ if (dv == NULL)
+ res=BN_CTX_get(ctx);
+ else res=dv;
+ if (sdiv == NULL || res == NULL || tmp == NULL || snum == NULL)
+ goto err;
+
+ /* First we normalise the numbers */
+ norm_shift=BN_BITS2-((BN_num_bits(divisor))%BN_BITS2);
+ if (!(BN_lshift(sdiv,divisor,norm_shift))) goto err;
+ sdiv->neg=0;
+ norm_shift+=BN_BITS2;
+ if (!(BN_lshift(snum,num,norm_shift))) goto err;
+ snum->neg=0;
+
+ if (no_branch)
+ {
+ /* Since we don't know whether snum is larger than sdiv,
+ * we pad snum with enough zeroes without changing its
+ * value.
+ */
+ if (snum->top <= sdiv->top+1)
+ {
+ if (bn_wexpand(snum, sdiv->top + 2) == NULL) goto err;
+ for (i = snum->top; i < sdiv->top + 2; i++) snum->d[i] = 0;
+ snum->top = sdiv->top + 2;
+ }
+ else
+ {
+ if (bn_wexpand(snum, snum->top + 1) == NULL) goto err;
+ snum->d[snum->top] = 0;
+ snum->top ++;
+ }
+ }
+
+ div_n=sdiv->top;
+ num_n=snum->top;
+ loop=num_n-div_n;
+	/* Let's set up a 'window' into snum
+ * This is the part that corresponds to the current
+ * 'area' being divided */
+ wnum.neg = 0;
+ wnum.d = &(snum->d[loop]);
+ wnum.top = div_n;
+ /* only needed when BN_ucmp messes up the values between top and max */
+ wnum.dmax = snum->dmax - loop; /* so we don't step out of bounds */
+
+ /* Get the top 2 words of sdiv */
+ /* div_n=sdiv->top; */
+ d0=sdiv->d[div_n-1];
+ d1=(div_n == 1)?0:sdiv->d[div_n-2];
+
+ /* pointer to the 'top' of snum */
+ wnump= &(snum->d[num_n-1]);
+
+ /* Setup to 'res' */
+ res->neg= (num->neg^divisor->neg);
+ if (!bn_wexpand(res,(loop+1))) goto err;
+ res->top=loop-no_branch;
+ resp= &(res->d[loop-1]);
+
+ /* space for temp */
+ if (!bn_wexpand(tmp,(div_n+1))) goto err;
+
+ if (!no_branch)
+ {
+ if (BN_ucmp(&wnum,sdiv) >= 0)
+ {
+ /* If BN_DEBUG_RAND is defined BN_ucmp changes (via
+ * bn_pollute) the const bignum arguments =>
+ * clean the values between top and max again */
+ bn_clear_top2max(&wnum);
+ bn_sub_words(wnum.d, wnum.d, sdiv->d, div_n);
+ *resp=1;
+ }
+ else
+ res->top--;
+ }
+
+ /* if res->top == 0 then clear the neg value otherwise decrease
+ * the resp pointer */
+ if (res->top == 0)
+ res->neg = 0;
+ else
+ resp--;
+
+ for (i=0; i<loop-1; i++, wnump--, resp--)
+ {
+ BN_ULONG q,l0;
+ /* the first part of the loop uses the top two words of
+ * snum and sdiv to calculate a BN_ULONG q such that
+ * | wnum - sdiv * q | < sdiv */
+#if defined(BN_DIV3W) && !defined(OPENSSL_NO_ASM)
+ BN_ULONG bn_div_3_words(BN_ULONG*,BN_ULONG,BN_ULONG);
+ q=bn_div_3_words(wnump,d1,d0);
+#else
+ BN_ULONG n0,n1,rem=0;
+
+ n0=wnump[0];
+ n1=wnump[-1];
+ if (n0 == d0)
+ q=BN_MASK2;
+ else /* n0 < d0 */
+ {
+#ifdef BN_LLONG
+ BN_ULLONG t2;
+
+#if defined(BN_LLONG) && defined(BN_DIV2W) && !defined(bn_div_words)
+ q=(BN_ULONG)(((((BN_ULLONG)n0)<<BN_BITS2)|n1)/d0);
+#else
+ q=bn_div_words(n0,n1,d0);
+#ifdef BN_DEBUG_LEVITTE
+ fprintf(stderr,"DEBUG: bn_div_words(0x%08X,0x%08X,0x%08\
+X) -> 0x%08X\n",
+ n0, n1, d0, q);
+#endif
+#endif
+
+#ifndef REMAINDER_IS_ALREADY_CALCULATED
+ /*
+			 * rem doesn't have to be BN_ULLONG. The least we
+			 * know is that it's less than d0, isn't it?
+ */
+ rem=(n1-q*d0)&BN_MASK2;
+#endif
+ t2=(BN_ULLONG)d1*q;
+
+ for (;;)
+ {
+ if (t2 <= ((((BN_ULLONG)rem)<<BN_BITS2)|wnump[-2]))
+ break;
+ q--;
+ rem += d0;
+ if (rem < d0) break; /* don't let rem overflow */
+ t2 -= d1;
+ }
+#else /* !BN_LLONG */
+ BN_ULONG t2l,t2h;
+
+ q=bn_div_words(n0,n1,d0);
+#ifdef BN_DEBUG_LEVITTE
+ fprintf(stderr,"DEBUG: bn_div_words(0x%08X,0x%08X,0x%08\
+X) -> 0x%08X\n",
+ n0, n1, d0, q);
+#endif
+#ifndef REMAINDER_IS_ALREADY_CALCULATED
+ rem=(n1-q*d0)&BN_MASK2;
+#endif
+
+#if defined(BN_UMULT_LOHI)
+ BN_UMULT_LOHI(t2l,t2h,d1,q);
+#elif defined(BN_UMULT_HIGH)
+ t2l = d1 * q;
+ t2h = BN_UMULT_HIGH(d1,q);
+#else
+ {
+ BN_ULONG ql, qh;
+ t2l=LBITS(d1); t2h=HBITS(d1);
+ ql =LBITS(q); qh =HBITS(q);
+ mul64(t2l,t2h,ql,qh); /* t2=(BN_ULLONG)d1*q; */
+ }
+#endif
+
+ for (;;)
+ {
+ if ((t2h < rem) ||
+ ((t2h == rem) && (t2l <= wnump[-2])))
+ break;
+ q--;
+ rem += d0;
+ if (rem < d0) break; /* don't let rem overflow */
+ if (t2l < d1) t2h--; t2l -= d1;
+ }
+#endif /* !BN_LLONG */
+ }
+#endif /* !BN_DIV3W */
+
+ l0=bn_mul_words(tmp->d,sdiv->d,div_n,q);
+ tmp->d[div_n]=l0;
+ wnum.d--;
+		/* ignore the top values of the bignums, just subtract the two
+		 * BN_ULONG arrays with bn_sub_words */
+ if (bn_sub_words(wnum.d, wnum.d, tmp->d, div_n+1))
+ {
+ /* Note: As we have considered only the leading
+ * two BN_ULONGs in the calculation of q, sdiv * q
+ * might be greater than wnum (but then (q-1) * sdiv
+			 * is less than or equal to wnum)
+ */
+ q--;
+ if (bn_add_words(wnum.d, wnum.d, sdiv->d, div_n))
+ /* we can't have an overflow here (assuming
+ * that q != 0, but if q == 0 then tmp is
+ * zero anyway) */
+ (*wnump)++;
+ }
+ /* store part of the result */
+ *resp = q;
+ }
+ bn_correct_top(snum);
+ if (rm != NULL)
+ {
+ /* Keep a copy of the neg flag in num because if rm==num
+ * BN_rshift() will overwrite it.
+ */
+ int neg = num->neg;
+ BN_rshift(rm,snum,norm_shift);
+ if (!BN_is_zero(rm))
+ rm->neg = neg;
+ bn_check_top(rm);
+ }
+ if (no_branch) bn_correct_top(res);
+ BN_CTX_end(ctx);
+ return(1);
+err:
+ bn_check_top(rm);
+ BN_CTX_end(ctx);
+ return(0);
+ }
+#endif
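The header comment above fixes BN_div()'s contract: the quotient is rounded towards zero and dv*divisor + rm == num, with the sign rules stated there. A small self-contained check of that identity (the values are arbitrary, the function is illustrative):

/* Illustrative sketch (not from bn_div.c): exercising the documented
 * contract dv*divisor + rm == num. */
#include <openssl/bn.h>

static int check_div_contract(void)
	{
	BN_CTX *ctx = BN_CTX_new();
	BIGNUM *num = BN_new(), *d = BN_new();
	BIGNUM *dv = BN_new(), *rm = BN_new(), *t = BN_new();
	int ok = 0;

	if (!ctx || !num || !d || !dv || !rm || !t) goto err;
	if (!BN_set_word(num, 1000003) || !BN_set_word(d, 97)) goto err;

	if (!BN_div(dv, rm, num, d, ctx)) goto err;

	/* t = dv*d + rm must reproduce num */
	if (!BN_mul(t, dv, d, ctx) || !BN_add(t, t, rm)) goto err;
	ok = (BN_cmp(t, num) == 0);
err:
	BN_free(t); BN_free(rm); BN_free(dv); BN_free(d); BN_free(num);
	BN_CTX_free(ctx);
	return ok;
	}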
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_err.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_err.c
new file mode 100644
index 00000000..f722b525
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_err.c
@@ -0,0 +1,152 @@
+/* crypto/bn/bn_err.c */
+/* ====================================================================
+ * Copyright (c) 1999-2007 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+/* NOTE: this file was auto generated by the mkerr.pl script: any changes
+ * made to it will be overwritten when the script next updates this file,
+ * only reason strings will be preserved.
+ */
+
+#include <stdio.h>
+#include <openssl/err.h>
+#include <openssl/bn.h>
+
+/* BEGIN ERROR CODES */
+#ifndef OPENSSL_NO_ERR
+
+#define ERR_FUNC(func) ERR_PACK(ERR_LIB_BN,func,0)
+#define ERR_REASON(reason) ERR_PACK(ERR_LIB_BN,0,reason)
+
+static ERR_STRING_DATA BN_str_functs[]=
+ {
+{ERR_FUNC(BN_F_BNRAND), "BNRAND"},
+{ERR_FUNC(BN_F_BN_BLINDING_CONVERT_EX), "BN_BLINDING_convert_ex"},
+{ERR_FUNC(BN_F_BN_BLINDING_CREATE_PARAM), "BN_BLINDING_create_param"},
+{ERR_FUNC(BN_F_BN_BLINDING_INVERT_EX), "BN_BLINDING_invert_ex"},
+{ERR_FUNC(BN_F_BN_BLINDING_NEW), "BN_BLINDING_new"},
+{ERR_FUNC(BN_F_BN_BLINDING_UPDATE), "BN_BLINDING_update"},
+{ERR_FUNC(BN_F_BN_BN2DEC), "BN_bn2dec"},
+{ERR_FUNC(BN_F_BN_BN2HEX), "BN_bn2hex"},
+{ERR_FUNC(BN_F_BN_CTX_GET), "BN_CTX_get"},
+{ERR_FUNC(BN_F_BN_CTX_NEW), "BN_CTX_new"},
+{ERR_FUNC(BN_F_BN_CTX_START), "BN_CTX_start"},
+{ERR_FUNC(BN_F_BN_DIV), "BN_div"},
+{ERR_FUNC(BN_F_BN_DIV_NO_BRANCH), "BN_div_no_branch"},
+{ERR_FUNC(BN_F_BN_DIV_RECP), "BN_div_recp"},
+{ERR_FUNC(BN_F_BN_EXP), "BN_exp"},
+{ERR_FUNC(BN_F_BN_EXPAND2), "bn_expand2"},
+{ERR_FUNC(BN_F_BN_EXPAND_INTERNAL), "BN_EXPAND_INTERNAL"},
+{ERR_FUNC(BN_F_BN_GENERATE_DSA_NONCE), "BN_generate_dsa_nonce"},
+{ERR_FUNC(BN_F_BN_GF2M_MOD), "BN_GF2m_mod"},
+{ERR_FUNC(BN_F_BN_GF2M_MOD_EXP), "BN_GF2m_mod_exp"},
+{ERR_FUNC(BN_F_BN_GF2M_MOD_MUL), "BN_GF2m_mod_mul"},
+{ERR_FUNC(BN_F_BN_GF2M_MOD_SOLVE_QUAD), "BN_GF2m_mod_solve_quad"},
+{ERR_FUNC(BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR), "BN_GF2m_mod_solve_quad_arr"},
+{ERR_FUNC(BN_F_BN_GF2M_MOD_SQR), "BN_GF2m_mod_sqr"},
+{ERR_FUNC(BN_F_BN_GF2M_MOD_SQRT), "BN_GF2m_mod_sqrt"},
+{ERR_FUNC(BN_F_BN_MOD_EXP2_MONT), "BN_mod_exp2_mont"},
+{ERR_FUNC(BN_F_BN_MOD_EXP_MONT), "BN_mod_exp_mont"},
+{ERR_FUNC(BN_F_BN_MOD_EXP_MONT_CONSTTIME), "BN_mod_exp_mont_consttime"},
+{ERR_FUNC(BN_F_BN_MOD_EXP_MONT_WORD), "BN_mod_exp_mont_word"},
+{ERR_FUNC(BN_F_BN_MOD_EXP_RECP), "BN_mod_exp_recp"},
+{ERR_FUNC(BN_F_BN_MOD_EXP_SIMPLE), "BN_mod_exp_simple"},
+{ERR_FUNC(BN_F_BN_MOD_INVERSE), "BN_mod_inverse"},
+{ERR_FUNC(BN_F_BN_MOD_INVERSE_NO_BRANCH), "BN_mod_inverse_no_branch"},
+{ERR_FUNC(BN_F_BN_MOD_LSHIFT_QUICK), "BN_mod_lshift_quick"},
+{ERR_FUNC(BN_F_BN_MOD_MUL_RECIPROCAL), "BN_mod_mul_reciprocal"},
+{ERR_FUNC(BN_F_BN_MOD_SQRT), "BN_mod_sqrt"},
+{ERR_FUNC(BN_F_BN_MPI2BN), "BN_mpi2bn"},
+{ERR_FUNC(BN_F_BN_NEW), "BN_new"},
+{ERR_FUNC(BN_F_BN_RAND), "BN_rand"},
+{ERR_FUNC(BN_F_BN_RAND_RANGE), "BN_rand_range"},
+{ERR_FUNC(BN_F_BN_USUB), "BN_usub"},
+{0,NULL}
+ };
+
+static ERR_STRING_DATA BN_str_reasons[]=
+ {
+{ERR_REASON(BN_R_ARG2_LT_ARG3) ,"arg2 lt arg3"},
+{ERR_REASON(BN_R_BAD_RECIPROCAL) ,"bad reciprocal"},
+{ERR_REASON(BN_R_BIGNUM_TOO_LONG) ,"bignum too long"},
+{ERR_REASON(BN_R_CALLED_WITH_EVEN_MODULUS),"called with even modulus"},
+{ERR_REASON(BN_R_DIV_BY_ZERO) ,"div by zero"},
+{ERR_REASON(BN_R_ENCODING_ERROR) ,"encoding error"},
+{ERR_REASON(BN_R_EXPAND_ON_STATIC_BIGNUM_DATA),"expand on static bignum data"},
+{ERR_REASON(BN_R_INPUT_NOT_REDUCED) ,"input not reduced"},
+{ERR_REASON(BN_R_INVALID_LENGTH) ,"invalid length"},
+{ERR_REASON(BN_R_INVALID_RANGE) ,"invalid range"},
+{ERR_REASON(BN_R_NOT_A_SQUARE) ,"not a square"},
+{ERR_REASON(BN_R_NOT_INITIALIZED) ,"not initialized"},
+{ERR_REASON(BN_R_NO_INVERSE) ,"no inverse"},
+{ERR_REASON(BN_R_NO_SOLUTION) ,"no solution"},
+{ERR_REASON(BN_R_PRIVATE_KEY_TOO_LARGE) ,"private key too large"},
+{ERR_REASON(BN_R_P_IS_NOT_PRIME) ,"p is not prime"},
+{ERR_REASON(BN_R_TOO_MANY_ITERATIONS) ,"too many iterations"},
+{ERR_REASON(BN_R_TOO_MANY_TEMPORARY_VARIABLES),"too many temporary variables"},
+{0,NULL}
+ };
+
+#endif
+
+void ERR_load_BN_strings(void)
+ {
+#ifndef OPENSSL_NO_ERR
+
+ if (ERR_func_error_string(BN_str_functs[0].error) == NULL)
+ {
+ ERR_load_strings(0,BN_str_functs);
+ ERR_load_strings(0,BN_str_reasons);
+ }
+#endif
+ }
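Once ERR_load_BN_strings() has populated these tables (applications normally reach it through ERR_load_crypto_strings()), any error queued with BNerr() by the functions in this directory can be rendered with the generic ERR routines. A short sketch; the printed text is shown only as an example:

/* Illustrative sketch (not from bn_err.c): turning a queued BN error,
 * e.g. a BN_div() by zero, into a human-readable string. */
#include <stdio.h>
#include <openssl/err.h>

static void print_last_bn_error(void)
	{
	char buf[256];
	unsigned long e = ERR_get_error();

	if (e != 0)
		{
		ERR_error_string_n(e, buf, sizeof(buf));
		fprintf(stderr, "%s\n", buf);	/* e.g. "...:BN_div:div by zero" */
		}
	}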
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp.c
new file mode 100644
index 00000000..2abf6fd6
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp.c
@@ -0,0 +1,1097 @@
+/* crypto/bn/bn_exp.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+#include <stdlib.h>
+#ifdef _WIN32
+# include <malloc.h>
+# ifndef alloca
+# define alloca _alloca
+# endif
+#elif defined(__GNUC__)
+# ifndef alloca
+# define alloca(s) __builtin_alloca((s))
+# endif
+#endif
+
+/* maximum precomputation table size for *variable* sliding windows */
+#define TABLE_SIZE 32
+
+/* this one is simple, but it works */
+int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
+ {
+ int i,bits,ret=0;
+ BIGNUM *v,*rr;
+
+ if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0)
+ {
+ /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
+ BNerr(BN_F_BN_EXP,ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return -1;
+ }
+
+ BN_CTX_start(ctx);
+ if ((r == a) || (r == p))
+ rr = BN_CTX_get(ctx);
+ else
+ rr = r;
+ v = BN_CTX_get(ctx);
+ if (rr == NULL || v == NULL) goto err;
+
+ if (BN_copy(v,a) == NULL) goto err;
+ bits=BN_num_bits(p);
+
+ if (BN_is_odd(p))
+ { if (BN_copy(rr,a) == NULL) goto err; }
+ else { if (!BN_one(rr)) goto err; }
+
+ for (i=1; i<bits; i++)
+ {
+ if (!BN_sqr(v,v,ctx)) goto err;
+ if (BN_is_bit_set(p,i))
+ {
+ if (!BN_mul(rr,rr,v,ctx)) goto err;
+ }
+ }
+ ret=1;
+err:
+ if (r != rr) BN_copy(r,rr);
+ BN_CTX_end(ctx);
+ bn_check_top(r);
+ return(ret);
+ }
+
+
+int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
+ BN_CTX *ctx)
+ {
+ int ret;
+
+ bn_check_top(a);
+ bn_check_top(p);
+ bn_check_top(m);
+
+ /* For even modulus m = 2^k*m_odd, it might make sense to compute
+ * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
+ * exponentiation for the odd part), using appropriate exponent
+ * reductions, and combine the results using the CRT.
+ *
+ * For now, we use Montgomery only if the modulus is odd; otherwise,
+ * exponentiation using the reciprocal-based quick remaindering
+ * algorithm is used.
+ *
+ * (Timing obtained with expspeed.c [computations a^p mod m
+ * where a, p, m are of the same length: 256, 512, 1024, 2048,
+ * 4096, 8192 bits], compared to the running time of the
+ * standard algorithm:
+ *
+ * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
+ * 55 .. 77 % [UltraSparc processor, but
+ * debug-solaris-sparcv8-gcc conf.]
+ *
+ * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
+ * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
+ *
+ * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
+ * at 2048 and more bits, but at 512 and 1024 bits, it was
+ * slower even than the standard algorithm!
+ *
+ * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
+ * should be obtained when the new Montgomery reduction code
+ * has been integrated into OpenSSL.)
+ */
+
+#define MONT_MUL_MOD
+#define MONT_EXP_WORD
+#define RECP_MUL_MOD
+
+#ifdef MONT_MUL_MOD
+ /* I have finally been able to take out this pre-condition of
+ * the top bit being set. It was caused by an error in BN_div
+	 * with negatives. There was also another problem when, for a^b%m,
+	 * a >= m. eay 07-May-97 */
+/* if ((m->d[m->top-1]&BN_TBIT) && BN_is_odd(m)) */
+
+ if (BN_is_odd(m))
+ {
+# ifdef MONT_EXP_WORD
+ if (a->top == 1 && !a->neg && (BN_get_flags(p, BN_FLG_CONSTTIME) == 0))
+ {
+ BN_ULONG A = a->d[0];
+ ret=BN_mod_exp_mont_word(r,A,p,m,ctx,NULL);
+ }
+ else
+# endif
+ ret=BN_mod_exp_mont(r,a,p,m,ctx,NULL);
+ }
+ else
+#endif
+#ifdef RECP_MUL_MOD
+ { ret=BN_mod_exp_recp(r,a,p,m,ctx); }
+#else
+ { ret=BN_mod_exp_simple(r,a,p,m,ctx); }
+#endif
+
+ bn_check_top(r);
+ return(ret);
+ }
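The dispatch above is invisible to callers: BN_mod_exp() picks the Montgomery, word-based or reciprocal path depending on the modulus and the exponent's flags. A minimal illustrative call (the numbers are arbitrary; 561 is a Carmichael number, so the expected result is 1):

/* Illustrative sketch (not from bn_exp.c): r = a^p mod m through the generic
 * entry point; an odd modulus takes the Montgomery path described above. */
#include <openssl/bn.h>

static int modexp_example(void)
	{
	BN_CTX *ctx = BN_CTX_new();
	BIGNUM *r = BN_new(), *a = BN_new(), *p = BN_new(), *m = BN_new();
	int ok = 0;

	if (!ctx || !r || !a || !p || !m) goto err;
	if (!BN_set_word(a, 7) || !BN_set_word(p, 560) || !BN_set_word(m, 561))
		goto err;
	ok = BN_mod_exp(r, a, p, m, ctx);	/* 7^560 mod 561 == 1 */
err:
	BN_free(m); BN_free(p); BN_free(a); BN_free(r);
	BN_CTX_free(ctx);
	return ok;
	}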
+
+
+int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx)
+ {
+ int i,j,bits,ret=0,wstart,wend,window,wvalue;
+ int start=1;
+ BIGNUM *aa;
+ /* Table of variables obtained from 'ctx' */
+ BIGNUM *val[TABLE_SIZE];
+ BN_RECP_CTX recp;
+
+ if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0)
+ {
+ /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
+ BNerr(BN_F_BN_MOD_EXP_RECP,ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return -1;
+ }
+
+ bits=BN_num_bits(p);
+
+ if (bits == 0)
+ {
+ ret = BN_one(r);
+ return ret;
+ }
+
+ BN_CTX_start(ctx);
+ aa = BN_CTX_get(ctx);
+ val[0] = BN_CTX_get(ctx);
+ if(!aa || !val[0]) goto err;
+
+ BN_RECP_CTX_init(&recp);
+ if (m->neg)
+ {
+ /* ignore sign of 'm' */
+ if (!BN_copy(aa, m)) goto err;
+ aa->neg = 0;
+ if (BN_RECP_CTX_set(&recp,aa,ctx) <= 0) goto err;
+ }
+ else
+ {
+ if (BN_RECP_CTX_set(&recp,m,ctx) <= 0) goto err;
+ }
+
+ if (!BN_nnmod(val[0],a,m,ctx)) goto err; /* 1 */
+ if (BN_is_zero(val[0]))
+ {
+ BN_zero(r);
+ ret = 1;
+ goto err;
+ }
+
+ window = BN_window_bits_for_exponent_size(bits);
+ if (window > 1)
+ {
+ if (!BN_mod_mul_reciprocal(aa,val[0],val[0],&recp,ctx))
+ goto err; /* 2 */
+ j=1<<(window-1);
+ for (i=1; i<j; i++)
+ {
+ if(((val[i] = BN_CTX_get(ctx)) == NULL) ||
+ !BN_mod_mul_reciprocal(val[i],val[i-1],
+ aa,&recp,ctx))
+ goto err;
+ }
+ }
+
+ start=1; /* This is used to avoid multiplication etc
+ * when there is only the value '1' in the
+ * buffer. */
+ wvalue=0; /* The 'value' of the window */
+ wstart=bits-1; /* The top bit of the window */
+ wend=0; /* The bottom bit of the window */
+
+ if (!BN_one(r)) goto err;
+
+ for (;;)
+ {
+ if (BN_is_bit_set(p,wstart) == 0)
+ {
+ if (!start)
+ if (!BN_mod_mul_reciprocal(r,r,r,&recp,ctx))
+ goto err;
+ if (wstart == 0) break;
+ wstart--;
+ continue;
+ }
+		/* We now have wstart on a 'set' bit; we now need to work out
+		 * how big a window to do. To do this we need to scan
+ * forward until the last set bit before the end of the
+ * window */
+ j=wstart;
+ wvalue=1;
+ wend=0;
+ for (i=1; i<window; i++)
+ {
+ if (wstart-i < 0) break;
+ if (BN_is_bit_set(p,wstart-i))
+ {
+ wvalue<<=(i-wend);
+ wvalue|=1;
+ wend=i;
+ }
+ }
+
+ /* wend is the size of the current window */
+ j=wend+1;
+ /* add the 'bytes above' */
+ if (!start)
+ for (i=0; i<j; i++)
+ {
+ if (!BN_mod_mul_reciprocal(r,r,r,&recp,ctx))
+ goto err;
+ }
+
+ /* wvalue will be an odd number < 2^window */
+ if (!BN_mod_mul_reciprocal(r,r,val[wvalue>>1],&recp,ctx))
+ goto err;
+
+ /* move the 'window' down further */
+ wstart-=wend+1;
+ wvalue=0;
+ start=0;
+ if (wstart < 0) break;
+ }
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ BN_RECP_CTX_free(&recp);
+ bn_check_top(r);
+ return(ret);
+ }
+
+
+int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
+ {
+ int i,j,bits,ret=0,wstart,wend,window,wvalue;
+ int start=1;
+ BIGNUM *d,*r;
+ const BIGNUM *aa;
+ /* Table of variables obtained from 'ctx' */
+ BIGNUM *val[TABLE_SIZE];
+ BN_MONT_CTX *mont=NULL;
+
+ if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0)
+ {
+ return BN_mod_exp_mont_consttime(rr, a, p, m, ctx, in_mont);
+ }
+
+ bn_check_top(a);
+ bn_check_top(p);
+ bn_check_top(m);
+
+ if (!BN_is_odd(m))
+ {
+ BNerr(BN_F_BN_MOD_EXP_MONT,BN_R_CALLED_WITH_EVEN_MODULUS);
+ return(0);
+ }
+ bits=BN_num_bits(p);
+ if (bits == 0)
+ {
+ ret = BN_one(rr);
+ return ret;
+ }
+
+ BN_CTX_start(ctx);
+ d = BN_CTX_get(ctx);
+ r = BN_CTX_get(ctx);
+ val[0] = BN_CTX_get(ctx);
+ if (!d || !r || !val[0]) goto err;
+
+ /* If this is not done, things will break in the montgomery
+ * part */
+
+ if (in_mont != NULL)
+ mont=in_mont;
+ else
+ {
+ if ((mont=BN_MONT_CTX_new()) == NULL) goto err;
+ if (!BN_MONT_CTX_set(mont,m,ctx)) goto err;
+ }
+
+ if (a->neg || BN_ucmp(a,m) >= 0)
+ {
+ if (!BN_nnmod(val[0],a,m,ctx))
+ goto err;
+ aa= val[0];
+ }
+ else
+ aa=a;
+ if (BN_is_zero(aa))
+ {
+ BN_zero(rr);
+ ret = 1;
+ goto err;
+ }
+ if (!BN_to_montgomery(val[0],aa,mont,ctx)) goto err; /* 1 */
+
+ window = BN_window_bits_for_exponent_size(bits);
+ if (window > 1)
+ {
+ if (!BN_mod_mul_montgomery(d,val[0],val[0],mont,ctx)) goto err; /* 2 */
+ j=1<<(window-1);
+ for (i=1; i<j; i++)
+ {
+ if(((val[i] = BN_CTX_get(ctx)) == NULL) ||
+ !BN_mod_mul_montgomery(val[i],val[i-1],
+ d,mont,ctx))
+ goto err;
+ }
+ }
+
+ start=1; /* This is used to avoid multiplication etc
+ * when there is only the value '1' in the
+ * buffer. */
+ wvalue=0; /* The 'value' of the window */
+ wstart=bits-1; /* The top bit of the window */
+ wend=0; /* The bottom bit of the window */
+
+ if (!BN_to_montgomery(r,BN_value_one(),mont,ctx)) goto err;
+ for (;;)
+ {
+ if (BN_is_bit_set(p,wstart) == 0)
+ {
+ if (!start)
+ {
+ if (!BN_mod_mul_montgomery(r,r,r,mont,ctx))
+ goto err;
+ }
+ if (wstart == 0) break;
+ wstart--;
+ continue;
+ }
+		/* We now have wstart on a 'set' bit; we now need to work out
+		 * how big a window to do. To do this we need to scan
+ * forward until the last set bit before the end of the
+ * window */
+ j=wstart;
+ wvalue=1;
+ wend=0;
+ for (i=1; i<window; i++)
+ {
+ if (wstart-i < 0) break;
+ if (BN_is_bit_set(p,wstart-i))
+ {
+ wvalue<<=(i-wend);
+ wvalue|=1;
+ wend=i;
+ }
+ }
+
+ /* wend is the size of the current window */
+ j=wend+1;
+ /* add the 'bytes above' */
+ if (!start)
+ for (i=0; i<j; i++)
+ {
+ if (!BN_mod_mul_montgomery(r,r,r,mont,ctx))
+ goto err;
+ }
+
+ /* wvalue will be an odd number < 2^window */
+ if (!BN_mod_mul_montgomery(r,r,val[wvalue>>1],mont,ctx))
+ goto err;
+
+ /* move the 'window' down further */
+ wstart-=wend+1;
+ wvalue=0;
+ start=0;
+ if (wstart < 0) break;
+ }
+ if (!BN_from_montgomery(rr,r,mont,ctx)) goto err;
+ ret=1;
+err:
+ if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont);
+ BN_CTX_end(ctx);
+ bn_check_top(rr);
+ return(ret);
+ }
+
+
+/* BN_mod_exp_mont_consttime() stores the precomputed powers in a specific layout
+ * so that accessing any of these table values shows the same access pattern as far
+ * as cache lines are concerned. The following functions are used to transfer a BIGNUM
+ * from/to that table. */
+
+static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, unsigned char *buf, int idx, int width)
+ {
+ size_t i, j;
+
+ if (top > b->top)
+ top = b->top; /* this works because 'buf' is explicitly zeroed */
+ for (i = 0, j=idx; i < top * sizeof b->d[0]; i++, j+=width)
+ {
+ buf[j] = ((unsigned char*)b->d)[i];
+ }
+
+ return 1;
+ }
+
+static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf, int idx, int width)
+ {
+ size_t i, j;
+
+ if (bn_wexpand(b, top) == NULL)
+ return 0;
+
+ for (i=0, j=idx; i < top * sizeof b->d[0]; i++, j+=width)
+ {
+ ((unsigned char*)b->d)[i] = buf[j];
+ }
+
+ b->top = top;
+ bn_correct_top(b);
+ return 1;
+ }
+
+/* Given a pointer value, compute the next address that is a cache line multiple. */
+#define MOD_EXP_CTIME_ALIGN(x_) \
+ ((unsigned char*)(x_) + (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK))))
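The two PREBUF helpers above interleave the table entries byte by byte: byte i of entry idx is stored at buf[idx + i*width], so reading any single entry touches every cache line of the table in the same order. A toy restatement of that indexing (the names are illustrative, not from this file):

/* Illustrative sketch (not from bn_exp.c): the strided layout used by
 * MOD_EXP_CTIME_COPY_TO_PREBUF / MOD_EXP_CTIME_COPY_FROM_PREBUF above. */
#include <stddef.h>

static void strided_scatter(unsigned char *buf, const unsigned char *entry,
	size_t len, size_t idx, size_t width)
	{
	size_t i;
	for (i = 0; i < len; i++)
		buf[idx + i*width] = entry[i];	/* byte i of table entry 'idx' */
	}

static void strided_gather(unsigned char *entry, const unsigned char *buf,
	size_t len, size_t idx, size_t width)
	{
	size_t i;
	for (i = 0; i < len; i++)
		entry[i] = buf[idx + i*width];
	}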
+
+/* This variant of BN_mod_exp_mont() uses fixed windows and the special
+ * precomputation memory layout to limit data-dependency to a minimum
+ * to protect secret exponents (cf. the hyper-threading timing attacks
+ * pointed out by Colin Percival,
+ * http://www.daemonology.net/hyperthreading-considered-harmful/)
+ */
+int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
+ {
+ int i,bits,ret=0,window,wvalue;
+ int top;
+ BN_MONT_CTX *mont=NULL;
+
+ int numPowers;
+ unsigned char *powerbufFree=NULL;
+ int powerbufLen = 0;
+ unsigned char *powerbuf=NULL;
+ BIGNUM tmp, am;
+
+ bn_check_top(a);
+ bn_check_top(p);
+ bn_check_top(m);
+
+ top = m->top;
+
+ if (!(m->d[0] & 1))
+ {
+ BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME,BN_R_CALLED_WITH_EVEN_MODULUS);
+ return(0);
+ }
+ bits=BN_num_bits(p);
+ if (bits == 0)
+ {
+ ret = BN_one(rr);
+ return ret;
+ }
+
+ BN_CTX_start(ctx);
+
+ /* Allocate a montgomery context if it was not supplied by the caller.
+ * If this is not done, things will break in the montgomery part.
+ */
+ if (in_mont != NULL)
+ mont=in_mont;
+ else
+ {
+ if ((mont=BN_MONT_CTX_new()) == NULL) goto err;
+ if (!BN_MONT_CTX_set(mont,m,ctx)) goto err;
+ }
+
+ /* Get the window size to use with size of p. */
+ window = BN_window_bits_for_ctime_exponent_size(bits);
+#if defined(OPENSSL_BN_ASM_MONT5)
+ if (window==6 && bits<=1024) window=5; /* ~5% improvement of 2048-bit RSA sign */
+#endif
+
+ /* Allocate a buffer large enough to hold all of the pre-computed
+ * powers of am, am itself and tmp.
+ */
+ numPowers = 1 << window;
+ powerbufLen = sizeof(m->d[0])*(top*numPowers +
+ ((2*top)>numPowers?(2*top):numPowers));
+#ifdef alloca
+ if (powerbufLen < 3072)
+ powerbufFree = alloca(powerbufLen+MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH);
+ else
+#endif
+ if ((powerbufFree=(unsigned char*)OPENSSL_malloc(powerbufLen+MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH)) == NULL)
+ goto err;
+
+ powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree);
+ memset(powerbuf, 0, powerbufLen);
+
+#ifdef alloca
+ if (powerbufLen < 3072)
+ powerbufFree = NULL;
+#endif
+
+ /* lay down tmp and am right after powers table */
+ tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0])*top*numPowers);
+ am.d = tmp.d + top;
+ tmp.top = am.top = 0;
+ tmp.dmax = am.dmax = top;
+ tmp.neg = am.neg = 0;
+ tmp.flags = am.flags = BN_FLG_STATIC_DATA;
+
+ /* prepare a^0 in Montgomery domain */
+#if 1
+ if (!BN_to_montgomery(&tmp,BN_value_one(),mont,ctx)) goto err;
+#else
+ tmp.d[0] = (0-m->d[0])&BN_MASK2; /* 2^(top*BN_BITS2) - m */
+ for (i=1;i<top;i++)
+ tmp.d[i] = (~m->d[i])&BN_MASK2;
+ tmp.top = top;
+#endif
+
+ /* prepare a^1 in Montgomery domain */
+ if (a->neg || BN_ucmp(a,m) >= 0)
+ {
+ if (!BN_mod(&am,a,m,ctx)) goto err;
+ if (!BN_to_montgomery(&am,&am,mont,ctx)) goto err;
+ }
+ else if (!BN_to_montgomery(&am,a,mont,ctx)) goto err;
+
+#if defined(OPENSSL_BN_ASM_MONT5)
+ /* This optimization uses ideas from http://eprint.iacr.org/2011/239,
+ * specifically optimization of cache-timing attack countermeasures
+ * and pre-computation optimization. */
+
+ /* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as
+ * 512-bit RSA is hardly relevant, we omit it to spare size... */
+ if (window==5)
+ {
+ void bn_mul_mont_gather5(BN_ULONG *rp,const BN_ULONG *ap,
+ const void *table,const BN_ULONG *np,
+ const BN_ULONG *n0,int num,int power);
+ void bn_scatter5(const BN_ULONG *inp,size_t num,
+ void *table,size_t power);
+ void bn_gather5(BN_ULONG *out,size_t num,
+ void *table,size_t power);
+
+ BN_ULONG *np=mont->N.d, *n0=mont->n0;
+
+ /* BN_to_montgomery can contaminate words above .top
+ * [in BN_DEBUG[_DEBUG] build]... */
+ for (i=am.top; i<top; i++) am.d[i]=0;
+ for (i=tmp.top; i<top; i++) tmp.d[i]=0;
+
+ bn_scatter5(tmp.d,top,powerbuf,0);
+ bn_scatter5(am.d,am.top,powerbuf,1);
+ bn_mul_mont(tmp.d,am.d,am.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,2);
+
+#if 0
+ for (i=3; i<32; i++)
+ {
+ /* Calculate a^i = a^(i-1) * a */
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+#else
+ /* same as above, but uses squaring for 1/2 of operations */
+ for (i=4; i<32; i*=2)
+ {
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+ for (i=3; i<8; i+=2)
+ {
+ int j;
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ for (j=2*i; j<32; j*=2)
+ {
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,j);
+ }
+ }
+ for (; i<16; i+=2)
+ {
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_scatter5(tmp.d,top,powerbuf,2*i);
+ }
+ for (; i<32; i+=2)
+ {
+ bn_mul_mont_gather5(tmp.d,am.d,powerbuf,np,n0,top,i-1);
+ bn_scatter5(tmp.d,top,powerbuf,i);
+ }
+#endif
+ bits--;
+ for (wvalue=0, i=bits%5; i>=0; i--,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+ bn_gather5(tmp.d,top,powerbuf,wvalue);
+
+ /* Scan the exponent one window at a time starting from the most
+ * significant bits.
+ */
+ while (bits >= 0)
+ {
+ for (wvalue=0, i=0; i<5; i++,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont(tmp.d,tmp.d,tmp.d,np,n0,top);
+ bn_mul_mont_gather5(tmp.d,tmp.d,powerbuf,np,n0,top,wvalue);
+ }
+
+ tmp.top=top;
+ bn_correct_top(&tmp);
+ }
+ else
+#endif
+ {
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers)) goto err;
+
+ /* If the window size is greater than 1, then calculate
+ * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1)
+ * (even powers could instead be computed as (a^(i/2))^2
+ * to use the slight performance advantage of sqr over mul).
+ */
+ if (window > 1)
+ {
+ if (!BN_mod_mul_montgomery(&tmp,&am,&am,mont,ctx)) goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, numPowers)) goto err;
+ for (i=3; i<numPowers; i++)
+ {
+ /* Calculate a^i = a^(i-1) * a */
+ if (!BN_mod_mul_montgomery(&tmp,&am,&tmp,mont,ctx))
+ goto err;
+ if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, numPowers)) goto err;
+ }
+ }
+
+ bits--;
+ for (wvalue=0, i=bits%window; i>=0; i--,bits--)
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp,top,powerbuf,wvalue,numPowers)) goto err;
+
+ /* Scan the exponent one window at a time starting from the most
+ * significant bits.
+ */
+ while (bits >= 0)
+ {
+ wvalue=0; /* The 'value' of the window */
+
+ /* Scan the window, squaring the result as we go */
+ for (i=0; i<window; i++,bits--)
+ {
+ if (!BN_mod_mul_montgomery(&tmp,&tmp,&tmp,mont,ctx)) goto err;
+ wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+ }
+
+ /* Fetch the appropriate pre-computed value from the pre-buf */
+ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, numPowers)) goto err;
+
+ /* Multiply the result into the intermediate result */
+ if (!BN_mod_mul_montgomery(&tmp,&tmp,&am,mont,ctx)) goto err;
+ }
+ }
+
+ /* Convert the final result from montgomery to standard format */
+ if (!BN_from_montgomery(rr,&tmp,mont,ctx)) goto err;
+ ret=1;
+err:
+ if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont);
+ if (powerbuf!=NULL)
+ {
+ OPENSSL_cleanse(powerbuf,powerbufLen);
+ if (powerbufFree) OPENSSL_free(powerbufFree);
+ }
+ BN_CTX_end(ctx);
+ return(ret);
+ }
+
+int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont)
+ {
+ BN_MONT_CTX *mont = NULL;
+ int b, bits, ret=0;
+ int r_is_one;
+ BN_ULONG w, next_w;
+ BIGNUM *d, *r, *t;
+ BIGNUM *swap_tmp;
+#define BN_MOD_MUL_WORD(r, w, m) \
+ (BN_mul_word(r, (w)) && \
+ (/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \
+ (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1))))
+ /* BN_MOD_MUL_WORD is only used with 'w' large,
+ * so the BN_ucmp test is probably more overhead
+ * than always using BN_mod (which uses BN_copy if
+ * a similar test returns true). */
+ /* We can use BN_mod and do not need BN_nnmod because our
+ * accumulator is never negative (the result of BN_mod does
+ * not depend on the sign of the modulus).
+ */
+#define BN_TO_MONTGOMERY_WORD(r, w, mont) \
+ (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx))
+
+ if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0)
+ {
+ /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
+ BNerr(BN_F_BN_MOD_EXP_MONT_WORD,ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return -1;
+ }
+
+ bn_check_top(p);
+ bn_check_top(m);
+
+ if (!BN_is_odd(m))
+ {
+ BNerr(BN_F_BN_MOD_EXP_MONT_WORD,BN_R_CALLED_WITH_EVEN_MODULUS);
+ return(0);
+ }
+ if (m->top == 1)
+ a %= m->d[0]; /* make sure that 'a' is reduced */
+
+ bits = BN_num_bits(p);
+ if (bits == 0)
+ {
+ ret = BN_one(rr);
+ return ret;
+ }
+ if (a == 0)
+ {
+ BN_zero(rr);
+ ret = 1;
+ return ret;
+ }
+
+ BN_CTX_start(ctx);
+ d = BN_CTX_get(ctx);
+ r = BN_CTX_get(ctx);
+ t = BN_CTX_get(ctx);
+ if (d == NULL || r == NULL || t == NULL) goto err;
+
+ if (in_mont != NULL)
+ mont=in_mont;
+ else
+ {
+ if ((mont = BN_MONT_CTX_new()) == NULL) goto err;
+ if (!BN_MONT_CTX_set(mont, m, ctx)) goto err;
+ }
+
+ r_is_one = 1; /* except for Montgomery factor */
+
+ /* bits-1 >= 0 */
+
+ /* The result is accumulated in the product r*w. */
+ w = a; /* bit 'bits-1' of 'p' is always set */
+ for (b = bits-2; b >= 0; b--)
+ {
+ /* First, square r*w. */
+ next_w = w*w;
+ if ((next_w/w) != w) /* overflow */
+ {
+ if (r_is_one)
+ {
+ if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err;
+ r_is_one = 0;
+ }
+ else
+ {
+ if (!BN_MOD_MUL_WORD(r, w, m)) goto err;
+ }
+ next_w = 1;
+ }
+ w = next_w;
+ if (!r_is_one)
+ {
+ if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) goto err;
+ }
+
+ /* Second, multiply r*w by 'a' if exponent bit is set. */
+ if (BN_is_bit_set(p, b))
+ {
+ next_w = w*a;
+ if ((next_w/a) != w) /* overflow */
+ {
+ if (r_is_one)
+ {
+ if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err;
+ r_is_one = 0;
+ }
+ else
+ {
+ if (!BN_MOD_MUL_WORD(r, w, m)) goto err;
+ }
+ next_w = a;
+ }
+ w = next_w;
+ }
+ }
+
+ /* Finally, set r:=r*w. */
+ if (w != 1)
+ {
+ if (r_is_one)
+ {
+ if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err;
+ r_is_one = 0;
+ }
+ else
+ {
+ if (!BN_MOD_MUL_WORD(r, w, m)) goto err;
+ }
+ }
+
+	if (r_is_one) /* can happen only if a == 1 */
+ {
+ if (!BN_one(rr)) goto err;
+ }
+ else
+ {
+ if (!BN_from_montgomery(rr, r, mont, ctx)) goto err;
+ }
+ ret = 1;
+err:
+ if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont);
+ BN_CTX_end(ctx);
+ bn_check_top(rr);
+ return(ret);
+ }
+
+
+/* The old fallback, simple version :-) */
+int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx)
+ {
+ int i,j,bits,ret=0,wstart,wend,window,wvalue;
+ int start=1;
+ BIGNUM *d;
+ /* Table of variables obtained from 'ctx' */
+ BIGNUM *val[TABLE_SIZE];
+
+ if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0)
+ {
+ /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */
+ BNerr(BN_F_BN_MOD_EXP_SIMPLE,ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return -1;
+ }
+
+ bits=BN_num_bits(p);
+
+ if (bits == 0)
+ {
+ ret = BN_one(r);
+ return ret;
+ }
+
+ BN_CTX_start(ctx);
+ d = BN_CTX_get(ctx);
+ val[0] = BN_CTX_get(ctx);
+ if(!d || !val[0]) goto err;
+
+ if (!BN_nnmod(val[0],a,m,ctx)) goto err; /* 1 */
+ if (BN_is_zero(val[0]))
+ {
+ BN_zero(r);
+ ret = 1;
+ goto err;
+ }
+
+ window = BN_window_bits_for_exponent_size(bits);
+ if (window > 1)
+ {
+ if (!BN_mod_mul(d,val[0],val[0],m,ctx))
+ goto err; /* 2 */
+ j=1<<(window-1);
+ for (i=1; i<j; i++)
+ {
+ if(((val[i] = BN_CTX_get(ctx)) == NULL) ||
+ !BN_mod_mul(val[i],val[i-1],d,m,ctx))
+ goto err;
+ }
+ }
+
+ start=1; /* This is used to avoid multiplication etc
+ * when there is only the value '1' in the
+ * buffer. */
+ wvalue=0; /* The 'value' of the window */
+ wstart=bits-1; /* The top bit of the window */
+ wend=0; /* The bottom bit of the window */
+
+ if (!BN_one(r)) goto err;
+
+ for (;;)
+ {
+ if (BN_is_bit_set(p,wstart) == 0)
+ {
+ if (!start)
+ if (!BN_mod_mul(r,r,r,m,ctx))
+ goto err;
+ if (wstart == 0) break;
+ wstart--;
+ continue;
+ }
+		/* We now have wstart on a 'set' bit; we now need to work out
+		 * how big a window to do.  To do this we need to scan
+		 * forward until the last set bit before the end of the
+		 * window */
+ j=wstart;
+ wvalue=1;
+ wend=0;
+ for (i=1; i<window; i++)
+ {
+ if (wstart-i < 0) break;
+ if (BN_is_bit_set(p,wstart-i))
+ {
+ wvalue<<=(i-wend);
+ wvalue|=1;
+ wend=i;
+ }
+ }
+
+ /* wend is the size of the current window */
+ j=wend+1;
+		/* square r once for each bit in the window */
+ if (!start)
+ for (i=0; i<j; i++)
+ {
+ if (!BN_mod_mul(r,r,r,m,ctx))
+ goto err;
+ }
+
+ /* wvalue will be an odd number < 2^window */
+ if (!BN_mod_mul(r,r,val[wvalue>>1],m,ctx))
+ goto err;
+
+ /* move the 'window' down further */
+ wstart-=wend+1;
+ wvalue=0;
+ start=0;
+ if (wstart < 0) break;
+ }
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ bn_check_top(r);
+ return(ret);
+ }
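BN_mod_exp_simple above is a classic sliding-window exponentiation: precompute the odd powers a^1, a^3, ..., square once per exponent bit, and multiply in a table entry each time a window (a run of bits ending on a set bit) closes. The same scan on plain 64-bit integers with a fixed 4-bit window (a sketch that assumes a compiler with unsigned __int128; helper names are illustrative, not OpenSSL APIs):

#include <stdint.h>
#include <stdio.h>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t m)
{
    return (uint64_t)((unsigned __int128)a * b % m);
}

/* Sliding-window modular exponentiation with a fixed 4-bit window,
 * mirroring the scan in BN_mod_exp_simple: odd window values index a
 * table of odd powers of the base. */
static uint64_t powmod_win4(uint64_t a, uint64_t e, uint64_t m)
{
    uint64_t val[8], d, r = 1 % m;
    int i, j, bits, wstart, wend, wvalue, start = 1;

    if (e == 0)
        return r;

    val[0] = a % m;                        /* a^1 */
    d = mulmod(val[0], val[0], m);         /* a^2 */
    for (i = 1; i < 8; i++)
        val[i] = mulmod(val[i - 1], d, m); /* a^3, a^5, ..., a^15 */

    bits = 64;
    while (((e >> (bits - 1)) & 1) == 0)
        bits--;
    wstart = bits - 1;

    for (;;) {
        if (((e >> wstart) & 1) == 0) {    /* zero bit: just square */
            if (!start)
                r = mulmod(r, r, m);
            if (wstart == 0)
                break;
            wstart--;
            continue;
        }
        /* grow the window down from wstart, ending on a set bit */
        wvalue = 1;
        wend = 0;
        for (i = 1; i < 4; i++) {
            if (wstart - i < 0)
                break;
            if ((e >> (wstart - i)) & 1) {
                wvalue <<= (i - wend);
                wvalue |= 1;
                wend = i;
            }
        }
        j = wend + 1;                      /* bits covered by the window */
        if (!start)
            for (i = 0; i < j; i++)
                r = mulmod(r, r, m);
        r = mulmod(r, val[wvalue >> 1], m);/* wvalue is odd and < 16 */
        wstart -= j;
        start = 0;
        if (wstart < 0)
            break;
    }
    return r;
}

int main(void)
{
    uint64_t a = 7, e = 0xDEADBEEF, m = 1000000007;
    uint64_t chk = 1, base = a % m, ee = e;

    while (ee) {                           /* naive square-and-multiply */
        if (ee & 1)
            chk = mulmod(chk, base, m);
        base = mulmod(base, base, m);
        ee >>= 1;
    }
    printf("windowed %llu, naive %llu\n",
           (unsigned long long)powmod_win4(a, e, m), (unsigned long long)chk);
    return 0;
}

The naive loop in main() cross-checks the windowed result; both lines should print the same value.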
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp2.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp2.c
new file mode 100644
index 00000000..bd0c34b9
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_exp2.c
@@ -0,0 +1,312 @@
+/* crypto/bn/bn_exp2.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2000 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+#define TABLE_SIZE 32
+
+int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1,
+ const BIGNUM *a2, const BIGNUM *p2, const BIGNUM *m,
+ BN_CTX *ctx, BN_MONT_CTX *in_mont)
+ {
+ int i,j,bits,b,bits1,bits2,ret=0,wpos1,wpos2,window1,window2,wvalue1,wvalue2;
+ int r_is_one=1;
+ BIGNUM *d,*r;
+ const BIGNUM *a_mod_m;
+ /* Tables of variables obtained from 'ctx' */
+ BIGNUM *val1[TABLE_SIZE], *val2[TABLE_SIZE];
+ BN_MONT_CTX *mont=NULL;
+
+ bn_check_top(a1);
+ bn_check_top(p1);
+ bn_check_top(a2);
+ bn_check_top(p2);
+ bn_check_top(m);
+
+ if (!(m->d[0] & 1))
+ {
+ BNerr(BN_F_BN_MOD_EXP2_MONT,BN_R_CALLED_WITH_EVEN_MODULUS);
+ return(0);
+ }
+ bits1=BN_num_bits(p1);
+ bits2=BN_num_bits(p2);
+ if ((bits1 == 0) && (bits2 == 0))
+ {
+ ret = BN_one(rr);
+ return ret;
+ }
+
+ bits=(bits1 > bits2)?bits1:bits2;
+
+ BN_CTX_start(ctx);
+ d = BN_CTX_get(ctx);
+ r = BN_CTX_get(ctx);
+ val1[0] = BN_CTX_get(ctx);
+ val2[0] = BN_CTX_get(ctx);
+ if(!d || !r || !val1[0] || !val2[0]) goto err;
+
+ if (in_mont != NULL)
+ mont=in_mont;
+ else
+ {
+ if ((mont=BN_MONT_CTX_new()) == NULL) goto err;
+ if (!BN_MONT_CTX_set(mont,m,ctx)) goto err;
+ }
+
+ window1 = BN_window_bits_for_exponent_size(bits1);
+ window2 = BN_window_bits_for_exponent_size(bits2);
+
+ /*
+ * Build table for a1: val1[i] := a1^(2*i + 1) mod m for i = 0 .. 2^(window1-1)
+ */
+ if (a1->neg || BN_ucmp(a1,m) >= 0)
+ {
+ if (!BN_mod(val1[0],a1,m,ctx))
+ goto err;
+ a_mod_m = val1[0];
+ }
+ else
+ a_mod_m = a1;
+ if (BN_is_zero(a_mod_m))
+ {
+ BN_zero(rr);
+ ret = 1;
+ goto err;
+ }
+
+ if (!BN_to_montgomery(val1[0],a_mod_m,mont,ctx)) goto err;
+ if (window1 > 1)
+ {
+ if (!BN_mod_mul_montgomery(d,val1[0],val1[0],mont,ctx)) goto err;
+
+ j=1<<(window1-1);
+ for (i=1; i<j; i++)
+ {
+ if(((val1[i] = BN_CTX_get(ctx)) == NULL) ||
+ !BN_mod_mul_montgomery(val1[i],val1[i-1],
+ d,mont,ctx))
+ goto err;
+ }
+ }
+
+
+ /*
+ * Build table for a2: val2[i] := a2^(2*i + 1) mod m for i = 0 .. 2^(window2-1)
+ */
+ if (a2->neg || BN_ucmp(a2,m) >= 0)
+ {
+ if (!BN_mod(val2[0],a2,m,ctx))
+ goto err;
+ a_mod_m = val2[0];
+ }
+ else
+ a_mod_m = a2;
+ if (BN_is_zero(a_mod_m))
+ {
+ BN_zero(rr);
+ ret = 1;
+ goto err;
+ }
+ if (!BN_to_montgomery(val2[0],a_mod_m,mont,ctx)) goto err;
+ if (window2 > 1)
+ {
+ if (!BN_mod_mul_montgomery(d,val2[0],val2[0],mont,ctx)) goto err;
+
+ j=1<<(window2-1);
+ for (i=1; i<j; i++)
+ {
+ if(((val2[i] = BN_CTX_get(ctx)) == NULL) ||
+ !BN_mod_mul_montgomery(val2[i],val2[i-1],
+ d,mont,ctx))
+ goto err;
+ }
+ }
+
+
+ /* Now compute the power product, using independent windows. */
+ r_is_one=1;
+ wvalue1=0; /* The 'value' of the first window */
+ wvalue2=0; /* The 'value' of the second window */
+ wpos1=0; /* If wvalue1 > 0, the bottom bit of the first window */
+ wpos2=0; /* If wvalue2 > 0, the bottom bit of the second window */
+
+ if (!BN_to_montgomery(r,BN_value_one(),mont,ctx)) goto err;
+ for (b=bits-1; b>=0; b--)
+ {
+ if (!r_is_one)
+ {
+ if (!BN_mod_mul_montgomery(r,r,r,mont,ctx))
+ goto err;
+ }
+
+ if (!wvalue1)
+ if (BN_is_bit_set(p1, b))
+ {
+ /* consider bits b-window1+1 .. b for this window */
+ i = b-window1+1;
+ while (!BN_is_bit_set(p1, i)) /* works for i<0 */
+ i++;
+ wpos1 = i;
+ wvalue1 = 1;
+ for (i = b-1; i >= wpos1; i--)
+ {
+ wvalue1 <<= 1;
+ if (BN_is_bit_set(p1, i))
+ wvalue1++;
+ }
+ }
+
+ if (!wvalue2)
+ if (BN_is_bit_set(p2, b))
+ {
+ /* consider bits b-window2+1 .. b for this window */
+ i = b-window2+1;
+ while (!BN_is_bit_set(p2, i))
+ i++;
+ wpos2 = i;
+ wvalue2 = 1;
+ for (i = b-1; i >= wpos2; i--)
+ {
+ wvalue2 <<= 1;
+ if (BN_is_bit_set(p2, i))
+ wvalue2++;
+ }
+ }
+
+ if (wvalue1 && b == wpos1)
+ {
+ /* wvalue1 is odd and < 2^window1 */
+ if (!BN_mod_mul_montgomery(r,r,val1[wvalue1>>1],mont,ctx))
+ goto err;
+ wvalue1 = 0;
+ r_is_one = 0;
+ }
+
+ if (wvalue2 && b == wpos2)
+ {
+ /* wvalue2 is odd and < 2^window2 */
+ if (!BN_mod_mul_montgomery(r,r,val2[wvalue2>>1],mont,ctx))
+ goto err;
+ wvalue2 = 0;
+ r_is_one = 0;
+ }
+ }
+ if (!BN_from_montgomery(rr,r,mont,ctx))
+ goto err;
+ ret=1;
+err:
+ if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont);
+ BN_CTX_end(ctx);
+ bn_check_top(rr);
+ return(ret);
+ }
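BN_mod_exp2_mont computes a1^p1 * a2^p2 (mod m) with a single shared squaring chain: one squaring per bit position, plus a table multiplication whenever one of the two sliding windows closes. Stripped of the windows and the Montgomery arithmetic, the underlying interleaving (Shamir's trick) looks like this on machine words (a sketch, not the OpenSSL implementation; assumes unsigned __int128 and illustrative helper names):

#include <stdint.h>
#include <stdio.h>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t m)
{
    return (uint64_t)((unsigned __int128)a * b % m);
}

/* a1^p1 * a2^p2 mod m with one shared squaring chain (window size 1);
 * the routine above does the same interleaving with sliding windows
 * and Montgomery multiplication. */
static uint64_t exp2_mod(uint64_t a1, uint64_t p1,
                         uint64_t a2, uint64_t p2, uint64_t m)
{
    uint64_t r = 1 % m;
    int b, bits1 = 0, bits2 = 0, bits;

    for (b = 63; b >= 0; b--) {            /* bit lengths of both exponents */
        if (!bits1 && ((p1 >> b) & 1)) bits1 = b + 1;
        if (!bits2 && ((p2 >> b) & 1)) bits2 = b + 1;
    }
    bits = bits1 > bits2 ? bits1 : bits2;

    for (b = bits - 1; b >= 0; b--) {
        r = mulmod(r, r, m);               /* one squaring serves both */
        if ((p1 >> b) & 1) r = mulmod(r, a1 % m, m);
        if ((p2 >> b) & 1) r = mulmod(r, a2 % m, m);
    }
    return r;
}

int main(void)
{
    uint64_t m = 1000003;
    printf("%llu\n", (unsigned long long)exp2_mod(5, 123, 7, 456, m));
    return 0;
}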
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_gcd.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_gcd.c
new file mode 100644
index 00000000..a808f531
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_gcd.c
@@ -0,0 +1,655 @@
+/* crypto/bn/bn_gcd.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+static BIGNUM *euclid(BIGNUM *a, BIGNUM *b);
+
+int BN_gcd(BIGNUM *r, const BIGNUM *in_a, const BIGNUM *in_b, BN_CTX *ctx)
+ {
+ BIGNUM *a,*b,*t;
+ int ret=0;
+
+ bn_check_top(in_a);
+ bn_check_top(in_b);
+
+ BN_CTX_start(ctx);
+ a = BN_CTX_get(ctx);
+ b = BN_CTX_get(ctx);
+ if (a == NULL || b == NULL) goto err;
+
+ if (BN_copy(a,in_a) == NULL) goto err;
+ if (BN_copy(b,in_b) == NULL) goto err;
+ a->neg = 0;
+ b->neg = 0;
+
+ if (BN_cmp(a,b) < 0) { t=a; a=b; b=t; }
+ t=euclid(a,b);
+ if (t == NULL) goto err;
+
+ if (BN_copy(r,t) == NULL) goto err;
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ bn_check_top(r);
+ return(ret);
+ }
+
+static BIGNUM *euclid(BIGNUM *a, BIGNUM *b)
+ {
+ BIGNUM *t;
+ int shifts=0;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ /* 0 <= b <= a */
+ while (!BN_is_zero(b))
+ {
+ /* 0 < b <= a */
+
+ if (BN_is_odd(a))
+ {
+ if (BN_is_odd(b))
+ {
+ if (!BN_sub(a,a,b)) goto err;
+ if (!BN_rshift1(a,a)) goto err;
+ if (BN_cmp(a,b) < 0)
+ { t=a; a=b; b=t; }
+ }
+ else /* a odd - b even */
+ {
+ if (!BN_rshift1(b,b)) goto err;
+ if (BN_cmp(a,b) < 0)
+ { t=a; a=b; b=t; }
+ }
+ }
+ else /* a is even */
+ {
+ if (BN_is_odd(b))
+ {
+ if (!BN_rshift1(a,a)) goto err;
+ if (BN_cmp(a,b) < 0)
+ { t=a; a=b; b=t; }
+ }
+ else /* a even - b even */
+ {
+ if (!BN_rshift1(a,a)) goto err;
+ if (!BN_rshift1(b,b)) goto err;
+ shifts++;
+ }
+ }
+ /* 0 <= b <= a */
+ }
+
+ if (shifts)
+ {
+ if (!BN_lshift(a,a,shifts)) goto err;
+ }
+ bn_check_top(a);
+ return(a);
+err:
+ return(NULL);
+ }
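euclid() above is the binary GCD: it needs only subtraction and shifts, halving whichever operand is even and counting the factors of two common to both operands so they can be shifted back in at the end. The same algorithm on plain unsigned integers (a sketch, not the BIGNUM version; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Binary GCD mirroring euclid(): subtraction and shifts only. */
static uint64_t binary_gcd(uint64_t a, uint64_t b)
{
    int shifts = 0;
    uint64_t t;

    if (a < b) { t = a; a = b; b = t; }        /* keep b <= a */

    while (b != 0) {
        if ((a & 1) && (b & 1)) {              /* both odd */
            a = (a - b) >> 1;
        } else if (a & 1) {                    /* a odd, b even */
            b >>= 1;
        } else if (b & 1) {                    /* a even, b odd */
            a >>= 1;
        } else {                               /* both even: common factor 2 */
            a >>= 1;
            b >>= 1;
            shifts++;
        }
        if (a < b) { t = a; a = b; b = t; }
    }
    return a << shifts;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)binary_gcd(48, 180)); /* prints 12 */
    return 0;
}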
+
+
+/* solves ax == 1 (mod n) */
+static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
+ const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx);
+
+BIGNUM *BN_mod_inverse(BIGNUM *in,
+ const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx)
+ {
+ BIGNUM *A,*B,*X,*Y,*M,*D,*T,*R=NULL;
+ BIGNUM *ret=NULL;
+ int sign;
+
+ if ((BN_get_flags(a, BN_FLG_CONSTTIME) != 0) || (BN_get_flags(n, BN_FLG_CONSTTIME) != 0))
+ {
+ return BN_mod_inverse_no_branch(in, a, n, ctx);
+ }
+
+ bn_check_top(a);
+ bn_check_top(n);
+
+ BN_CTX_start(ctx);
+ A = BN_CTX_get(ctx);
+ B = BN_CTX_get(ctx);
+ X = BN_CTX_get(ctx);
+ D = BN_CTX_get(ctx);
+ M = BN_CTX_get(ctx);
+ Y = BN_CTX_get(ctx);
+ T = BN_CTX_get(ctx);
+ if (T == NULL) goto err;
+
+ if (in == NULL)
+ R=BN_new();
+ else
+ R=in;
+ if (R == NULL) goto err;
+
+ BN_one(X);
+ BN_zero(Y);
+ if (BN_copy(B,a) == NULL) goto err;
+ if (BN_copy(A,n) == NULL) goto err;
+ A->neg = 0;
+ if (B->neg || (BN_ucmp(B, A) >= 0))
+ {
+ if (!BN_nnmod(B, B, A, ctx)) goto err;
+ }
+ sign = -1;
+ /* From B = a mod |n|, A = |n| it follows that
+ *
+ * 0 <= B < A,
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ */
+
+ if (BN_is_odd(n) && (BN_num_bits(n) <= (BN_BITS <= 32 ? 450 : 2048)))
+ {
+ /* Binary inversion algorithm; requires odd modulus.
+ * This is faster than the general algorithm if the modulus
+ * is sufficiently small (about 400 .. 500 bits on 32-bit
+		 * systems, but much more on 64-bit systems) */
+ int shift;
+
+ while (!BN_is_zero(B))
+ {
+ /*
+ * 0 < B < |n|,
+ * 0 < A <= |n|,
+ * (1) -sign*X*a == B (mod |n|),
+ * (2) sign*Y*a == A (mod |n|)
+ */
+
+ /* Now divide B by the maximum possible power of two in the integers,
+ * and divide X by the same value mod |n|.
+ * When we're done, (1) still holds. */
+ shift = 0;
+ while (!BN_is_bit_set(B, shift)) /* note that 0 < B */
+ {
+ shift++;
+
+ if (BN_is_odd(X))
+ {
+ if (!BN_uadd(X, X, n)) goto err;
+ }
+ /* now X is even, so we can easily divide it by two */
+ if (!BN_rshift1(X, X)) goto err;
+ }
+ if (shift > 0)
+ {
+ if (!BN_rshift(B, B, shift)) goto err;
+ }
+
+
+ /* Same for A and Y. Afterwards, (2) still holds. */
+ shift = 0;
+ while (!BN_is_bit_set(A, shift)) /* note that 0 < A */
+ {
+ shift++;
+
+ if (BN_is_odd(Y))
+ {
+ if (!BN_uadd(Y, Y, n)) goto err;
+ }
+ /* now Y is even */
+ if (!BN_rshift1(Y, Y)) goto err;
+ }
+ if (shift > 0)
+ {
+ if (!BN_rshift(A, A, shift)) goto err;
+ }
+
+
+ /* We still have (1) and (2).
+ * Both A and B are odd.
+ * The following computations ensure that
+ *
+ * 0 <= B < |n|,
+ * 0 < A < |n|,
+ * (1) -sign*X*a == B (mod |n|),
+ * (2) sign*Y*a == A (mod |n|),
+ *
+ * and that either A or B is even in the next iteration.
+ */
+ if (BN_ucmp(B, A) >= 0)
+ {
+ /* -sign*(X + Y)*a == B - A (mod |n|) */
+ if (!BN_uadd(X, X, Y)) goto err;
+ /* NB: we could use BN_mod_add_quick(X, X, Y, n), but that
+ * actually makes the algorithm slower */
+ if (!BN_usub(B, B, A)) goto err;
+ }
+ else
+ {
+ /* sign*(X + Y)*a == A - B (mod |n|) */
+ if (!BN_uadd(Y, Y, X)) goto err;
+ /* as above, BN_mod_add_quick(Y, Y, X, n) would slow things down */
+ if (!BN_usub(A, A, B)) goto err;
+ }
+ }
+ }
+ else
+ {
+ /* general inversion algorithm */
+
+ while (!BN_is_zero(B))
+ {
+ BIGNUM *tmp;
+
+ /*
+ * 0 < B < A,
+ * (*) -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|)
+ */
+
+ /* (D, M) := (A/B, A%B) ... */
+ if (BN_num_bits(A) == BN_num_bits(B))
+ {
+ if (!BN_one(D)) goto err;
+ if (!BN_sub(M,A,B)) goto err;
+ }
+ else if (BN_num_bits(A) == BN_num_bits(B) + 1)
+ {
+ /* A/B is 1, 2, or 3 */
+ if (!BN_lshift1(T,B)) goto err;
+ if (BN_ucmp(A,T) < 0)
+ {
+ /* A < 2*B, so D=1 */
+ if (!BN_one(D)) goto err;
+ if (!BN_sub(M,A,B)) goto err;
+ }
+ else
+ {
+ /* A >= 2*B, so D=2 or D=3 */
+ if (!BN_sub(M,A,T)) goto err;
+ if (!BN_add(D,T,B)) goto err; /* use D (:= 3*B) as temp */
+ if (BN_ucmp(A,D) < 0)
+ {
+ /* A < 3*B, so D=2 */
+ if (!BN_set_word(D,2)) goto err;
+ /* M (= A - 2*B) already has the correct value */
+ }
+ else
+ {
+ /* only D=3 remains */
+ if (!BN_set_word(D,3)) goto err;
+ /* currently M = A - 2*B, but we need M = A - 3*B */
+ if (!BN_sub(M,M,B)) goto err;
+ }
+ }
+ }
+ else
+ {
+ if (!BN_div(D,M,A,B,ctx)) goto err;
+ }
+
+ /* Now
+ * A = D*B + M;
+ * thus we have
+ * (**) sign*Y*a == D*B + M (mod |n|).
+ */
+
+ tmp=A; /* keep the BIGNUM object, the value does not matter */
+
+ /* (A, B) := (B, A mod B) ... */
+ A=B;
+ B=M;
+ /* ... so we have 0 <= B < A again */
+
+ /* Since the former M is now B and the former B is now A,
+ * (**) translates into
+ * sign*Y*a == D*A + B (mod |n|),
+ * i.e.
+ * sign*Y*a - D*A == B (mod |n|).
+ * Similarly, (*) translates into
+ * -sign*X*a == A (mod |n|).
+ *
+ * Thus,
+ * sign*Y*a + D*sign*X*a == B (mod |n|),
+ * i.e.
+ * sign*(Y + D*X)*a == B (mod |n|).
+ *
+ * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ * Note that X and Y stay non-negative all the time.
+ */
+
+ /* most of the time D is very small, so we can optimize tmp := D*X+Y */
+ if (BN_is_one(D))
+ {
+ if (!BN_add(tmp,X,Y)) goto err;
+ }
+ else
+ {
+ if (BN_is_word(D,2))
+ {
+ if (!BN_lshift1(tmp,X)) goto err;
+ }
+ else if (BN_is_word(D,4))
+ {
+ if (!BN_lshift(tmp,X,2)) goto err;
+ }
+ else if (D->top == 1)
+ {
+ if (!BN_copy(tmp,X)) goto err;
+ if (!BN_mul_word(tmp,D->d[0])) goto err;
+ }
+ else
+ {
+ if (!BN_mul(tmp,D,X,ctx)) goto err;
+ }
+ if (!BN_add(tmp,tmp,Y)) goto err;
+ }
+
+ M=Y; /* keep the BIGNUM object, the value does not matter */
+ Y=X;
+ X=tmp;
+ sign = -sign;
+ }
+ }
+
+ /*
+ * The while loop (Euclid's algorithm) ends when
+ * A == gcd(a,n);
+ * we have
+ * sign*Y*a == A (mod |n|),
+ * where Y is non-negative.
+ */
+
+ if (sign < 0)
+ {
+ if (!BN_sub(Y,n,Y)) goto err;
+ }
+ /* Now Y*a == A (mod |n|). */
+
+
+ if (BN_is_one(A))
+ {
+ /* Y*a == 1 (mod |n|) */
+ if (!Y->neg && BN_ucmp(Y,n) < 0)
+ {
+ if (!BN_copy(R,Y)) goto err;
+ }
+ else
+ {
+ if (!BN_nnmod(R,Y,n,ctx)) goto err;
+ }
+ }
+ else
+ {
+ BNerr(BN_F_BN_MOD_INVERSE,BN_R_NO_INVERSE);
+ goto err;
+ }
+ ret=R;
+err:
+ if ((ret == NULL) && (in == NULL)) BN_free(R);
+ BN_CTX_end(ctx);
+ bn_check_top(ret);
+ return(ret);
+ }
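The loop above maintains the invariants -sign*X*a == B and sign*Y*a == A (mod |n|); when B reaches 0, A holds gcd(a, n), and if that gcd is 1, Y (negated via n - Y when sign < 0) is the inverse. A compact signed-integer rendering of the same bookkeeping (a sketch only: not constant-time, assumes n > 1 and no int64_t overflow; the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Extended Euclidean inverse of a mod n; returns 0 if gcd(a, n) != 1. */
static int64_t mod_inverse(int64_t a, int64_t n)
{
    int64_t A = n, B = ((a % n) + n) % n;  /* 0 <= B < A = n */
    int64_t X = 1, Y = 0, D, M, T;
    int sign = -1;

    /* invariants: -sign*X*a == B and sign*Y*a == A (mod n) */
    while (B != 0) {
        D = A / B;                         /* (D, M) := (A/B, A%B) */
        M = A % B;
        A = B;                             /* (A, B) := (B, A mod B) */
        B = M;
        T = D * X + Y;                     /* (X, Y) := (Y + D*X, X) */
        Y = X;
        X = T;
        sign = -sign;
    }
    if (A != 1)
        return 0;                          /* no inverse exists */
    if (sign < 0)
        Y = n - Y;
    return ((Y % n) + n) % n;
}

int main(void)
{
    int64_t inv = mod_inverse(17, 3120);   /* the textbook RSA example */
    printf("inverse %lld, check %lld\n", (long long)inv,
           (long long)(17 * inv % 3120));  /* check should print 1 */
    return 0;
}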
+
+
+/* BN_mod_inverse_no_branch is a special version of BN_mod_inverse.
+ * It does not contain branches that may leak sensitive information.
+ */
+static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
+ const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx)
+ {
+ BIGNUM *A,*B,*X,*Y,*M,*D,*T,*R=NULL;
+ BIGNUM local_A, local_B;
+ BIGNUM *pA, *pB;
+ BIGNUM *ret=NULL;
+ int sign;
+
+ bn_check_top(a);
+ bn_check_top(n);
+
+ BN_CTX_start(ctx);
+ A = BN_CTX_get(ctx);
+ B = BN_CTX_get(ctx);
+ X = BN_CTX_get(ctx);
+ D = BN_CTX_get(ctx);
+ M = BN_CTX_get(ctx);
+ Y = BN_CTX_get(ctx);
+ T = BN_CTX_get(ctx);
+ if (T == NULL) goto err;
+
+ if (in == NULL)
+ R=BN_new();
+ else
+ R=in;
+ if (R == NULL) goto err;
+
+ BN_one(X);
+ BN_zero(Y);
+ if (BN_copy(B,a) == NULL) goto err;
+ if (BN_copy(A,n) == NULL) goto err;
+ A->neg = 0;
+
+ if (B->neg || (BN_ucmp(B, A) >= 0))
+ {
+ /* Turn BN_FLG_CONSTTIME flag on, so that when BN_div is invoked,
+ * BN_div_no_branch will be called eventually.
+ */
+ pB = &local_B;
+ BN_with_flags(pB, B, BN_FLG_CONSTTIME);
+ if (!BN_nnmod(B, pB, A, ctx)) goto err;
+ }
+ sign = -1;
+ /* From B = a mod |n|, A = |n| it follows that
+ *
+ * 0 <= B < A,
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ */
+
+ while (!BN_is_zero(B))
+ {
+ BIGNUM *tmp;
+
+ /*
+ * 0 < B < A,
+ * (*) -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|)
+ */
+
+ /* Turn BN_FLG_CONSTTIME flag on, so that when BN_div is invoked,
+ * BN_div_no_branch will be called eventually.
+ */
+ pA = &local_A;
+ BN_with_flags(pA, A, BN_FLG_CONSTTIME);
+
+ /* (D, M) := (A/B, A%B) ... */
+ if (!BN_div(D,M,pA,B,ctx)) goto err;
+
+ /* Now
+ * A = D*B + M;
+ * thus we have
+ * (**) sign*Y*a == D*B + M (mod |n|).
+ */
+
+ tmp=A; /* keep the BIGNUM object, the value does not matter */
+
+ /* (A, B) := (B, A mod B) ... */
+ A=B;
+ B=M;
+ /* ... so we have 0 <= B < A again */
+
+ /* Since the former M is now B and the former B is now A,
+ * (**) translates into
+ * sign*Y*a == D*A + B (mod |n|),
+ * i.e.
+ * sign*Y*a - D*A == B (mod |n|).
+ * Similarly, (*) translates into
+ * -sign*X*a == A (mod |n|).
+ *
+ * Thus,
+ * sign*Y*a + D*sign*X*a == B (mod |n|),
+ * i.e.
+ * sign*(Y + D*X)*a == B (mod |n|).
+ *
+ * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ * Note that X and Y stay non-negative all the time.
+ */
+
+ if (!BN_mul(tmp,D,X,ctx)) goto err;
+ if (!BN_add(tmp,tmp,Y)) goto err;
+
+ M=Y; /* keep the BIGNUM object, the value does not matter */
+ Y=X;
+ X=tmp;
+ sign = -sign;
+ }
+
+ /*
+ * The while loop (Euclid's algorithm) ends when
+ * A == gcd(a,n);
+ * we have
+ * sign*Y*a == A (mod |n|),
+ * where Y is non-negative.
+ */
+
+ if (sign < 0)
+ {
+ if (!BN_sub(Y,n,Y)) goto err;
+ }
+ /* Now Y*a == A (mod |n|). */
+
+ if (BN_is_one(A))
+ {
+ /* Y*a == 1 (mod |n|) */
+ if (!Y->neg && BN_ucmp(Y,n) < 0)
+ {
+ if (!BN_copy(R,Y)) goto err;
+ }
+ else
+ {
+ if (!BN_nnmod(R,Y,n,ctx)) goto err;
+ }
+ }
+ else
+ {
+ BNerr(BN_F_BN_MOD_INVERSE_NO_BRANCH,BN_R_NO_INVERSE);
+ goto err;
+ }
+ ret=R;
+err:
+ if ((ret == NULL) && (in == NULL)) BN_free(R);
+ BN_CTX_end(ctx);
+ bn_check_top(ret);
+ return(ret);
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_gf2m.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_gf2m.c
new file mode 100644
index 00000000..8a4dc20a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_gf2m.c
@@ -0,0 +1,1113 @@
+/* crypto/bn/bn_gf2m.c */
+/* ====================================================================
+ * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
+ *
+ * The Elliptic Curve Public-Key Crypto Library (ECC Code) included
+ * herein is developed by SUN MICROSYSTEMS, INC., and is contributed
+ * to the OpenSSL project.
+ *
+ * The ECC Code is licensed pursuant to the OpenSSL open source
+ * license provided below.
+ *
+ * In addition, Sun covenants to all licensees who provide a reciprocal
+ * covenant with respect to their own patents if any, not to sue under
+ * current and future patent claims necessarily infringed by the making,
+ * using, practicing, selling, offering for sale and/or otherwise
+ * disposing of the ECC Code as delivered hereunder (or portions thereof),
+ * provided that such covenant shall not apply:
+ * 1) for code that a licensee deletes from the ECC Code;
+ * 2) separates from the ECC Code; or
+ * 3) for infringements caused by:
+ * i) the modification of the ECC Code or
+ * ii) the combination of the ECC Code with other software or
+ * devices where such combination causes the infringement.
+ *
+ * The software is originally written by Sheueling Chang Shantz and
+ * Douglas Stebila of Sun Microsystems Laboratories.
+ *
+ */
+
+/* NOTE: This file is licensed pursuant to the OpenSSL license below
+ * and may be modified; but after modifications, the above covenant
+ * may no longer apply! In such cases, the corresponding paragraph
+ * ["In addition, Sun covenants ... causes the infringement."] and
+ * this note can be edited out; but please keep the Sun copyright
+ * notice and attribution. */
+
+/* ====================================================================
+ * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+#ifndef OPENSSL_NO_EC2M
+
+/* Maximum number of iterations before BN_GF2m_mod_solve_quad_arr should fail. */
+#define MAX_ITERATIONS 50
+
+static const BN_ULONG SQR_tb[16] =
+ { 0, 1, 4, 5, 16, 17, 20, 21,
+ 64, 65, 68, 69, 80, 81, 84, 85 };
+/* Platform-specific macros to accelerate squaring. */
+#if defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
+#define SQR1(w) \
+ SQR_tb[(w) >> 60 & 0xF] << 56 | SQR_tb[(w) >> 56 & 0xF] << 48 | \
+ SQR_tb[(w) >> 52 & 0xF] << 40 | SQR_tb[(w) >> 48 & 0xF] << 32 | \
+ SQR_tb[(w) >> 44 & 0xF] << 24 | SQR_tb[(w) >> 40 & 0xF] << 16 | \
+ SQR_tb[(w) >> 36 & 0xF] << 8 | SQR_tb[(w) >> 32 & 0xF]
+#define SQR0(w) \
+ SQR_tb[(w) >> 28 & 0xF] << 56 | SQR_tb[(w) >> 24 & 0xF] << 48 | \
+ SQR_tb[(w) >> 20 & 0xF] << 40 | SQR_tb[(w) >> 16 & 0xF] << 32 | \
+ SQR_tb[(w) >> 12 & 0xF] << 24 | SQR_tb[(w) >> 8 & 0xF] << 16 | \
+ SQR_tb[(w) >> 4 & 0xF] << 8 | SQR_tb[(w) & 0xF]
+#endif
+#ifdef THIRTY_TWO_BIT
+#define SQR1(w) \
+ SQR_tb[(w) >> 28 & 0xF] << 24 | SQR_tb[(w) >> 24 & 0xF] << 16 | \
+ SQR_tb[(w) >> 20 & 0xF] << 8 | SQR_tb[(w) >> 16 & 0xF]
+#define SQR0(w) \
+ SQR_tb[(w) >> 12 & 0xF] << 24 | SQR_tb[(w) >> 8 & 0xF] << 16 | \
+ SQR_tb[(w) >> 4 & 0xF] << 8 | SQR_tb[(w) & 0xF]
+#endif
+
+#if !defined(OPENSSL_BN_ASM_GF2m)
+/* Product of two polynomials a, b each with degree < BN_BITS2 - 1,
+ * result is a polynomial r with degree < 2 * BN_BITS2 - 1
+ * The caller MUST ensure that the variables have the right amount
+ * of space allocated.
+ */
+#ifdef THIRTY_TWO_BIT
+static void bn_GF2m_mul_1x1(BN_ULONG *r1, BN_ULONG *r0, const BN_ULONG a, const BN_ULONG b)
+ {
+ register BN_ULONG h, l, s;
+ BN_ULONG tab[8], top2b = a >> 30;
+ register BN_ULONG a1, a2, a4;
+
+ a1 = a & (0x3FFFFFFF); a2 = a1 << 1; a4 = a2 << 1;
+
+ tab[0] = 0; tab[1] = a1; tab[2] = a2; tab[3] = a1^a2;
+ tab[4] = a4; tab[5] = a1^a4; tab[6] = a2^a4; tab[7] = a1^a2^a4;
+
+ s = tab[b & 0x7]; l = s;
+ s = tab[b >> 3 & 0x7]; l ^= s << 3; h = s >> 29;
+ s = tab[b >> 6 & 0x7]; l ^= s << 6; h ^= s >> 26;
+ s = tab[b >> 9 & 0x7]; l ^= s << 9; h ^= s >> 23;
+ s = tab[b >> 12 & 0x7]; l ^= s << 12; h ^= s >> 20;
+ s = tab[b >> 15 & 0x7]; l ^= s << 15; h ^= s >> 17;
+ s = tab[b >> 18 & 0x7]; l ^= s << 18; h ^= s >> 14;
+ s = tab[b >> 21 & 0x7]; l ^= s << 21; h ^= s >> 11;
+ s = tab[b >> 24 & 0x7]; l ^= s << 24; h ^= s >> 8;
+ s = tab[b >> 27 & 0x7]; l ^= s << 27; h ^= s >> 5;
+ s = tab[b >> 30 ]; l ^= s << 30; h ^= s >> 2;
+
+ /* compensate for the top two bits of a */
+
+ if (top2b & 01) { l ^= b << 30; h ^= b >> 2; }
+ if (top2b & 02) { l ^= b << 31; h ^= b >> 1; }
+
+ *r1 = h; *r0 = l;
+ }
+#endif
+#if defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
+static void bn_GF2m_mul_1x1(BN_ULONG *r1, BN_ULONG *r0, const BN_ULONG a, const BN_ULONG b)
+ {
+ register BN_ULONG h, l, s;
+ BN_ULONG tab[16], top3b = a >> 61;
+ register BN_ULONG a1, a2, a4, a8;
+
+ a1 = a & (0x1FFFFFFFFFFFFFFFULL); a2 = a1 << 1; a4 = a2 << 1; a8 = a4 << 1;
+
+ tab[ 0] = 0; tab[ 1] = a1; tab[ 2] = a2; tab[ 3] = a1^a2;
+ tab[ 4] = a4; tab[ 5] = a1^a4; tab[ 6] = a2^a4; tab[ 7] = a1^a2^a4;
+ tab[ 8] = a8; tab[ 9] = a1^a8; tab[10] = a2^a8; tab[11] = a1^a2^a8;
+ tab[12] = a4^a8; tab[13] = a1^a4^a8; tab[14] = a2^a4^a8; tab[15] = a1^a2^a4^a8;
+
+ s = tab[b & 0xF]; l = s;
+ s = tab[b >> 4 & 0xF]; l ^= s << 4; h = s >> 60;
+ s = tab[b >> 8 & 0xF]; l ^= s << 8; h ^= s >> 56;
+ s = tab[b >> 12 & 0xF]; l ^= s << 12; h ^= s >> 52;
+ s = tab[b >> 16 & 0xF]; l ^= s << 16; h ^= s >> 48;
+ s = tab[b >> 20 & 0xF]; l ^= s << 20; h ^= s >> 44;
+ s = tab[b >> 24 & 0xF]; l ^= s << 24; h ^= s >> 40;
+ s = tab[b >> 28 & 0xF]; l ^= s << 28; h ^= s >> 36;
+ s = tab[b >> 32 & 0xF]; l ^= s << 32; h ^= s >> 32;
+ s = tab[b >> 36 & 0xF]; l ^= s << 36; h ^= s >> 28;
+ s = tab[b >> 40 & 0xF]; l ^= s << 40; h ^= s >> 24;
+ s = tab[b >> 44 & 0xF]; l ^= s << 44; h ^= s >> 20;
+ s = tab[b >> 48 & 0xF]; l ^= s << 48; h ^= s >> 16;
+ s = tab[b >> 52 & 0xF]; l ^= s << 52; h ^= s >> 12;
+ s = tab[b >> 56 & 0xF]; l ^= s << 56; h ^= s >> 8;
+ s = tab[b >> 60 ]; l ^= s << 60; h ^= s >> 4;
+
+ /* compensate for the top three bits of a */
+
+ if (top3b & 01) { l ^= b << 61; h ^= b >> 3; }
+ if (top3b & 02) { l ^= b << 62; h ^= b >> 2; }
+ if (top3b & 04) { l ^= b << 63; h ^= b >> 1; }
+
+ *r1 = h; *r0 = l;
+ }
+#endif
+
+/* Product of two polynomials a, b each with degree < 2 * BN_BITS2 - 1,
+ * result is a polynomial r with degree < 4 * BN_BITS2 - 1
+ * The caller MUST ensure that the variables have the right amount
+ * of space allocated.
+ */
+static void bn_GF2m_mul_2x2(BN_ULONG *r, const BN_ULONG a1, const BN_ULONG a0, const BN_ULONG b1, const BN_ULONG b0)
+ {
+ BN_ULONG m1, m0;
+ /* r[3] = h1, r[2] = h0; r[1] = l1; r[0] = l0 */
+ bn_GF2m_mul_1x1(r+3, r+2, a1, b1);
+ bn_GF2m_mul_1x1(r+1, r, a0, b0);
+ bn_GF2m_mul_1x1(&m1, &m0, a0 ^ a1, b0 ^ b1);
+ /* Correction on m1 ^= l1 ^ h1; m0 ^= l0 ^ h0; */
+ r[2] ^= m1 ^ r[1] ^ r[3]; /* h0 ^= m1 ^ l1 ^ h1; */
+ r[1] = r[3] ^ r[2] ^ r[0] ^ m1 ^ m0; /* l1 ^= l0 ^ h0 ^ m0; */
+ }
+#else
+void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
+#endif
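bn_GF2m_mul_2x2 assembles a two-word carry-less product from three one-word products, i.e. Karatsuba over GF(2): since addition is XOR, the middle term is (a0^a1)(b0^b1) corrected by the low and high halves. The identity is easy to check on small operands with a bit-serial carry-less multiply (a standalone sketch, not the table-driven code above):

#include <stdint.h>
#include <stdio.h>

/* Schoolbook carry-less (GF(2)[x]) multiplication of two 32-bit
 * polynomials; the product fits in 64 bits. */
static uint64_t clmul32(uint32_t a, uint32_t b)
{
    uint64_t r = 0;
    for (int i = 0; i < 32; i++)
        if ((b >> i) & 1)
            r ^= (uint64_t)a << i;
    return r;
}

int main(void)
{
    uint32_t a = 0x87654321, b = 0x0FEDCBA9;
    uint64_t full = clmul32(a, b);

    /* Karatsuba over GF(2) on 16-bit halves: the cross term is
     * (a1^a0)(b1^b0) ^ a1*b1 ^ a0*b0, as in bn_GF2m_mul_2x2. */
    uint32_t a1 = a >> 16, a0 = a & 0xFFFF, b1 = b >> 16, b0 = b & 0xFFFF;
    uint64_t hh = clmul32(a1, b1), ll = clmul32(a0, b0);
    uint64_t mm = clmul32(a1 ^ a0, b1 ^ b0) ^ hh ^ ll;
    uint64_t kara = (hh << 32) ^ (mm << 16) ^ ll;

    printf("%s\n", kara == full ? "match" : "mismatch");
    return 0;
}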
+
+/* Add polynomials a and b and store result in r; r could be a or b, a and b
+ * could be equal; r is the bitwise XOR of a and b.
+ */
+int BN_GF2m_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
+ {
+ int i;
+ const BIGNUM *at, *bt;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ if (a->top < b->top) { at = b; bt = a; }
+ else { at = a; bt = b; }
+
+ if(bn_wexpand(r, at->top) == NULL)
+ return 0;
+
+ for (i = 0; i < bt->top; i++)
+ {
+ r->d[i] = at->d[i] ^ bt->d[i];
+ }
+ for (; i < at->top; i++)
+ {
+ r->d[i] = at->d[i];
+ }
+
+ r->top = at->top;
+ bn_correct_top(r);
+
+ return 1;
+ }
+
+
+/* Some functions allow for representation of the irreducible polynomials
+ * as an int[], say p. The irreducible f(t) is then of the form:
+ * t^p[0] + t^p[1] + ... + t^p[k]
+ * where m = p[0] > p[1] > ... > p[k] = 0.
+ */
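For instance (an illustrative value, not taken from this file), the NIST B-163 reduction polynomial t^163 + t^7 + t^6 + t^3 + 1 would be written, with the -1 terminator used by BN_GF2m_poly2arr below, as:

/* exponents in decreasing order, terminated by -1 */
static const int p_163[] = { 163, 7, 6, 3, 0, -1 };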
+
+
+/* Performs modular reduction of a and store result in r. r could be a. */
+int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[])
+ {
+ int j, k;
+ int n, dN, d0, d1;
+ BN_ULONG zz, *z;
+
+ bn_check_top(a);
+
+ if (!p[0])
+ {
+ /* reduction mod 1 => return 0 */
+ BN_zero(r);
+ return 1;
+ }
+
+ /* Since the algorithm does reduction in the r value, if a != r, copy
+ * the contents of a into r so we can do reduction in r.
+ */
+ if (a != r)
+ {
+ if (!bn_wexpand(r, a->top)) return 0;
+ for (j = 0; j < a->top; j++)
+ {
+ r->d[j] = a->d[j];
+ }
+ r->top = a->top;
+ }
+ z = r->d;
+
+ /* start reduction */
+ dN = p[0] / BN_BITS2;
+ for (j = r->top - 1; j > dN;)
+ {
+ zz = z[j];
+ if (z[j] == 0) { j--; continue; }
+ z[j] = 0;
+
+ for (k = 1; p[k] != 0; k++)
+ {
+ /* reducing component t^p[k] */
+ n = p[0] - p[k];
+ d0 = n % BN_BITS2; d1 = BN_BITS2 - d0;
+ n /= BN_BITS2;
+ z[j-n] ^= (zz>>d0);
+ if (d0) z[j-n-1] ^= (zz<<d1);
+ }
+
+ /* reducing component t^0 */
+ n = dN;
+ d0 = p[0] % BN_BITS2;
+ d1 = BN_BITS2 - d0;
+ z[j-n] ^= (zz >> d0);
+ if (d0) z[j-n-1] ^= (zz << d1);
+ }
+
+ /* final round of reduction */
+ while (j == dN)
+ {
+
+ d0 = p[0] % BN_BITS2;
+ zz = z[dN] >> d0;
+ if (zz == 0) break;
+ d1 = BN_BITS2 - d0;
+
+ /* clear up the top d1 bits */
+ if (d0)
+ z[dN] = (z[dN] << d1) >> d1;
+ else
+ z[dN] = 0;
+ z[0] ^= zz; /* reduction t^0 component */
+
+ for (k = 1; p[k] != 0; k++)
+ {
+ BN_ULONG tmp_ulong;
+
+ /* reducing component t^p[k]*/
+ n = p[k] / BN_BITS2;
+ d0 = p[k] % BN_BITS2;
+ d1 = BN_BITS2 - d0;
+ z[n] ^= (zz << d0);
+ tmp_ulong = zz >> d1;
+ if (d0 && tmp_ulong)
+ z[n+1] ^= tmp_ulong;
+ }
+
+
+ }
+
+ bn_correct_top(r);
+ return 1;
+ }
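Because the reduction polynomial is sparse, BN_GF2m_mod_arr folds each word above the degree back down with one shifted XOR per term of p. A bit-level analogue on a toy field, GF(2^7) with f(t) = t^7 + t + 1 (a sketch with an illustrative helper name; the real code works a whole word at a time):

#include <stdint.h>
#include <stdio.h>

/* Reduce a GF(2)[t] polynomial of degree < 14 modulo f(t) = t^7 + t + 1,
 * folding each high bit down with one XOR per term of f. */
static uint16_t gf2_7_reduce(uint16_t z)
{
    for (int i = 13; i >= 7; i--)
        if ((z >> i) & 1) {
            z ^= (uint16_t)(1u << i);       /* clear t^i ...          */
            z ^= (uint16_t)(1u << (i - 6)); /* ... add t^(i-7) * t    */
            z ^= (uint16_t)(1u << (i - 7)); /* ... add t^(i-7) * 1    */
        }
    return z;
}

int main(void)
{
    /* t^9 = t^2 * t^7 = t^2 * (t + 1) = t^3 + t^2 mod f(t), i.e. 0x0c */
    printf("0x%02x\n", (unsigned)gf2_7_reduce(1u << 9));
    return 0;
}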
+
+/* Performs modular reduction of a by p and store result in r. r could be a.
+ *
+ * This function calls down to the BN_GF2m_mod_arr implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_arr function.
+ */
+int BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p)
+ {
+ int ret = 0;
+ int arr[6];
+ bn_check_top(a);
+ bn_check_top(p);
+ ret = BN_GF2m_poly2arr(p, arr, sizeof(arr)/sizeof(arr[0]));
+ if (!ret || ret > (int)(sizeof(arr)/sizeof(arr[0])))
+ {
+ BNerr(BN_F_BN_GF2M_MOD,BN_R_INVALID_LENGTH);
+ return 0;
+ }
+ ret = BN_GF2m_mod_arr(r, a, arr);
+ bn_check_top(r);
+ return ret;
+ }
+
+
+/* Compute the product of two polynomials a and b, reduce modulo p, and store
+ * the result in r. r could be a or b; a could be b.
+ */
+int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx)
+ {
+ int zlen, i, j, k, ret = 0;
+ BIGNUM *s;
+ BN_ULONG x1, x0, y1, y0, zz[4];
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ if (a == b)
+ {
+ return BN_GF2m_mod_sqr_arr(r, a, p, ctx);
+ }
+
+ BN_CTX_start(ctx);
+ if ((s = BN_CTX_get(ctx)) == NULL) goto err;
+
+ zlen = a->top + b->top + 4;
+ if (!bn_wexpand(s, zlen)) goto err;
+ s->top = zlen;
+
+ for (i = 0; i < zlen; i++) s->d[i] = 0;
+
+ for (j = 0; j < b->top; j += 2)
+ {
+ y0 = b->d[j];
+ y1 = ((j+1) == b->top) ? 0 : b->d[j+1];
+ for (i = 0; i < a->top; i += 2)
+ {
+ x0 = a->d[i];
+ x1 = ((i+1) == a->top) ? 0 : a->d[i+1];
+ bn_GF2m_mul_2x2(zz, x1, x0, y1, y0);
+ for (k = 0; k < 4; k++) s->d[i+j+k] ^= zz[k];
+ }
+ }
+
+ bn_correct_top(s);
+ if (BN_GF2m_mod_arr(r, s, p))
+ ret = 1;
+ bn_check_top(r);
+
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+
+/* Compute the product of two polynomials a and b, reduce modulo p, and store
+ * the result in r. r could be a or b; a could equal b.
+ *
+ * This function calls down to the BN_GF2m_mod_mul_arr implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_mul_arr function.
+ */
+int BN_GF2m_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx)
+ {
+ int ret = 0;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
+ bn_check_top(a);
+ bn_check_top(b);
+ bn_check_top(p);
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
+ ret = BN_GF2m_poly2arr(p, arr, max);
+ if (!ret || ret > max)
+ {
+ BNerr(BN_F_BN_GF2M_MOD_MUL,BN_R_INVALID_LENGTH);
+ goto err;
+ }
+ ret = BN_GF2m_mod_mul_arr(r, a, b, arr, ctx);
+ bn_check_top(r);
+err:
+ if (arr) OPENSSL_free(arr);
+ return ret;
+ }
+
+
+/* Square a, reduce the result mod p, and store it in r. r could be a. */
+int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx)
+ {
+ int i, ret = 0;
+ BIGNUM *s;
+
+ bn_check_top(a);
+ BN_CTX_start(ctx);
+ if ((s = BN_CTX_get(ctx)) == NULL) return 0;
+ if (!bn_wexpand(s, 2 * a->top)) goto err;
+
+ for (i = a->top - 1; i >= 0; i--)
+ {
+ s->d[2*i+1] = SQR1(a->d[i]);
+ s->d[2*i ] = SQR0(a->d[i]);
+ }
+
+ s->top = 2 * a->top;
+ bn_correct_top(s);
+ if (!BN_GF2m_mod_arr(r, s, p)) goto err;
+ bn_check_top(r);
+ ret = 1;
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+
+/* Square a, reduce the result mod p, and store it in r. r could be a.
+ *
+ * This function calls down to the BN_GF2m_mod_sqr_arr implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_sqr_arr function.
+ */
+int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
+ {
+ int ret = 0;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
+
+ bn_check_top(a);
+ bn_check_top(p);
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
+ ret = BN_GF2m_poly2arr(p, arr, max);
+ if (!ret || ret > max)
+ {
+ BNerr(BN_F_BN_GF2M_MOD_SQR,BN_R_INVALID_LENGTH);
+ goto err;
+ }
+ ret = BN_GF2m_mod_sqr_arr(r, a, arr, ctx);
+ bn_check_top(r);
+err:
+ if (arr) OPENSSL_free(arr);
+ return ret;
+ }
+
+
+/* Invert a, reduce modulo p, and store the result in r. r could be a.
+ * Uses Modified Almost Inverse Algorithm (Algorithm 10) from
+ * Hankerson, D., Hernandez, J.L., and Menezes, A. "Software Implementation
+ * of Elliptic Curve Cryptography Over Binary Fields".
+ */
+int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
+ {
+ BIGNUM *b, *c = NULL, *u = NULL, *v = NULL, *tmp;
+ int ret = 0;
+
+ bn_check_top(a);
+ bn_check_top(p);
+
+ BN_CTX_start(ctx);
+
+ if ((b = BN_CTX_get(ctx))==NULL) goto err;
+ if ((c = BN_CTX_get(ctx))==NULL) goto err;
+ if ((u = BN_CTX_get(ctx))==NULL) goto err;
+ if ((v = BN_CTX_get(ctx))==NULL) goto err;
+
+ if (!BN_GF2m_mod(u, a, p)) goto err;
+ if (BN_is_zero(u)) goto err;
+
+ if (!BN_copy(v, p)) goto err;
+#if 0
+ if (!BN_one(b)) goto err;
+
+ while (1)
+ {
+ while (!BN_is_odd(u))
+ {
+ if (BN_is_zero(u)) goto err;
+ if (!BN_rshift1(u, u)) goto err;
+ if (BN_is_odd(b))
+ {
+ if (!BN_GF2m_add(b, b, p)) goto err;
+ }
+ if (!BN_rshift1(b, b)) goto err;
+ }
+
+ if (BN_abs_is_word(u, 1)) break;
+
+ if (BN_num_bits(u) < BN_num_bits(v))
+ {
+ tmp = u; u = v; v = tmp;
+ tmp = b; b = c; c = tmp;
+ }
+
+ if (!BN_GF2m_add(u, u, v)) goto err;
+ if (!BN_GF2m_add(b, b, c)) goto err;
+ }
+#else
+ {
+ int i, ubits = BN_num_bits(u),
+ vbits = BN_num_bits(v), /* v is copy of p */
+ top = p->top;
+ BN_ULONG *udp,*bdp,*vdp,*cdp;
+
+ bn_wexpand(u,top); udp = u->d;
+ for (i=u->top;i<top;i++) udp[i] = 0;
+ u->top = top;
+ bn_wexpand(b,top); bdp = b->d;
+ bdp[0] = 1;
+ for (i=1;i<top;i++) bdp[i] = 0;
+ b->top = top;
+ bn_wexpand(c,top); cdp = c->d;
+ for (i=0;i<top;i++) cdp[i] = 0;
+ c->top = top;
+ vdp = v->d; /* It pays off to "cache" *->d pointers, because
+ * it allows optimizer to be more aggressive.
+ * But we don't have to "cache" p->d, because *p
+ * is declared 'const'... */
+ while (1)
+ {
+ while (ubits && !(udp[0]&1))
+ {
+ BN_ULONG u0,u1,b0,b1,mask;
+
+ u0 = udp[0];
+ b0 = bdp[0];
+ mask = (BN_ULONG)0-(b0&1);
+ b0 ^= p->d[0]&mask;
+ for (i=0;i<top-1;i++)
+ {
+ u1 = udp[i+1];
+ udp[i] = ((u0>>1)|(u1<<(BN_BITS2-1)))&BN_MASK2;
+ u0 = u1;
+ b1 = bdp[i+1]^(p->d[i+1]&mask);
+ bdp[i] = ((b0>>1)|(b1<<(BN_BITS2-1)))&BN_MASK2;
+ b0 = b1;
+ }
+ udp[i] = u0>>1;
+ bdp[i] = b0>>1;
+ ubits--;
+ }
+
+ if (ubits<=BN_BITS2 && udp[0]==1) break;
+
+ if (ubits<vbits)
+ {
+ i = ubits; ubits = vbits; vbits = i;
+ tmp = u; u = v; v = tmp;
+ tmp = b; b = c; c = tmp;
+ udp = vdp; vdp = v->d;
+ bdp = cdp; cdp = c->d;
+ }
+ for(i=0;i<top;i++)
+ {
+ udp[i] ^= vdp[i];
+ bdp[i] ^= cdp[i];
+ }
+ if (ubits==vbits)
+ {
+ BN_ULONG ul;
+ int utop = (ubits-1)/BN_BITS2;
+
+ while ((ul=udp[utop])==0 && utop) utop--;
+ ubits = utop*BN_BITS2 + BN_num_bits_word(ul);
+ }
+ }
+ bn_correct_top(b);
+ }
+#endif
+
+ if (!BN_copy(r, b)) goto err;
+ bn_check_top(r);
+ ret = 1;
+
+err:
+#ifdef BN_DEBUG /* BN_CTX_end would complain about the expanded form */
+ bn_correct_top(c);
+ bn_correct_top(u);
+ bn_correct_top(v);
+#endif
+ BN_CTX_end(ctx);
+ return ret;
+ }
+
+/* Invert xx, reduce modulo p, and store the result in r. r could be xx.
+ *
+ * This function calls down to the BN_GF2m_mod_inv implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_inv function.
+ */
+int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *xx, const int p[], BN_CTX *ctx)
+ {
+ BIGNUM *field;
+ int ret = 0;
+
+ bn_check_top(xx);
+ BN_CTX_start(ctx);
+ if ((field = BN_CTX_get(ctx)) == NULL) goto err;
+ if (!BN_GF2m_arr2poly(p, field)) goto err;
+
+ ret = BN_GF2m_mod_inv(r, xx, field, ctx);
+ bn_check_top(r);
+
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+
+
+#ifndef OPENSSL_SUN_GF2M_DIV
+/* Divide y by x, reduce modulo p, and store the result in r. r could be x
+ * or y, x could equal y.
+ */
+int BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *y, const BIGNUM *x, const BIGNUM *p, BN_CTX *ctx)
+ {
+ BIGNUM *xinv = NULL;
+ int ret = 0;
+
+ bn_check_top(y);
+ bn_check_top(x);
+ bn_check_top(p);
+
+ BN_CTX_start(ctx);
+ xinv = BN_CTX_get(ctx);
+ if (xinv == NULL) goto err;
+
+ if (!BN_GF2m_mod_inv(xinv, x, p, ctx)) goto err;
+ if (!BN_GF2m_mod_mul(r, y, xinv, p, ctx)) goto err;
+ bn_check_top(r);
+ ret = 1;
+
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+#else
+/* Divide y by x, reduce modulo p, and store the result in r. r could be x
+ * or y, x could equal y.
+ * Uses algorithm Modular_Division_GF(2^m) from
+ * Chang-Shantz, S. "From Euclid's GCD to Montgomery Multiplication to
+ * the Great Divide".
+ */
+int BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *y, const BIGNUM *x, const BIGNUM *p, BN_CTX *ctx)
+ {
+ BIGNUM *a, *b, *u, *v;
+ int ret = 0;
+
+ bn_check_top(y);
+ bn_check_top(x);
+ bn_check_top(p);
+
+ BN_CTX_start(ctx);
+
+ a = BN_CTX_get(ctx);
+ b = BN_CTX_get(ctx);
+ u = BN_CTX_get(ctx);
+ v = BN_CTX_get(ctx);
+ if (v == NULL) goto err;
+
+ /* reduce x and y mod p */
+ if (!BN_GF2m_mod(u, y, p)) goto err;
+ if (!BN_GF2m_mod(a, x, p)) goto err;
+ if (!BN_copy(b, p)) goto err;
+
+ while (!BN_is_odd(a))
+ {
+ if (!BN_rshift1(a, a)) goto err;
+ if (BN_is_odd(u)) if (!BN_GF2m_add(u, u, p)) goto err;
+ if (!BN_rshift1(u, u)) goto err;
+ }
+
+ do
+ {
+ if (BN_GF2m_cmp(b, a) > 0)
+ {
+ if (!BN_GF2m_add(b, b, a)) goto err;
+ if (!BN_GF2m_add(v, v, u)) goto err;
+ do
+ {
+ if (!BN_rshift1(b, b)) goto err;
+ if (BN_is_odd(v)) if (!BN_GF2m_add(v, v, p)) goto err;
+ if (!BN_rshift1(v, v)) goto err;
+ } while (!BN_is_odd(b));
+ }
+ else if (BN_abs_is_word(a, 1))
+ break;
+ else
+ {
+ if (!BN_GF2m_add(a, a, b)) goto err;
+ if (!BN_GF2m_add(u, u, v)) goto err;
+ do
+ {
+ if (!BN_rshift1(a, a)) goto err;
+ if (BN_is_odd(u)) if (!BN_GF2m_add(u, u, p)) goto err;
+ if (!BN_rshift1(u, u)) goto err;
+ } while (!BN_is_odd(a));
+ }
+ } while (1);
+
+ if (!BN_copy(r, u)) goto err;
+ bn_check_top(r);
+ ret = 1;
+
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+#endif
+
+/* Divide yy by xx, reduce modulo p, and store the result in r. r could be xx
+ * or yy, xx could equal yy.
+ *
+ * This function calls down to the BN_GF2m_mod_div implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_div function.
+ */
+int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *yy, const BIGNUM *xx, const int p[], BN_CTX *ctx)
+ {
+ BIGNUM *field;
+ int ret = 0;
+
+ bn_check_top(yy);
+ bn_check_top(xx);
+
+ BN_CTX_start(ctx);
+ if ((field = BN_CTX_get(ctx)) == NULL) goto err;
+ if (!BN_GF2m_arr2poly(p, field)) goto err;
+
+ ret = BN_GF2m_mod_div(r, yy, xx, field, ctx);
+ bn_check_top(r);
+
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+
+
+/* Compute the bth power of a, reduce modulo p, and store
+ * the result in r. r could be a.
+ * Uses simple square-and-multiply algorithm A.5.1 from IEEE P1363.
+ */
+int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const int p[], BN_CTX *ctx)
+ {
+ int ret = 0, i, n;
+ BIGNUM *u;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ if (BN_is_zero(b))
+ return(BN_one(r));
+
+ if (BN_abs_is_word(b, 1))
+ return (BN_copy(r, a) != NULL);
+
+ BN_CTX_start(ctx);
+ if ((u = BN_CTX_get(ctx)) == NULL) goto err;
+
+ if (!BN_GF2m_mod_arr(u, a, p)) goto err;
+
+ n = BN_num_bits(b) - 1;
+ for (i = n - 1; i >= 0; i--)
+ {
+ if (!BN_GF2m_mod_sqr_arr(u, u, p, ctx)) goto err;
+ if (BN_is_bit_set(b, i))
+ {
+ if (!BN_GF2m_mod_mul_arr(u, u, a, p, ctx)) goto err;
+ }
+ }
+ if (!BN_copy(r, u)) goto err;
+ bn_check_top(r);
+ ret = 1;
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+
+/* Compute the bth power of a, reduce modulo p, and store
+ * the result in r. r could be a.
+ *
+ * This function calls down to the BN_GF2m_mod_exp_arr implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_exp_arr function.
+ */
+int BN_GF2m_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx)
+ {
+ int ret = 0;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
+ bn_check_top(a);
+ bn_check_top(b);
+ bn_check_top(p);
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
+ ret = BN_GF2m_poly2arr(p, arr, max);
+ if (!ret || ret > max)
+ {
+ BNerr(BN_F_BN_GF2M_MOD_EXP,BN_R_INVALID_LENGTH);
+ goto err;
+ }
+ ret = BN_GF2m_mod_exp_arr(r, a, b, arr, ctx);
+ bn_check_top(r);
+err:
+ if (arr) OPENSSL_free(arr);
+ return ret;
+ }
+
+/* Compute the square root of a, reduce modulo p, and store
+ * the result in r. r could be a.
+ * Uses exponentiation as in algorithm A.4.1 from IEEE P1363.
+ */
+int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a, const int p[], BN_CTX *ctx)
+ {
+ int ret = 0;
+ BIGNUM *u;
+
+ bn_check_top(a);
+
+ if (!p[0])
+ {
+ /* reduction mod 1 => return 0 */
+ BN_zero(r);
+ return 1;
+ }
+
+ BN_CTX_start(ctx);
+ if ((u = BN_CTX_get(ctx)) == NULL) goto err;
+
+ if (!BN_set_bit(u, p[0] - 1)) goto err;
+ ret = BN_GF2m_mod_exp_arr(r, a, u, p, ctx);
+ bn_check_top(r);
+
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
+
+/* Compute the square root of a, reduce modulo p, and store
+ * the result in r. r could be a.
+ *
+ * This function calls down to the BN_GF2m_mod_sqrt_arr implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_sqrt_arr function.
+ */
+int BN_GF2m_mod_sqrt(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
+ {
+ int ret = 0;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
+ bn_check_top(a);
+ bn_check_top(p);
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) * max)) == NULL) goto err;
+ ret = BN_GF2m_poly2arr(p, arr, max);
+ if (!ret || ret > max)
+ {
+ BNerr(BN_F_BN_GF2M_MOD_SQRT,BN_R_INVALID_LENGTH);
+ goto err;
+ }
+ ret = BN_GF2m_mod_sqrt_arr(r, a, arr, ctx);
+ bn_check_top(r);
+err:
+ if (arr) OPENSSL_free(arr);
+ return ret;
+ }
+
+/* Find r such that r^2 + r = a mod p. r could be a. If no r exists returns 0.
+ * Uses algorithms A.4.7 and A.4.6 from IEEE P1363.
+ */
+int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a_, const int p[], BN_CTX *ctx)
+ {
+ int ret = 0, count = 0, j;
+ BIGNUM *a, *z, *rho, *w, *w2, *tmp;
+
+ bn_check_top(a_);
+
+ if (!p[0])
+ {
+ /* reduction mod 1 => return 0 */
+ BN_zero(r);
+ return 1;
+ }
+
+ BN_CTX_start(ctx);
+ a = BN_CTX_get(ctx);
+ z = BN_CTX_get(ctx);
+ w = BN_CTX_get(ctx);
+ if (w == NULL) goto err;
+
+ if (!BN_GF2m_mod_arr(a, a_, p)) goto err;
+
+ if (BN_is_zero(a))
+ {
+ BN_zero(r);
+ ret = 1;
+ goto err;
+ }
+
+ if (p[0] & 0x1) /* m is odd */
+ {
+ /* compute half-trace of a */
+ if (!BN_copy(z, a)) goto err;
+ for (j = 1; j <= (p[0] - 1) / 2; j++)
+ {
+ if (!BN_GF2m_mod_sqr_arr(z, z, p, ctx)) goto err;
+ if (!BN_GF2m_mod_sqr_arr(z, z, p, ctx)) goto err;
+ if (!BN_GF2m_add(z, z, a)) goto err;
+ }
+
+ }
+ else /* m is even */
+ {
+ rho = BN_CTX_get(ctx);
+ w2 = BN_CTX_get(ctx);
+ tmp = BN_CTX_get(ctx);
+ if (tmp == NULL) goto err;
+ do
+ {
+ if (!BN_rand(rho, p[0], 0, 0)) goto err;
+ if (!BN_GF2m_mod_arr(rho, rho, p)) goto err;
+ BN_zero(z);
+ if (!BN_copy(w, rho)) goto err;
+ for (j = 1; j <= p[0] - 1; j++)
+ {
+ if (!BN_GF2m_mod_sqr_arr(z, z, p, ctx)) goto err;
+ if (!BN_GF2m_mod_sqr_arr(w2, w, p, ctx)) goto err;
+ if (!BN_GF2m_mod_mul_arr(tmp, w2, a, p, ctx)) goto err;
+ if (!BN_GF2m_add(z, z, tmp)) goto err;
+ if (!BN_GF2m_add(w, w2, rho)) goto err;
+ }
+ count++;
+ } while (BN_is_zero(w) && (count < MAX_ITERATIONS));
+ if (BN_is_zero(w))
+ {
+ BNerr(BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR,BN_R_TOO_MANY_ITERATIONS);
+ goto err;
+ }
+ }
+
+ if (!BN_GF2m_mod_sqr_arr(w, z, p, ctx)) goto err;
+ if (!BN_GF2m_add(w, z, w)) goto err;
+ if (BN_GF2m_cmp(w, a))
+ {
+ BNerr(BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR, BN_R_NO_SOLUTION);
+ goto err;
+ }
+
+ if (!BN_copy(r, z)) goto err;
+ bn_check_top(r);
+
+ ret = 1;
+
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
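For odd m the routine above solves z^2 + z = a via the half-trace z = a + a^4 + a^16 + ..., i.e. (m-1)/2 rounds of "square twice, then add a"; the result is a solution exactly when a has trace 0, which holds for half of the field elements. A toy verification in GF(2^7) = GF(2)[t]/(t^7 + t + 1) (a sketch with illustrative helper names, not OpenSSL code):

#include <stdint.h>
#include <stdio.h>

static uint16_t gf_reduce(uint16_t z)          /* reduce degree < 14 mod t^7+t+1 */
{
    for (int i = 13; i >= 7; i--)
        if ((z >> i) & 1)
            z ^= (uint16_t)((1u << i) | (1u << (i - 6)) | (1u << (i - 7)));
    return z;
}

static uint16_t gf_sqr(uint16_t a)             /* squaring just spreads the bits */
{
    uint16_t r = 0;
    for (int i = 0; i < 7; i++)
        if ((a >> i) & 1)
            r |= (uint16_t)(1u << (2 * i));
    return gf_reduce(r);
}

int main(void)
{
    int solvable = 0;

    for (uint16_t a = 0; a < 128; a++) {
        /* half-trace: (m-1)/2 = 3 rounds of "square twice, add a",
         * exactly as in the m-odd branch above */
        uint16_t z = a;
        for (int j = 1; j <= 3; j++)
            z = gf_sqr(gf_sqr(z)) ^ a;
        if ((uint16_t)(gf_sqr(z) ^ z) == a)    /* z^2 + z == a ? */
            solvable++;
    }
    /* exactly half of the field elements have trace 0, so expect 64 */
    printf("solvable: %d of 128\n", solvable);
    return 0;
}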
+
+/* Find r such that r^2 + r = a mod p. r could be a. If no r exists returns 0.
+ *
+ * This function calls down to the BN_GF2m_mod_solve_quad_arr implementation; this wrapper
+ * function is only provided for convenience; for best performance, use the
+ * BN_GF2m_mod_solve_quad_arr function.
+ */
+int BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
+ {
+ int ret = 0;
+ const int max = BN_num_bits(p) + 1;
+ int *arr=NULL;
+ bn_check_top(a);
+ bn_check_top(p);
+ if ((arr = (int *)OPENSSL_malloc(sizeof(int) *
+ max)) == NULL) goto err;
+ ret = BN_GF2m_poly2arr(p, arr, max);
+ if (!ret || ret > max)
+ {
+ BNerr(BN_F_BN_GF2M_MOD_SOLVE_QUAD,BN_R_INVALID_LENGTH);
+ goto err;
+ }
+ ret = BN_GF2m_mod_solve_quad_arr(r, a, arr, ctx);
+ bn_check_top(r);
+err:
+ if (arr) OPENSSL_free(arr);
+ return ret;
+ }
+
+/* Convert the bit-string representation of a polynomial
+ * ( \sum_{i=0}^n a_i * x^i) into an array of integers corresponding
+ * to the bits with non-zero coefficients. The array is terminated with -1.
+ * Up to max elements of the array will be filled. The return value is the total
+ * number of array elements that would be filled if the array were large enough.
+ */
+int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max)
+ {
+ int i, j, k = 0;
+ BN_ULONG mask;
+
+ if (BN_is_zero(a))
+ return 0;
+
+ for (i = a->top - 1; i >= 0; i--)
+ {
+ if (!a->d[i])
+ /* skip word if a->d[i] == 0 */
+ continue;
+ mask = BN_TBIT;
+ for (j = BN_BITS2 - 1; j >= 0; j--)
+ {
+ if (a->d[i] & mask)
+ {
+ if (k < max) p[k] = BN_BITS2 * i + j;
+ k++;
+ }
+ mask >>= 1;
+ }
+ }
+
+ if (k < max) {
+ p[k] = -1;
+ k++;
+ }
+
+ return k;
+ }
+
+/* Convert the coefficient array representation of a polynomial to a
+ * bit-string. The array must be terminated by -1.
+ */
+int BN_GF2m_arr2poly(const int p[], BIGNUM *a)
+ {
+ int i;
+
+ bn_check_top(a);
+ BN_zero(a);
+ for (i = 0; p[i] != -1; i++)
+ {
+ if (BN_set_bit(a, p[i]) == 0)
+ return 0;
+ }
+ bn_check_top(a);
+
+ return 1;
+ }
+
+#endif
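A minimal usage sketch of the two conversion helpers above (BN_GF2m_poly2arr / BN_GF2m_arr2poly), assuming an OpenSSL build with GF(2^m) support compiled in; the reduction polynomial x^163 + x^7 + x^6 + x^3 + 1 is chosen purely for illustration.

    #include <stdio.h>
    #include <openssl/bn.h>

    int main(void)
    {
        /* x^163 + x^7 + x^6 + x^3 + 1 in index form, terminated by -1 */
        static const int poly[] = { 163, 7, 6, 3, 0, -1 };
        int arr[8], i, n;
        BIGNUM *p = BN_new();

        if (p == NULL || !BN_GF2m_arr2poly(poly, p))
            return 1;

        /* back to index form; n counts the entries, terminator included */
        n = BN_GF2m_poly2arr(p, arr, sizeof(arr) / sizeof(arr[0]));
        printf("poly2arr filled %d entries, degree %d\n", n, BN_num_bits(p) - 1);
        for (i = 0; arr[i] != -1; i++)
            printf("x^%d\n", arr[i]);

        BN_free(p);
        return 0;
    }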
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_kron.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_kron.c
new file mode 100644
index 00000000..740359b7
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_kron.c
@@ -0,0 +1,184 @@
+/* crypto/bn/bn_kron.c */
+/* ====================================================================
+ * Copyright (c) 1998-2000 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+/* least significant word */
+#define BN_lsw(n) (((n)->top == 0) ? (BN_ULONG) 0 : (n)->d[0])
+
+/* Returns -2 for errors because both -1 and 0 are valid results. */
+int BN_kronecker(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
+ {
+ int i;
+ int ret = -2; /* avoid 'uninitialized' warning */
+ int err = 0;
+ BIGNUM *A, *B, *tmp;
+ /* In 'tab', only odd-indexed entries are relevant:
+ * For any odd BIGNUM n,
+ * tab[BN_lsw(n) & 7]
+ * is $(-1)^{(n^2-1)/8}$ (using TeX notation).
+ * Note that the sign of n does not matter.
+ */
+ static const int tab[8] = {0, 1, 0, -1, 0, -1, 0, 1};
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ BN_CTX_start(ctx);
+ A = BN_CTX_get(ctx);
+ B = BN_CTX_get(ctx);
+ if (B == NULL) goto end;
+
+ err = !BN_copy(A, a);
+ if (err) goto end;
+ err = !BN_copy(B, b);
+ if (err) goto end;
+
+ /*
+	 * Kronecker symbol, implemented according to Henri Cohen,
+ * "A Course in Computational Algebraic Number Theory"
+ * (algorithm 1.4.10).
+ */
+
+ /* Cohen's step 1: */
+
+ if (BN_is_zero(B))
+ {
+ ret = BN_abs_is_word(A, 1);
+ goto end;
+ }
+
+ /* Cohen's step 2: */
+
+ if (!BN_is_odd(A) && !BN_is_odd(B))
+ {
+ ret = 0;
+ goto end;
+ }
+
+ /* now B is non-zero */
+ i = 0;
+ while (!BN_is_bit_set(B, i))
+ i++;
+ err = !BN_rshift(B, B, i);
+ if (err) goto end;
+ if (i & 1)
+ {
+ /* i is odd */
+ /* (thus B was even, thus A must be odd!) */
+
+ /* set 'ret' to $(-1)^{(A^2-1)/8}$ */
+ ret = tab[BN_lsw(A) & 7];
+ }
+ else
+ {
+ /* i is even */
+ ret = 1;
+ }
+
+ if (B->neg)
+ {
+ B->neg = 0;
+ if (A->neg)
+ ret = -ret;
+ }
+
+ /* now B is positive and odd, so what remains to be done is
+ * to compute the Jacobi symbol (A/B) and multiply it by 'ret' */
+
+ while (1)
+ {
+ /* Cohen's step 3: */
+
+ /* B is positive and odd */
+
+ if (BN_is_zero(A))
+ {
+ ret = BN_is_one(B) ? ret : 0;
+ goto end;
+ }
+
+ /* now A is non-zero */
+ i = 0;
+ while (!BN_is_bit_set(A, i))
+ i++;
+ err = !BN_rshift(A, A, i);
+ if (err) goto end;
+ if (i & 1)
+ {
+ /* i is odd */
+ /* multiply 'ret' by $(-1)^{(B^2-1)/8}$ */
+ ret = ret * tab[BN_lsw(B) & 7];
+ }
+
+ /* Cohen's step 4: */
+ /* multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$ */
+ if ((A->neg ? ~BN_lsw(A) : BN_lsw(A)) & BN_lsw(B) & 2)
+ ret = -ret;
+
+ /* (A, B) := (B mod |A|, |A|) */
+ err = !BN_nnmod(B, B, A, ctx);
+ if (err) goto end;
+ tmp = A; A = B; B = tmp;
+ tmp->neg = 0;
+ }
+end:
+ BN_CTX_end(ctx);
+ if (err)
+ return -2;
+ else
+ return ret;
+ }
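For orientation, a small usage sketch of BN_kronecker(), assuming the public API declared in <openssl/bn.h>: for an odd prime b the Kronecker symbol coincides with the Legendre symbol, so (3/7) evaluates to -1 and (2/7) to 1; a return value of -2 signals an error.

    #include <stdio.h>
    #include <openssl/bn.h>

    int main(void)
    {
        BN_CTX *ctx = BN_CTX_new();
        BIGNUM *a = BN_new(), *b = BN_new();
        int k;

        if (ctx == NULL || a == NULL || b == NULL)
            return 1;

        BN_set_word(a, 3);
        BN_set_word(b, 7);
        k = BN_kronecker(a, b, ctx);   /* 3 is a quadratic non-residue mod 7 */
        printf("(3/7) = %d\n", k);     /* prints -1 */

        BN_set_word(a, 2);
        k = BN_kronecker(a, b, ctx);   /* 2 == 3^2 mod 7, so a residue */
        printf("(2/7) = %d\n", k);     /* prints 1 */

        BN_free(a);
        BN_free(b);
        BN_CTX_free(ctx);
        return 0;
    }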
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_lcl.h b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_lcl.h
new file mode 100644
index 00000000..817c773b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_lcl.h
@@ -0,0 +1,515 @@
+/* crypto/bn/bn_lcl.h */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2000 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#ifndef HEADER_BN_LCL_H
+#define HEADER_BN_LCL_H
+
+#include <openssl/bn.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * BN_window_bits_for_exponent_size -- macro for sliding window mod_exp functions
+ *
+ *
+ * For window size 'w' (w >= 2) and a random 'b' bits exponent,
+ * the number of multiplications is a constant plus on average
+ *
+ * 2^(w-1) + (b-w)/(w+1);
+ *
+ * here 2^(w-1) is for precomputing the table (we actually need
+ * entries only for windows that have the lowest bit set), and
+ * (b-w)/(w+1) is an approximation for the expected number of
+ * w-bit windows, not counting the first one.
+ *
+ * Thus we should use
+ *
+ * w >= 6 if b > 671
+ * w = 5 if 671 > b > 239
+ * w = 4 if 239 > b > 79
+ * w = 3 if 79 > b > 23
+ * w <= 2 if 23 > b
+ *
+ * (with draws in between). Very small exponents are often selected
+ * with low Hamming weight, so we use w = 1 for b <= 23.
+ */
+#if 1
+#define BN_window_bits_for_exponent_size(b) \
+ ((b) > 671 ? 6 : \
+ (b) > 239 ? 5 : \
+ (b) > 79 ? 4 : \
+ (b) > 23 ? 3 : 1)
+#else
+/* Old SSLeay/OpenSSL table.
+ * Maximum window size was 5, so this table differs for b==1024;
+ * but it coincides for other interesting values (b==160, b==512).
+ */
+#define BN_window_bits_for_exponent_size(b) \
+ ((b) > 255 ? 5 : \
+ (b) > 127 ? 4 : \
+ (b) > 17 ? 3 : 1)
+#endif
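As a sanity check on the cost model quoted in the comment above, this standalone snippet evaluates 2^(w-1) + (b-w)/(w+1) for a few exponent sizes; it shows, for instance, that a 6-bit window only starts to beat a 5-bit one for b above 671, and a 5-bit window beats a 4-bit one above 239, which is exactly where the macro switches.

    #include <stdio.h>

    /* expected multiplications for a b-bit exponent and window size w */
    static double cost(int b, int w)
    {
        return (double)(1 << (w - 1)) + (double)(b - w) / (double)(w + 1);
    }

    int main(void)
    {
        static const int sizes[] = { 24, 80, 240, 512, 672, 1024 };
        size_t i;
        int w;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            printf("b=%4d:", sizes[i]);
            for (w = 2; w <= 6; w++)
                printf("  w=%d:%7.1f", w, cost(sizes[i], w));
            printf("\n");
        }
        return 0;
    }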
+
+
+
+/* BN_mod_exp_mont_consttime is based on the assumption that the
+ * L1 data cache line width of the target processor is at least
+ * the following value.
+ */
+#define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH ( 64 )
+#define MOD_EXP_CTIME_MIN_CACHE_LINE_MASK (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - 1)
+
+/* Window sizes optimized for fixed window size modular exponentiation
+ * algorithm (BN_mod_exp_mont_consttime).
+ *
+ * To achieve the security goals of BN_mod_exp_mont_consttime, the
+ * maximum size of the window must not exceed
+ * log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH).
+ *
+ * Window size thresholds are defined for cache line sizes of 32 and 64,
+ * cache line sizes where log_2(32)=5 and log_2(64)=6 respectively. A
+ * window size of 7 should only be used on processors that have a 128
+ * byte or greater cache line size.
+ */
+#if MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 64
+
+# define BN_window_bits_for_ctime_exponent_size(b) \
+ ((b) > 937 ? 6 : \
+ (b) > 306 ? 5 : \
+ (b) > 89 ? 4 : \
+ (b) > 22 ? 3 : 1)
+# define BN_MAX_WINDOW_BITS_FOR_CTIME_EXPONENT_SIZE (6)
+
+#elif MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 32
+
+# define BN_window_bits_for_ctime_exponent_size(b) \
+ ((b) > 306 ? 5 : \
+ (b) > 89 ? 4 : \
+ (b) > 22 ? 3 : 1)
+# define BN_MAX_WINDOW_BITS_FOR_CTIME_EXPONENT_SIZE (5)
+
+#endif
+
+
+/* Pentium pro 16,16,16,32,64 */
+/* Alpha 16,16,16,16,64 */
+#define BN_MULL_SIZE_NORMAL (16) /* 32 */
+#define BN_MUL_RECURSIVE_SIZE_NORMAL (16) /* 32 less than */
+#define BN_SQR_RECURSIVE_SIZE_NORMAL (16) /* 32 */
+#define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL (32) /* 32 */
+#define BN_MONT_CTX_SET_SIZE_WORD (64) /* 32 */
+
+#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+/*
+ * BN_UMULT_HIGH section.
+ *
+ * No, I'm not trying to overwhelm you when stating that the
+ * product of N-bit numbers is 2*N bits wide:-) No, I don't expect
+ * you to be impressed when I say that if the compiler doesn't
+ * support 2*N integer type, then you have to replace every N*N
+ * multiplication with 4 (N/2)*(N/2) accompanied by some shifts
+ * and additions which unavoidably results in severe performance
+ * penalties. Of course provided that the hardware is capable of
+ * producing 2*N result... That's when you normally start
+ * considering assembler implementation. However! It should be
+ * pointed out that some CPUs (most notably Alpha, PowerPC and
+ * upcoming IA-64 family:-) provide *separate* instruction
+ * calculating the upper half of the product placing the result
+ * into a general purpose register. Now *if* the compiler supports
+ * inline assembler, then it's not impossible to implement the
+ * "bignum" routines (and have the compiler optimize 'em)
+ * exhibiting "native" performance in C. That's what BN_UMULT_HIGH
+ * macro is about:-)
+ *
+ * <appro@fy.chalmers.se>
+ */
+# if defined(__alpha) && (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
+# if defined(__DECC)
+# include <c_asm.h>
+# define BN_UMULT_HIGH(a,b) (BN_ULONG)asm("umulh %a0,%a1,%v0",(a),(b))
+# elif defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("umulh %1,%2,%0" \
+ : "=r"(ret) \
+ : "r"(a), "r"(b)); \
+ ret; })
+# endif /* compiler */
+# elif defined(_ARCH_PPC) && defined(__64BIT__) && defined(SIXTY_FOUR_BIT_LONG)
+# if defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("mulhdu %0,%1,%2" \
+ : "=r"(ret) \
+ : "r"(a), "r"(b)); \
+ ret; })
+# endif /* compiler */
+# elif (defined(__x86_64) || defined(__x86_64__)) && \
+ (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
+# if defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret,discard; \
+ asm ("mulq %3" \
+ : "=a"(discard),"=d"(ret) \
+ : "a"(a), "g"(b) \
+ : "cc"); \
+ ret; })
+# define BN_UMULT_LOHI(low,high,a,b) \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(a),"g"(b) \
+ : "cc");
+# endif
+# elif (defined(_M_AMD64) || defined(_M_X64)) && defined(SIXTY_FOUR_BIT)
+# if defined(_MSC_VER) && _MSC_VER>=1400
+ unsigned __int64 __umulh (unsigned __int64 a,unsigned __int64 b);
+ unsigned __int64 _umul128 (unsigned __int64 a,unsigned __int64 b,
+ unsigned __int64 *h);
+# pragma intrinsic(__umulh,_umul128)
+# define BN_UMULT_HIGH(a,b) __umulh((a),(b))
+# define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
+# endif
+# elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
+# if defined(__GNUC__) && __GNUC__>=2
+# if __GNUC__>=4 && __GNUC_MINOR__>=4 /* "h" constraint is no more since 4.4 */
+# define BN_UMULT_HIGH(a,b) (((__uint128_t)(a)*(b))>>64)
+# define BN_UMULT_LOHI(low,high,a,b) ({ \
+ __uint128_t ret=(__uint128_t)(a)*(b); \
+ (high)=ret>>64; (low)=ret; })
+# else
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("dmultu %1,%2" \
+ : "=h"(ret) \
+ : "r"(a), "r"(b) : "l"); \
+ ret; })
+# define BN_UMULT_LOHI(low,high,a,b)\
+ asm ("dmultu %2,%3" \
+ : "=l"(low),"=h"(high) \
+ : "r"(a), "r"(b));
+# endif
+# endif
+# endif /* cpu */
+#endif /* OPENSSL_NO_ASM */
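For readers without the assembler background, the following sketch shows what BN_UMULT_HIGH computes, using the 128-bit integer type available on 64-bit GCC/Clang targets (an assumption; the inline-asm variants above exist precisely for toolchains and CPUs where no such double-width type, or no fast one, is available).

    #include <stdio.h>
    #include <stdint.h>

    /* high 64 bits of the full 128-bit product, i.e. what BN_UMULT_HIGH returns */
    static uint64_t umult_high(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    int main(void)
    {
        uint64_t a = 0xfedcba9876543210ULL;
        uint64_t b = 0x0123456789abcdefULL;

        printf("high half of a*b = %016llx\n",
               (unsigned long long)umult_high(a, b));
        return 0;
    }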
+
+/*************************************************************
+ * Using the long long type
+ */
+#define Lw(t) (((BN_ULONG)(t))&BN_MASK2)
+#define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2)
+
+#ifdef BN_DEBUG_RAND
+#define bn_clear_top2max(a) \
+ { \
+ int ind = (a)->dmax - (a)->top; \
+ BN_ULONG *ftl = &(a)->d[(a)->top-1]; \
+ for (; ind != 0; ind--) \
+ *(++ftl) = 0x0; \
+ }
+#else
+#define bn_clear_top2max(a)
+#endif
+
+#ifdef BN_LLONG
+#define mul_add(r,a,w,c) { \
+ BN_ULLONG t; \
+ t=(BN_ULLONG)w * (a) + (r) + (c); \
+ (r)= Lw(t); \
+ (c)= Hw(t); \
+ }
+
+#define mul(r,a,w,c) { \
+ BN_ULLONG t; \
+ t=(BN_ULLONG)w * (a) + (c); \
+ (r)= Lw(t); \
+ (c)= Hw(t); \
+ }
+
+#define sqr(r0,r1,a) { \
+ BN_ULLONG t; \
+ t=(BN_ULLONG)(a)*(a); \
+ (r0)=Lw(t); \
+ (r1)=Hw(t); \
+ }
+
+#elif defined(BN_UMULT_LOHI)
+#define mul_add(r,a,w,c) { \
+ BN_ULONG high,low,ret,tmp=(a); \
+ ret = (r); \
+ BN_UMULT_LOHI(low,high,w,tmp); \
+ ret += (c); \
+ (c) = (ret<(c))?1:0; \
+ (c) += high; \
+ ret += low; \
+ (c) += (ret<low)?1:0; \
+ (r) = ret; \
+ }
+
+#define mul(r,a,w,c) { \
+ BN_ULONG high,low,ret,ta=(a); \
+ BN_UMULT_LOHI(low,high,w,ta); \
+ ret = low + (c); \
+ (c) = high; \
+ (c) += (ret<low)?1:0; \
+ (r) = ret; \
+ }
+
+#define sqr(r0,r1,a) { \
+ BN_ULONG tmp=(a); \
+ BN_UMULT_LOHI(r0,r1,tmp,tmp); \
+ }
+
+#elif defined(BN_UMULT_HIGH)
+#define mul_add(r,a,w,c) { \
+ BN_ULONG high,low,ret,tmp=(a); \
+ ret = (r); \
+ high= BN_UMULT_HIGH(w,tmp); \
+ ret += (c); \
+ low = (w) * tmp; \
+ (c) = (ret<(c))?1:0; \
+ (c) += high; \
+ ret += low; \
+ (c) += (ret<low)?1:0; \
+ (r) = ret; \
+ }
+
+#define mul(r,a,w,c) { \
+ BN_ULONG high,low,ret,ta=(a); \
+ low = (w) * ta; \
+ high= BN_UMULT_HIGH(w,ta); \
+ ret = low + (c); \
+ (c) = high; \
+ (c) += (ret<low)?1:0; \
+ (r) = ret; \
+ }
+
+#define sqr(r0,r1,a) { \
+ BN_ULONG tmp=(a); \
+ (r0) = tmp * tmp; \
+ (r1) = BN_UMULT_HIGH(tmp,tmp); \
+ }
+
+#else
+/*************************************************************
+ * No long long type
+ */
+
+#define LBITS(a) ((a)&BN_MASK2l)
+#define HBITS(a) (((a)>>BN_BITS4)&BN_MASK2l)
+#define L2HBITS(a) (((a)<<BN_BITS4)&BN_MASK2)
+
+#define LLBITS(a) ((a)&BN_MASKl)
+#define LHBITS(a) (((a)>>BN_BITS2)&BN_MASKl)
+#define LL2HBITS(a) ((BN_ULLONG)((a)&BN_MASKl)<<BN_BITS2)
+
+#define mul64(l,h,bl,bh) \
+ { \
+ BN_ULONG m,m1,lt,ht; \
+ \
+ lt=l; \
+ ht=h; \
+ m =(bh)*(lt); \
+ lt=(bl)*(lt); \
+ m1=(bl)*(ht); \
+ ht =(bh)*(ht); \
+ m=(m+m1)&BN_MASK2; if (m < m1) ht+=L2HBITS((BN_ULONG)1); \
+ ht+=HBITS(m); \
+ m1=L2HBITS(m); \
+ lt=(lt+m1)&BN_MASK2; if (lt < m1) ht++; \
+ (l)=lt; \
+ (h)=ht; \
+ }
+
+#define sqr64(lo,ho,in) \
+ { \
+ BN_ULONG l,h,m; \
+ \
+ h=(in); \
+ l=LBITS(h); \
+ h=HBITS(h); \
+ m =(l)*(h); \
+ l*=l; \
+ h*=h; \
+ h+=(m&BN_MASK2h1)>>(BN_BITS4-1); \
+ m =(m&BN_MASK2l)<<(BN_BITS4+1); \
+ l=(l+m)&BN_MASK2; if (l < m) h++; \
+ (lo)=l; \
+ (ho)=h; \
+ }
+
+#define mul_add(r,a,bl,bh,c) { \
+ BN_ULONG l,h; \
+ \
+ h= (a); \
+ l=LBITS(h); \
+ h=HBITS(h); \
+ mul64(l,h,(bl),(bh)); \
+ \
+ /* non-multiply part */ \
+ l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
+ (c)=(r); \
+ l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
+ (c)=h&BN_MASK2; \
+ (r)=l; \
+ }
+
+#define mul(r,a,bl,bh,c) { \
+ BN_ULONG l,h; \
+ \
+ h= (a); \
+ l=LBITS(h); \
+ h=HBITS(h); \
+ mul64(l,h,(bl),(bh)); \
+ \
+ /* non-multiply part */ \
+ l+=(c); if ((l&BN_MASK2) < (c)) h++; \
+ (c)=h&BN_MASK2; \
+ (r)=l&BN_MASK2; \
+ }
+#endif /* !BN_LLONG */
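All three mul_add()/mul() families above implement the same word-level contract; a small standalone illustration with 32-bit words and a 64-bit intermediate (analogous to the BN_LLONG variant) makes the carry handling explicit.

    #include <stdio.h>
    #include <stdint.h>

    /* r:c := low and high 32-bit words of w*a + r + c, as mul_add() does */
    static void mul_add_words(uint32_t *r, uint32_t a, uint32_t w, uint32_t *c)
    {
        uint64_t t = (uint64_t)w * a + *r + *c;

        *r = (uint32_t)t;          /* result word */
        *c = (uint32_t)(t >> 32);  /* carry into the next word */
    }

    int main(void)
    {
        uint32_t r = 0x89abcdefU, c = 1;

        mul_add_words(&r, 0xffffffffU, 0xffffffffU, &c);
        printf("r=%08x c=%08x\n", r, c);   /* the sum never overflows 64 bits */
        return 0;
    }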
+
+#if defined(OPENSSL_DOING_MAKEDEPEND) && defined(OPENSSL_FIPS)
+#undef bn_div_words
+#endif
+
+void bn_mul_normal(BN_ULONG *r,BN_ULONG *a,int na,BN_ULONG *b,int nb);
+void bn_mul_comba8(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b);
+void bn_mul_comba4(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b);
+void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp);
+void bn_sqr_comba8(BN_ULONG *r,const BN_ULONG *a);
+void bn_sqr_comba4(BN_ULONG *r,const BN_ULONG *a);
+int bn_cmp_words(const BN_ULONG *a,const BN_ULONG *b,int n);
+int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b,
+ int cl, int dl);
+void bn_mul_recursive(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b,int n2,
+ int dna,int dnb,BN_ULONG *t);
+void bn_mul_part_recursive(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b,
+ int n,int tna,int tnb,BN_ULONG *t);
+void bn_sqr_recursive(BN_ULONG *r,const BN_ULONG *a, int n2, BN_ULONG *t);
+void bn_mul_low_normal(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b, int n);
+void bn_mul_low_recursive(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b,int n2,
+ BN_ULONG *t);
+void bn_mul_high(BN_ULONG *r,BN_ULONG *a,BN_ULONG *b,BN_ULONG *l,int n2,
+ BN_ULONG *t);
+BN_ULONG bn_add_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
+ int cl, int dl);
+BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
+ int cl, int dl);
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_lib.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_lib.c
new file mode 100644
index 00000000..5461e6ee
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_lib.c
@@ -0,0 +1,878 @@
+/* crypto/bn/bn_lib.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef BN_DEBUG
+# undef NDEBUG /* avoid conflicting definitions */
+# define NDEBUG
+#endif
+
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+const char BN_version[]="Big Number" OPENSSL_VERSION_PTEXT;
+
+/* This stuff appears to be completely unused, so is deprecated */
+#ifndef OPENSSL_NO_DEPRECATED
+/* For a 32 bit machine
+ * 2 - 4 == 128
+ * 3 - 8 == 256
+ * 4 - 16 == 512
+ * 5 - 32 == 1024
+ * 6 - 64 == 2048
+ * 7 - 128 == 4096
+ * 8 - 256 == 8192
+ */
+static int bn_limit_bits=0;
+static int bn_limit_num=8; /* (1<<bn_limit_bits) */
+static int bn_limit_bits_low=0;
+static int bn_limit_num_low=8; /* (1<<bn_limit_bits_low) */
+static int bn_limit_bits_high=0;
+static int bn_limit_num_high=8; /* (1<<bn_limit_bits_high) */
+static int bn_limit_bits_mont=0;
+static int bn_limit_num_mont=8; /* (1<<bn_limit_bits_mont) */
+
+void BN_set_params(int mult, int high, int low, int mont)
+ {
+ if (mult >= 0)
+ {
+ if (mult > (int)(sizeof(int)*8)-1)
+ mult=sizeof(int)*8-1;
+ bn_limit_bits=mult;
+ bn_limit_num=1<<mult;
+ }
+ if (high >= 0)
+ {
+ if (high > (int)(sizeof(int)*8)-1)
+ high=sizeof(int)*8-1;
+ bn_limit_bits_high=high;
+ bn_limit_num_high=1<<high;
+ }
+ if (low >= 0)
+ {
+ if (low > (int)(sizeof(int)*8)-1)
+ low=sizeof(int)*8-1;
+ bn_limit_bits_low=low;
+ bn_limit_num_low=1<<low;
+ }
+ if (mont >= 0)
+ {
+ if (mont > (int)(sizeof(int)*8)-1)
+ mont=sizeof(int)*8-1;
+ bn_limit_bits_mont=mont;
+ bn_limit_num_mont=1<<mont;
+ }
+ }
+
+int BN_get_params(int which)
+ {
+ if (which == 0) return(bn_limit_bits);
+ else if (which == 1) return(bn_limit_bits_high);
+ else if (which == 2) return(bn_limit_bits_low);
+ else if (which == 3) return(bn_limit_bits_mont);
+ else return(0);
+ }
+#endif
+
+const BIGNUM *BN_value_one(void)
+ {
+ static const BN_ULONG data_one=1L;
+ static const BIGNUM const_one={(BN_ULONG *)&data_one,1,1,0,BN_FLG_STATIC_DATA};
+
+ return(&const_one);
+ }
+
+int BN_num_bits_word(BN_ULONG l)
+ {
+ static const unsigned char bits[256]={
+ 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ };
+
+#if defined(SIXTY_FOUR_BIT_LONG)
+ if (l & 0xffffffff00000000L)
+ {
+ if (l & 0xffff000000000000L)
+ {
+ if (l & 0xff00000000000000L)
+ {
+ return(bits[(int)(l>>56)]+56);
+ }
+ else return(bits[(int)(l>>48)]+48);
+ }
+ else
+ {
+ if (l & 0x0000ff0000000000L)
+ {
+ return(bits[(int)(l>>40)]+40);
+ }
+ else return(bits[(int)(l>>32)]+32);
+ }
+ }
+ else
+#else
+#ifdef SIXTY_FOUR_BIT
+ if (l & 0xffffffff00000000LL)
+ {
+ if (l & 0xffff000000000000LL)
+ {
+ if (l & 0xff00000000000000LL)
+ {
+ return(bits[(int)(l>>56)]+56);
+ }
+ else return(bits[(int)(l>>48)]+48);
+ }
+ else
+ {
+ if (l & 0x0000ff0000000000LL)
+ {
+ return(bits[(int)(l>>40)]+40);
+ }
+ else return(bits[(int)(l>>32)]+32);
+ }
+ }
+ else
+#endif
+#endif
+ {
+#if defined(THIRTY_TWO_BIT) || defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
+ if (l & 0xffff0000L)
+ {
+ if (l & 0xff000000L)
+ return(bits[(int)(l>>24L)]+24);
+ else return(bits[(int)(l>>16L)]+16);
+ }
+ else
+#endif
+ {
+#if defined(THIRTY_TWO_BIT) || defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG)
+ if (l & 0xff00L)
+ return(bits[(int)(l>>8)]+8);
+ else
+#endif
+ return(bits[(int)(l )] );
+ }
+ }
+ }
+
+int BN_num_bits(const BIGNUM *a)
+ {
+ int i = a->top - 1;
+ bn_check_top(a);
+
+ if (BN_is_zero(a)) return 0;
+ return ((i*BN_BITS2) + BN_num_bits_word(a->d[i]));
+ }
+
+void BN_clear_free(BIGNUM *a)
+ {
+ int i;
+
+ if (a == NULL) return;
+ bn_check_top(a);
+ if (a->d != NULL)
+ {
+ OPENSSL_cleanse(a->d,a->dmax*sizeof(a->d[0]));
+ if (!(BN_get_flags(a,BN_FLG_STATIC_DATA)))
+ OPENSSL_free(a->d);
+ }
+ i=BN_get_flags(a,BN_FLG_MALLOCED);
+ OPENSSL_cleanse(a,sizeof(BIGNUM));
+ if (i)
+ OPENSSL_free(a);
+ }
+
+void BN_free(BIGNUM *a)
+ {
+ if (a == NULL) return;
+ bn_check_top(a);
+ if ((a->d != NULL) && !(BN_get_flags(a,BN_FLG_STATIC_DATA)))
+ OPENSSL_free(a->d);
+ if (a->flags & BN_FLG_MALLOCED)
+ OPENSSL_free(a);
+ else
+ {
+#ifndef OPENSSL_NO_DEPRECATED
+ a->flags|=BN_FLG_FREE;
+#endif
+ a->d = NULL;
+ }
+ }
+
+void BN_init(BIGNUM *a)
+ {
+ memset(a,0,sizeof(BIGNUM));
+ bn_check_top(a);
+ }
+
+BIGNUM *BN_new(void)
+ {
+ BIGNUM *ret;
+
+ if ((ret=(BIGNUM *)OPENSSL_malloc(sizeof(BIGNUM))) == NULL)
+ {
+ BNerr(BN_F_BN_NEW,ERR_R_MALLOC_FAILURE);
+ return(NULL);
+ }
+ ret->flags=BN_FLG_MALLOCED;
+ ret->top=0;
+ ret->neg=0;
+ ret->dmax=0;
+ ret->d=NULL;
+ bn_check_top(ret);
+ return(ret);
+ }
+
+/* This is used both by bn_expand2() and bn_dup_expand() */
+/* The caller MUST check that words > b->dmax before calling this */
+static BN_ULONG *bn_expand_internal(const BIGNUM *b, int words)
+ {
+ BN_ULONG *A,*a = NULL;
+ const BN_ULONG *B;
+ int i;
+
+ bn_check_top(b);
+
+ if (words > (INT_MAX/(4*BN_BITS2)))
+ {
+ BNerr(BN_F_BN_EXPAND_INTERNAL,BN_R_BIGNUM_TOO_LONG);
+ return NULL;
+ }
+ if (BN_get_flags(b,BN_FLG_STATIC_DATA))
+ {
+ BNerr(BN_F_BN_EXPAND_INTERNAL,BN_R_EXPAND_ON_STATIC_BIGNUM_DATA);
+ return(NULL);
+ }
+ a=A=(BN_ULONG *)OPENSSL_malloc(sizeof(BN_ULONG)*words);
+ if (A == NULL)
+ {
+ BNerr(BN_F_BN_EXPAND_INTERNAL,ERR_R_MALLOC_FAILURE);
+ return(NULL);
+ }
+#if 1
+ B=b->d;
+ /* Check if the previous number needs to be copied */
+ if (B != NULL)
+ {
+ for (i=b->top>>2; i>0; i--,A+=4,B+=4)
+ {
+ /*
+ * The fact that the loop is unrolled
+ * 4-wise is a tribute to Intel. It's
+ * the one that doesn't have enough
+			 * registers to accommodate more data.
+ * I'd unroll it 8-wise otherwise:-)
+ *
+ * <appro@fy.chalmers.se>
+ */
+ BN_ULONG a0,a1,a2,a3;
+ a0=B[0]; a1=B[1]; a2=B[2]; a3=B[3];
+ A[0]=a0; A[1]=a1; A[2]=a2; A[3]=a3;
+ }
+ switch (b->top&3)
+ {
+ case 3: A[2]=B[2];
+ case 2: A[1]=B[1];
+ case 1: A[0]=B[0];
+ case 0: /* workaround for ultrix cc: without 'case 0', the optimizer does
+ * the switch table by doing a=top&3; a--; goto jump_table[a];
+ * which fails for top== 0 */
+ ;
+ }
+ }
+
+#else
+ memset(A,0,sizeof(BN_ULONG)*words);
+ memcpy(A,b->d,sizeof(b->d[0])*b->top);
+#endif
+
+ return(a);
+ }
+
+/* This is an internal function that can be used instead of bn_expand2()
+ * when there is a need to copy BIGNUMs instead of only expanding the
+ * data part, while still expanding them.
+ * Especially useful when needing to expand BIGNUMs that are declared
+ * 'const' and should therefore not be changed.
+ * The reason to use this instead of a BN_dup() followed by a bn_expand2()
+ * is memory allocation overhead. A BN_dup() followed by a bn_expand2()
+ * will allocate new memory for the BIGNUM data twice, and free it once,
+ * while bn_dup_expand() makes sure allocation is made only once.
+ */
+
+#ifndef OPENSSL_NO_DEPRECATED
+BIGNUM *bn_dup_expand(const BIGNUM *b, int words)
+ {
+ BIGNUM *r = NULL;
+
+ bn_check_top(b);
+
+ /* This function does not work if
+ * words <= b->dmax && top < words
+ * because BN_dup() does not preserve 'dmax'!
+ * (But bn_dup_expand() is not used anywhere yet.)
+ */
+
+ if (words > b->dmax)
+ {
+ BN_ULONG *a = bn_expand_internal(b, words);
+
+ if (a)
+ {
+ r = BN_new();
+ if (r)
+ {
+ r->top = b->top;
+ r->dmax = words;
+ r->neg = b->neg;
+ r->d = a;
+ }
+ else
+ {
+ /* r == NULL, BN_new failure */
+ OPENSSL_free(a);
+ }
+ }
+ /* If a == NULL, there was an error in allocation in
+ bn_expand_internal(), and NULL should be returned */
+ }
+ else
+ {
+ r = BN_dup(b);
+ }
+
+ bn_check_top(r);
+ return r;
+ }
+#endif
+
+/* This is an internal function that should not be used in applications.
+ * It ensures that 'b' has enough room for a 'words' word number
+ * and initialises any unused part of b->d with leading zeros.
+ * It is mostly used by the various BIGNUM routines. If there is an error,
+ * NULL is returned. If not, 'b' is returned. */
+
+BIGNUM *bn_expand2(BIGNUM *b, int words)
+ {
+ bn_check_top(b);
+
+ if (words > b->dmax)
+ {
+ BN_ULONG *a = bn_expand_internal(b, words);
+ if(!a) return NULL;
+ if(b->d) OPENSSL_free(b->d);
+ b->d=a;
+ b->dmax=words;
+ }
+
+/* None of this should be necessary because of what b->top means! */
+#if 0
+ /* NB: bn_wexpand() calls this only if the BIGNUM really has to grow */
+ if (b->top < b->dmax)
+ {
+ int i;
+ BN_ULONG *A = &(b->d[b->top]);
+ for (i=(b->dmax - b->top)>>3; i>0; i--,A+=8)
+ {
+ A[0]=0; A[1]=0; A[2]=0; A[3]=0;
+ A[4]=0; A[5]=0; A[6]=0; A[7]=0;
+ }
+ for (i=(b->dmax - b->top)&7; i>0; i--,A++)
+ A[0]=0;
+ assert(A == &(b->d[b->dmax]));
+ }
+#endif
+ bn_check_top(b);
+ return b;
+ }
+
+BIGNUM *BN_dup(const BIGNUM *a)
+ {
+ BIGNUM *t;
+
+ if (a == NULL) return NULL;
+ bn_check_top(a);
+
+ t = BN_new();
+ if (t == NULL) return NULL;
+ if(!BN_copy(t, a))
+ {
+ BN_free(t);
+ return NULL;
+ }
+ bn_check_top(t);
+ return t;
+ }
+
+BIGNUM *BN_copy(BIGNUM *a, const BIGNUM *b)
+ {
+ int i;
+ BN_ULONG *A;
+ const BN_ULONG *B;
+
+ bn_check_top(b);
+
+ if (a == b) return(a);
+ if (bn_wexpand(a,b->top) == NULL) return(NULL);
+
+#if 1
+ A=a->d;
+ B=b->d;
+ for (i=b->top>>2; i>0; i--,A+=4,B+=4)
+ {
+ BN_ULONG a0,a1,a2,a3;
+ a0=B[0]; a1=B[1]; a2=B[2]; a3=B[3];
+ A[0]=a0; A[1]=a1; A[2]=a2; A[3]=a3;
+ }
+ switch (b->top&3)
+ {
+ case 3: A[2]=B[2];
+ case 2: A[1]=B[1];
+ case 1: A[0]=B[0];
+ case 0: ; /* ultrix cc workaround, see comments in bn_expand_internal */
+ }
+#else
+ memcpy(a->d,b->d,sizeof(b->d[0])*b->top);
+#endif
+
+ a->top=b->top;
+ a->neg=b->neg;
+ bn_check_top(a);
+ return(a);
+ }
+
+void BN_swap(BIGNUM *a, BIGNUM *b)
+ {
+ int flags_old_a, flags_old_b;
+ BN_ULONG *tmp_d;
+ int tmp_top, tmp_dmax, tmp_neg;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ flags_old_a = a->flags;
+ flags_old_b = b->flags;
+
+ tmp_d = a->d;
+ tmp_top = a->top;
+ tmp_dmax = a->dmax;
+ tmp_neg = a->neg;
+
+ a->d = b->d;
+ a->top = b->top;
+ a->dmax = b->dmax;
+ a->neg = b->neg;
+
+ b->d = tmp_d;
+ b->top = tmp_top;
+ b->dmax = tmp_dmax;
+ b->neg = tmp_neg;
+
+ a->flags = (flags_old_a & BN_FLG_MALLOCED) | (flags_old_b & BN_FLG_STATIC_DATA);
+ b->flags = (flags_old_b & BN_FLG_MALLOCED) | (flags_old_a & BN_FLG_STATIC_DATA);
+ bn_check_top(a);
+ bn_check_top(b);
+ }
+
+void BN_clear(BIGNUM *a)
+ {
+ bn_check_top(a);
+ if (a->d != NULL)
+ memset(a->d,0,a->dmax*sizeof(a->d[0]));
+ a->top=0;
+ a->neg=0;
+ }
+
+BN_ULONG BN_get_word(const BIGNUM *a)
+ {
+ if (a->top > 1)
+ return BN_MASK2;
+ else if (a->top == 1)
+ return a->d[0];
+ /* a->top == 0 */
+ return 0;
+ }
+
+int BN_set_word(BIGNUM *a, BN_ULONG w)
+ {
+ bn_check_top(a);
+ if (bn_expand(a,(int)sizeof(BN_ULONG)*8) == NULL) return(0);
+ a->neg = 0;
+ a->d[0] = w;
+ a->top = (w ? 1 : 0);
+ bn_check_top(a);
+ return(1);
+ }
+
+BIGNUM *BN_bin2bn(const unsigned char *s, int len, BIGNUM *ret)
+ {
+ unsigned int i,m;
+ unsigned int n;
+ BN_ULONG l;
+ BIGNUM *bn = NULL;
+
+ if (ret == NULL)
+ ret = bn = BN_new();
+ if (ret == NULL) return(NULL);
+ bn_check_top(ret);
+ l=0;
+ n=len;
+ if (n == 0)
+ {
+ ret->top=0;
+ return(ret);
+ }
+ i=((n-1)/BN_BYTES)+1;
+ m=((n-1)%(BN_BYTES));
+ if (bn_wexpand(ret, (int)i) == NULL)
+ {
+ if (bn) BN_free(bn);
+ return NULL;
+ }
+ ret->top=i;
+ ret->neg=0;
+ while (n--)
+ {
+ l=(l<<8L)| *(s++);
+ if (m-- == 0)
+ {
+ ret->d[--i]=l;
+ l=0;
+ m=BN_BYTES-1;
+ }
+ }
+	/* bn_correct_top() is needed because the most significant words
+	 * may be zero, e.g. when the input has leading zero bytes */
+ bn_correct_top(ret);
+ return(ret);
+ }
+
+/* ignore negative */
+int BN_bn2bin(const BIGNUM *a, unsigned char *to)
+ {
+ int n,i;
+ BN_ULONG l;
+
+ bn_check_top(a);
+ n=i=BN_num_bytes(a);
+ while (i--)
+ {
+ l=a->d[i/BN_BYTES];
+ *(to++)=(unsigned char)(l>>(8*(i%BN_BYTES)))&0xff;
+ }
+ return(n);
+ }
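A short round-trip sketch for the two conversion routines above, assuming the public API from <openssl/bn.h>: BN_bin2bn() reads a big-endian byte string and BN_bn2bin() writes exactly BN_num_bytes() bytes back, dropping any leading zero bytes and, as the comment notes, the sign.

    #include <stdio.h>
    #include <openssl/bn.h>
    #include <openssl/crypto.h>

    int main(void)
    {
        static const unsigned char in[] = { 0x01, 0x23, 0x45, 0x67, 0x89 };
        unsigned char out[sizeof(in)];
        BIGNUM *n = BN_bin2bn(in, sizeof(in), NULL);
        char *hex;
        int len;

        if (n == NULL)
            return 1;

        len = BN_bn2bin(n, out);          /* len == BN_num_bytes(n) == 5 */
        hex = BN_bn2hex(n);
        printf("%s (%d bytes)\n", hex, len);

        OPENSSL_free(hex);
        BN_free(n);
        return 0;
    }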
+
+int BN_ucmp(const BIGNUM *a, const BIGNUM *b)
+ {
+ int i;
+ BN_ULONG t1,t2,*ap,*bp;
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ i=a->top-b->top;
+ if (i != 0) return(i);
+ ap=a->d;
+ bp=b->d;
+ for (i=a->top-1; i>=0; i--)
+ {
+ t1= ap[i];
+ t2= bp[i];
+ if (t1 != t2)
+ return((t1 > t2) ? 1 : -1);
+ }
+ return(0);
+ }
+
+int BN_cmp(const BIGNUM *a, const BIGNUM *b)
+ {
+ int i;
+ int gt,lt;
+ BN_ULONG t1,t2;
+
+ if ((a == NULL) || (b == NULL))
+ {
+ if (a != NULL)
+ return(-1);
+ else if (b != NULL)
+ return(1);
+ else
+ return(0);
+ }
+
+ bn_check_top(a);
+ bn_check_top(b);
+
+ if (a->neg != b->neg)
+ {
+ if (a->neg)
+ return(-1);
+ else return(1);
+ }
+ if (a->neg == 0)
+ { gt=1; lt= -1; }
+ else { gt= -1; lt=1; }
+
+ if (a->top > b->top) return(gt);
+ if (a->top < b->top) return(lt);
+ for (i=a->top-1; i>=0; i--)
+ {
+ t1=a->d[i];
+ t2=b->d[i];
+ if (t1 > t2) return(gt);
+ if (t1 < t2) return(lt);
+ }
+ return(0);
+ }
+
+int BN_set_bit(BIGNUM *a, int n)
+ {
+ int i,j,k;
+
+ if (n < 0)
+ return 0;
+
+ i=n/BN_BITS2;
+ j=n%BN_BITS2;
+ if (a->top <= i)
+ {
+ if (bn_wexpand(a,i+1) == NULL) return(0);
+ for(k=a->top; k<i+1; k++)
+ a->d[k]=0;
+ a->top=i+1;
+ }
+
+ a->d[i]|=(((BN_ULONG)1)<<j);
+ bn_check_top(a);
+ return(1);
+ }
+
+int BN_clear_bit(BIGNUM *a, int n)
+ {
+ int i,j;
+
+ bn_check_top(a);
+ if (n < 0) return 0;
+
+ i=n/BN_BITS2;
+ j=n%BN_BITS2;
+ if (a->top <= i) return(0);
+
+ a->d[i]&=(~(((BN_ULONG)1)<<j));
+ bn_correct_top(a);
+ return(1);
+ }
+
+int BN_is_bit_set(const BIGNUM *a, int n)
+ {
+ int i,j;
+
+ bn_check_top(a);
+ if (n < 0) return 0;
+ i=n/BN_BITS2;
+ j=n%BN_BITS2;
+ if (a->top <= i) return 0;
+ return (int)(((a->d[i])>>j)&((BN_ULONG)1));
+ }
+
+int BN_mask_bits(BIGNUM *a, int n)
+ {
+ int b,w;
+
+ bn_check_top(a);
+ if (n < 0) return 0;
+
+ w=n/BN_BITS2;
+ b=n%BN_BITS2;
+ if (w >= a->top) return 0;
+ if (b == 0)
+ a->top=w;
+ else
+ {
+ a->top=w+1;
+ a->d[w]&= ~(BN_MASK2<<b);
+ }
+ bn_correct_top(a);
+ return(1);
+ }
+
+void BN_set_negative(BIGNUM *a, int b)
+ {
+ if (b && !BN_is_zero(a))
+ a->neg = 1;
+ else
+ a->neg = 0;
+ }
+
+int bn_cmp_words(const BN_ULONG *a, const BN_ULONG *b, int n)
+ {
+ int i;
+ BN_ULONG aa,bb;
+
+ aa=a[n-1];
+ bb=b[n-1];
+ if (aa != bb) return((aa > bb)?1:-1);
+ for (i=n-2; i>=0; i--)
+ {
+ aa=a[i];
+ bb=b[i];
+ if (aa != bb) return((aa > bb)?1:-1);
+ }
+ return(0);
+ }
+
+/* Here follows a specialised variant of bn_cmp_words(). It has the
+ property of performing the operation on arrays of different sizes.
+ The sizes of those arrays are expressed through cl, which is the
+ common length ( basically, min(len(a),len(b)) ), and dl, which is the
+ delta between the two lengths, calculated as len(a)-len(b).
+ All lengths are the number of BN_ULONGs... */
+
+int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b,
+ int cl, int dl)
+ {
+ int n,i;
+ n = cl-1;
+
+ if (dl < 0)
+ {
+ for (i=dl; i<0; i++)
+ {
+ if (b[n-i] != 0)
+ return -1; /* a < b */
+ }
+ }
+ if (dl > 0)
+ {
+ for (i=dl; i>0; i--)
+ {
+ if (a[n+i] != 0)
+ return 1; /* a > b */
+ }
+ }
+ return bn_cmp_words(a,b,cl);
+ }
+
+/*
+ * Constant-time conditional swap of a and b.
+ * a and b are swapped if condition is not 0. The code assumes that at most one bit of condition is set.
+ * nwords is the number of words to swap. The code assumes that at least nwords are allocated in both a and b,
+ * and that no more than nwords are used by either a or b.
+ * a and b cannot be the same number
+ */
+void BN_consttime_swap(BN_ULONG condition, BIGNUM *a, BIGNUM *b, int nwords)
+ {
+ BN_ULONG t;
+ int i;
+
+ bn_wcheck_size(a, nwords);
+ bn_wcheck_size(b, nwords);
+
+ assert(a != b);
+ assert((condition & (condition - 1)) == 0);
+ assert(sizeof(BN_ULONG) >= sizeof(int));
+
+ condition = ((condition - 1) >> (BN_BITS2 - 1)) - 1;
+
+ t = (a->top^b->top) & condition;
+ a->top ^= t;
+ b->top ^= t;
+
+#define BN_CONSTTIME_SWAP(ind) \
+ do { \
+ t = (a->d[ind] ^ b->d[ind]) & condition; \
+ a->d[ind] ^= t; \
+ b->d[ind] ^= t; \
+ } while (0)
+
+
+ switch (nwords) {
+ default:
+ for (i = 10; i < nwords; i++)
+ BN_CONSTTIME_SWAP(i);
+ /* Fallthrough */
+ case 10: BN_CONSTTIME_SWAP(9); /* Fallthrough */
+ case 9: BN_CONSTTIME_SWAP(8); /* Fallthrough */
+ case 8: BN_CONSTTIME_SWAP(7); /* Fallthrough */
+ case 7: BN_CONSTTIME_SWAP(6); /* Fallthrough */
+ case 6: BN_CONSTTIME_SWAP(5); /* Fallthrough */
+ case 5: BN_CONSTTIME_SWAP(4); /* Fallthrough */
+ case 4: BN_CONSTTIME_SWAP(3); /* Fallthrough */
+ case 3: BN_CONSTTIME_SWAP(2); /* Fallthrough */
+ case 2: BN_CONSTTIME_SWAP(1); /* Fallthrough */
+ case 1: BN_CONSTTIME_SWAP(0);
+ }
+#undef BN_CONSTTIME_SWAP
+}
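The masking trick used by BN_consttime_swap() above, reduced to a pair of plain 64-bit words as an illustrative sketch (not OpenSSL API): a 0/1 condition is stretched into an all-zeros or all-ones mask, and the XOR swap then runs unconditionally, so neither the branch pattern nor the memory access pattern depends on the condition.

    #include <stdio.h>
    #include <stdint.h>

    /* swap *a and *b iff condition == 1, without branching on it */
    static void consttime_swap_word(uint64_t condition, uint64_t *a, uint64_t *b)
    {
        uint64_t mask = ((condition - 1) >> 63) - 1;   /* 0 -> 0, 1 -> all ones */
        uint64_t t = (*a ^ *b) & mask;

        *a ^= t;
        *b ^= t;
    }

    int main(void)
    {
        uint64_t x = 0x1111111111111111ULL, y = 0x2222222222222222ULL;

        consttime_swap_word(1, &x, &y);   /* swapped */
        printf("x=%016llx y=%016llx\n", (unsigned long long)x, (unsigned long long)y);
        consttime_swap_word(0, &x, &y);   /* unchanged */
        printf("x=%016llx y=%016llx\n", (unsigned long long)x, (unsigned long long)y);
        return 0;
    }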
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mod.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mod.c
new file mode 100644
index 00000000..77d6ddb9
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mod.c
@@ -0,0 +1,301 @@
+/* crypto/bn/bn_mod.c */
+/* Includes code written by Lenka Fibikova <fibikova@exp-math.uni-essen.de>
+ * for the OpenSSL project. */
+/* ====================================================================
+ * Copyright (c) 1998-2000 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+
+#if 0 /* now just a #define */
+int BN_mod(BIGNUM *rem, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx)
+ {
+ return(BN_div(NULL,rem,m,d,ctx));
+ /* note that rem->neg == m->neg (unless the remainder is zero) */
+ }
+#endif
+
+
+int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx)
+ {
+ /* like BN_mod, but returns non-negative remainder
+ * (i.e., 0 <= r < |d| always holds) */
+
+ if (!(BN_mod(r,m,d,ctx)))
+ return 0;
+ if (!r->neg)
+ return 1;
+ /* now -|d| < r < 0, so we have to set r := r + |d| */
+ return (d->neg ? BN_sub : BN_add)(r, r, d);
+}
+
+
+int BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx)
+ {
+ if (!BN_add(r, a, b)) return 0;
+ return BN_nnmod(r, r, m, ctx);
+ }
+
+
+/* BN_mod_add variant that may be used if both a and b are non-negative
+ * and less than m */
+int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m)
+ {
+ if (!BN_uadd(r, a, b)) return 0;
+ if (BN_ucmp(r, m) >= 0)
+ return BN_usub(r, r, m);
+ return 1;
+ }
+
+
+int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx)
+ {
+ if (!BN_sub(r, a, b)) return 0;
+ return BN_nnmod(r, r, m, ctx);
+ }
+
+
+/* BN_mod_sub variant that may be used if both a and b are non-negative
+ * and less than m */
+int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m)
+ {
+ if (!BN_sub(r, a, b)) return 0;
+ if (r->neg)
+ return BN_add(r, r, m);
+ return 1;
+ }
+
+
+/* slow but works */
+int BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m,
+ BN_CTX *ctx)
+ {
+ BIGNUM *t;
+ int ret=0;
+
+ bn_check_top(a);
+ bn_check_top(b);
+ bn_check_top(m);
+
+ BN_CTX_start(ctx);
+ if ((t = BN_CTX_get(ctx)) == NULL) goto err;
+ if (a == b)
+ { if (!BN_sqr(t,a,ctx)) goto err; }
+ else
+ { if (!BN_mul(t,a,b,ctx)) goto err; }
+ if (!BN_nnmod(r,t,m,ctx)) goto err;
+ bn_check_top(r);
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ return(ret);
+ }
+
+
+int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx)
+ {
+ if (!BN_sqr(r, a, ctx)) return 0;
+ /* r->neg == 0, thus we don't need BN_nnmod */
+ return BN_mod(r, r, m, ctx);
+ }
+
+
+int BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx)
+ {
+ if (!BN_lshift1(r, a)) return 0;
+ bn_check_top(r);
+ return BN_nnmod(r, r, m, ctx);
+ }
+
+
+/* BN_mod_lshift1 variant that may be used if a is non-negative
+ * and less than m */
+int BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m)
+ {
+ if (!BN_lshift1(r, a)) return 0;
+ bn_check_top(r);
+ if (BN_cmp(r, m) >= 0)
+ return BN_sub(r, r, m);
+ return 1;
+ }
+
+
+int BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m, BN_CTX *ctx)
+ {
+ BIGNUM *abs_m = NULL;
+ int ret;
+
+ if (!BN_nnmod(r, a, m, ctx)) return 0;
+
+ if (m->neg)
+ {
+ abs_m = BN_dup(m);
+ if (abs_m == NULL) return 0;
+ abs_m->neg = 0;
+ }
+
+ ret = BN_mod_lshift_quick(r, r, n, (abs_m ? abs_m : m));
+ bn_check_top(r);
+
+ if (abs_m)
+ BN_free(abs_m);
+ return ret;
+ }
+
+
+/* BN_mod_lshift variant that may be used if a is non-negative
+ * and less than m */
+int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m)
+ {
+ if (r != a)
+ {
+ if (BN_copy(r, a) == NULL) return 0;
+ }
+
+ while (n > 0)
+ {
+ int max_shift;
+
+ /* 0 < r < m */
+ max_shift = BN_num_bits(m) - BN_num_bits(r);
+ /* max_shift >= 0 */
+
+ if (max_shift < 0)
+ {
+ BNerr(BN_F_BN_MOD_LSHIFT_QUICK, BN_R_INPUT_NOT_REDUCED);
+ return 0;
+ }
+
+ if (max_shift > n)
+ max_shift = n;
+
+ if (max_shift)
+ {
+ if (!BN_lshift(r, r, max_shift)) return 0;
+ n -= max_shift;
+ }
+ else
+ {
+ if (!BN_lshift1(r, r)) return 0;
+ --n;
+ }
+
+ /* BN_num_bits(r) <= BN_num_bits(m) */
+
+ if (BN_cmp(r, m) >= 0)
+ {
+ if (!BN_sub(r, r, m)) return 0;
+ }
+ }
+ bn_check_top(r);
+
+ return 1;
+ }
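To make the point of BN_nnmod() at the top of this file concrete: for a negative dividend, BN_mod() (i.e. the remainder from BN_div()) keeps the dividend's sign, while BN_nnmod() always returns a value in [0, |d|). A small sketch using the public API:

    #include <stdio.h>
    #include <openssl/bn.h>
    #include <openssl/crypto.h>

    static void show(const char *label, const BIGNUM *r)
    {
        char *dec = BN_bn2dec(r);

        printf("%s %s\n", label, dec);
        OPENSSL_free(dec);
    }

    int main(void)
    {
        BN_CTX *ctx = BN_CTX_new();
        BIGNUM *m = BN_new(), *d = BN_new(), *r = BN_new();

        if (ctx == NULL || m == NULL || d == NULL || r == NULL)
            return 1;

        BN_set_word(m, 7);
        BN_set_negative(m, 1);            /* m = -7 */
        BN_set_word(d, 3);

        BN_mod(r, m, d, ctx);             /* remainder keeps m's sign */
        show("BN_mod(-7, 3)   =", r);     /* -1 */
        BN_nnmod(r, m, d, ctx);           /* always non-negative */
        show("BN_nnmod(-7, 3) =", r);     /*  2 */

        BN_free(m);
        BN_free(d);
        BN_free(r);
        BN_CTX_free(ctx);
        return 0;
    }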
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mont.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mont.c
new file mode 100644
index 00000000..ee8532c7
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mont.c
@@ -0,0 +1,515 @@
+/* crypto/bn/bn_mont.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+/*
+ * Details about Montgomery multiplication algorithms can be found at
+ * http://security.ece.orst.edu/publications.html, e.g.
+ * http://security.ece.orst.edu/koc/papers/j37acmon.pdf and
+ * sections 3.8 and 4.2 in http://security.ece.orst.edu/koc/papers/r01rsasw.pdf
+ */
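+
+/* Informally: a value a is kept in Montgomery form as a*R mod N, where
+ * R = 2^(BN_BITS2*N.top).  BN_mod_mul_montgomery() then returns a*b*R^-1
+ * mod N, so the product of two Montgomery-form values stays in Montgomery
+ * form.  The word n0 = -N^-1 mod 2^BN_BITS2 lets the reduction clear one
+ * word of the intermediate product per iteration. */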
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+#define MONT_WORD /* use the faster word-based algorithm */
+
+#ifdef MONT_WORD
+static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont);
+#endif
+
+int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
+ BN_MONT_CTX *mont, BN_CTX *ctx)
+ {
+ BIGNUM *tmp;
+ int ret=0;
+#if defined(OPENSSL_BN_ASM_MONT) && defined(MONT_WORD)
+ int num = mont->N.top;
+
+ if (num>1 && a->top==num && b->top==num)
+ {
+ if (bn_wexpand(r,num) == NULL) return(0);
+ if (bn_mul_mont(r->d,a->d,b->d,mont->N.d,mont->n0,num))
+ {
+ r->neg = a->neg^b->neg;
+ r->top = num;
+ bn_correct_top(r);
+ return(1);
+ }
+ }
+#endif
+
+ BN_CTX_start(ctx);
+ tmp = BN_CTX_get(ctx);
+ if (tmp == NULL) goto err;
+
+ bn_check_top(tmp);
+ if (a == b)
+ {
+ if (!BN_sqr(tmp,a,ctx)) goto err;
+ }
+ else
+ {
+ if (!BN_mul(tmp,a,b,ctx)) goto err;
+ }
+ /* reduce from aRR to aR */
+#ifdef MONT_WORD
+ if (!BN_from_montgomery_word(r,tmp,mont)) goto err;
+#else
+ if (!BN_from_montgomery(r,tmp,mont,ctx)) goto err;
+#endif
+ bn_check_top(r);
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ return(ret);
+ }
+
+#ifdef MONT_WORD
+static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, BN_MONT_CTX *mont)
+ {
+ BIGNUM *n;
+ BN_ULONG *ap,*np,*rp,n0,v,carry;
+ int nl,max,i;
+
+ n= &(mont->N);
+ nl=n->top;
+ if (nl == 0) { ret->top=0; return(1); }
+
+ max=(2*nl); /* carry is stored separately */
+ if (bn_wexpand(r,max) == NULL) return(0);
+
+ r->neg^=n->neg;
+ np=n->d;
+ rp=r->d;
+
+ /* clear the top words of T */
+#if 1
+ for (i=r->top; i<max; i++) /* memset? XXX */
+ rp[i]=0;
+#else
+ memset(&(rp[r->top]),0,(max-r->top)*sizeof(BN_ULONG));
+#endif
+
+ r->top=max;
+ n0=mont->n0[0];
+
+#ifdef BN_COUNT
+ fprintf(stderr,"word BN_from_montgomery_word %d * %d\n",nl,nl);
+#endif
+ for (carry=0, i=0; i<nl; i++, rp++)
+ {
+#ifdef __TANDEM
+ {
+ long long t1;
+ long long t2;
+ long long t3;
+ t1 = rp[0] * (n0 & 0177777);
+ t2 = 037777600000l;
+ t2 = n0 & t2;
+ t3 = rp[0] & 0177777;
+ t2 = (t3 * t2) & BN_MASK2;
+ t1 = t1 + t2;
+ v=bn_mul_add_words(rp,np,nl,(BN_ULONG) t1);
+ }
+#else
+ v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
+#endif
+ v = (v+carry+rp[nl])&BN_MASK2;
+ carry |= (v != rp[nl]);
+ carry &= (v <= rp[nl]);
+ rp[nl]=v;
+ }
+
+ if (bn_wexpand(ret,nl) == NULL) return(0);
+ ret->top=nl;
+ ret->neg=r->neg;
+
+ rp=ret->d;
+ ap=&(r->d[nl]);
+
+#define BRANCH_FREE 1
+#if BRANCH_FREE
+ {
+ BN_ULONG *nrp;
+ size_t m;
+
+ v=bn_sub_words(rp,ap,np,nl)-carry;
+ /* if the subtraction result is the valid one, trick the
+ * unconditional copy below into performing an in-place
+ * "refresh" instead of an actual copy. */
+ m=(0-(size_t)v);
+ nrp=(BN_ULONG *)(((PTR_SIZE_INT)rp&~m)|((PTR_SIZE_INT)ap&m));
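+ /* v is zero when the subtracted value is the one to keep (i.e. the
+ * intermediate result was >= N) and 1 otherwise; m is therefore either
+ * all-zero or all-one bits, so nrp points at rp or ap without a
+ * data-dependent branch.  The copy loop below also zeroes the upper
+ * half of r as it goes. */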
+
+ for (i=0,nl-=4; i<nl; i+=4)
+ {
+ BN_ULONG t1,t2,t3,t4;
+
+ t1=nrp[i+0];
+ t2=nrp[i+1];
+ t3=nrp[i+2]; ap[i+0]=0;
+ t4=nrp[i+3]; ap[i+1]=0;
+ rp[i+0]=t1; ap[i+2]=0;
+ rp[i+1]=t2; ap[i+3]=0;
+ rp[i+2]=t3;
+ rp[i+3]=t4;
+ }
+ for (nl+=4; i<nl; i++)
+ rp[i]=nrp[i], ap[i]=0;
+ }
+#else
+ if (bn_sub_words (rp,ap,np,nl)-carry)
+ memcpy(rp,ap,nl*sizeof(BN_ULONG));
+#endif
+ bn_correct_top(r);
+ bn_correct_top(ret);
+ bn_check_top(ret);
+
+ return(1);
+ }
+#endif /* MONT_WORD */
+
+int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, BN_MONT_CTX *mont,
+ BN_CTX *ctx)
+ {
+ int retn=0;
+#ifdef MONT_WORD
+ BIGNUM *t;
+
+ BN_CTX_start(ctx);
+ if ((t = BN_CTX_get(ctx)) && BN_copy(t,a))
+ retn = BN_from_montgomery_word(ret,t,mont);
+ BN_CTX_end(ctx);
+#else /* !MONT_WORD */
+ BIGNUM *t1,*t2;
+
+ BN_CTX_start(ctx);
+ t1 = BN_CTX_get(ctx);
+ t2 = BN_CTX_get(ctx);
+ if (t1 == NULL || t2 == NULL) goto err;
+
+ if (!BN_copy(t1,a)) goto err;
+ BN_mask_bits(t1,mont->ri);
+
+ if (!BN_mul(t2,t1,&mont->Ni,ctx)) goto err;
+ BN_mask_bits(t2,mont->ri);
+
+ if (!BN_mul(t1,t2,&mont->N,ctx)) goto err;
+ if (!BN_add(t2,a,t1)) goto err;
+ if (!BN_rshift(ret,t2,mont->ri)) goto err;
+
+ if (BN_ucmp(ret, &(mont->N)) >= 0)
+ {
+ if (!BN_usub(ret,ret,&(mont->N))) goto err;
+ }
+ retn=1;
+ bn_check_top(ret);
+ err:
+ BN_CTX_end(ctx);
+#endif /* MONT_WORD */
+ return(retn);
+ }
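+
+/* Conversion into Montgomery form is typically done by multiplying by
+ * mont->RR (= R^2 mod N) with BN_mod_mul_montgomery(); BN_from_montgomery()
+ * performs the inverse conversion, i.e. it multiplies by R^-1 mod N. */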
+
+BN_MONT_CTX *BN_MONT_CTX_new(void)
+ {
+ BN_MONT_CTX *ret;
+
+ if ((ret=(BN_MONT_CTX *)OPENSSL_malloc(sizeof(BN_MONT_CTX))) == NULL)
+ return(NULL);
+
+ BN_MONT_CTX_init(ret);
+ ret->flags=BN_FLG_MALLOCED;
+ return(ret);
+ }
+
+void BN_MONT_CTX_init(BN_MONT_CTX *ctx)
+ {
+ ctx->ri=0;
+ BN_init(&(ctx->RR));
+ BN_init(&(ctx->N));
+ BN_init(&(ctx->Ni));
+ ctx->n0[0] = ctx->n0[1] = 0;
+ ctx->flags=0;
+ }
+
+void BN_MONT_CTX_free(BN_MONT_CTX *mont)
+ {
+ if(mont == NULL)
+ return;
+
+ BN_free(&(mont->RR));
+ BN_free(&(mont->N));
+ BN_free(&(mont->Ni));
+ if (mont->flags & BN_FLG_MALLOCED)
+ OPENSSL_free(mont);
+ }
+
+int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx)
+ {
+ int ret = 0;
+ BIGNUM *Ri,*R;
+
+ BN_CTX_start(ctx);
+ if((Ri = BN_CTX_get(ctx)) == NULL) goto err;
+ R= &(mont->RR); /* grab RR as a temp */
+ if (!BN_copy(&(mont->N),mod)) goto err; /* Set N */
+ mont->N.neg = 0;
+
+#ifdef MONT_WORD
+ {
+ BIGNUM tmod;
+ BN_ULONG buf[2];
+
+ BN_init(&tmod);
+ tmod.d=buf;
+ tmod.dmax=2;
+ tmod.neg=0;
+
+ mont->ri=(BN_num_bits(mod)+(BN_BITS2-1))/BN_BITS2*BN_BITS2;
+
+#if defined(OPENSSL_BN_ASM_MONT) && (BN_BITS2<=32)
+ /* Only certain BN_BITS2<=32 platforms actually make use of
+ * n0[1], and we could use the #else case (with a shorter R
+ * value) for the others. However, currently only the assembler
+ * files do know which is which. */
+
+ BN_zero(R);
+ if (!(BN_set_bit(R,2*BN_BITS2))) goto err;
+
+ tmod.top=0;
+ if ((buf[0] = mod->d[0])) tmod.top=1;
+ if ((buf[1] = mod->top>1 ? mod->d[1] : 0)) tmod.top=2;
+
+ if ((BN_mod_inverse(Ri,R,&tmod,ctx)) == NULL)
+ goto err;
+ if (!BN_lshift(Ri,Ri,2*BN_BITS2)) goto err; /* R*Ri */
+ if (!BN_is_zero(Ri))
+ {
+ if (!BN_sub_word(Ri,1)) goto err;
+ }
+ else /* if N mod word size == 1 */
+ {
+ if (bn_expand(Ri,(int)sizeof(BN_ULONG)*2) == NULL)
+ goto err;
+ /* Ri-- (mod double word size) */
+ Ri->neg=0;
+ Ri->d[0]=BN_MASK2;
+ Ri->d[1]=BN_MASK2;
+ Ri->top=2;
+ }
+ if (!BN_div(Ri,NULL,Ri,&tmod,ctx)) goto err;
+ /* Ni = (R*Ri-1)/N,
+ * keep only couple of least significant words: */
+ mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
+ mont->n0[1] = (Ri->top > 1) ? Ri->d[1] : 0;
+#else
+ BN_zero(R);
+ if (!(BN_set_bit(R,BN_BITS2))) goto err; /* R */
+
+ buf[0]=mod->d[0]; /* tmod = N mod word size */
+ buf[1]=0;
+ tmod.top = buf[0] != 0 ? 1 : 0;
+ /* Ri = R^-1 mod N*/
+ if ((BN_mod_inverse(Ri,R,&tmod,ctx)) == NULL)
+ goto err;
+ if (!BN_lshift(Ri,Ri,BN_BITS2)) goto err; /* R*Ri */
+ if (!BN_is_zero(Ri))
+ {
+ if (!BN_sub_word(Ri,1)) goto err;
+ }
+ else /* if N mod word size == 1 */
+ {
+ if (!BN_set_word(Ri,BN_MASK2)) goto err; /* Ri-- (mod word size) */
+ }
+ if (!BN_div(Ri,NULL,Ri,&tmod,ctx)) goto err;
+ /* Ni = (R*Ri-1)/N,
+ * keep only least significant word: */
+ mont->n0[0] = (Ri->top > 0) ? Ri->d[0] : 0;
+ mont->n0[1] = 0;
+#endif
+ }
+#else /* !MONT_WORD */
+ { /* bignum version */
+ mont->ri=BN_num_bits(&mont->N);
+ BN_zero(R);
+ if (!BN_set_bit(R,mont->ri)) goto err; /* R = 2^ri */
+ /* Ri = R^-1 mod N*/
+ if ((BN_mod_inverse(Ri,R,&mont->N,ctx)) == NULL)
+ goto err;
+ if (!BN_lshift(Ri,Ri,mont->ri)) goto err; /* R*Ri */
+ if (!BN_sub_word(Ri,1)) goto err;
+ /* Ni = (R*Ri-1) / N */
+ if (!BN_div(&(mont->Ni),NULL,Ri,&mont->N,ctx)) goto err;
+ }
+#endif
+
+ /* setup RR for conversions */
+ BN_zero(&(mont->RR));
+ if (!BN_set_bit(&(mont->RR),mont->ri*2)) goto err;
+ if (!BN_mod(&(mont->RR),&(mont->RR),&(mont->N),ctx)) goto err;
+
+ ret = 1;
+err:
+ BN_CTX_end(ctx);
+ return ret;
+ }
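+
+/* On success the context holds: N, the (positive) modulus; ri, with
+ * R = 2^ri and ri the bit length of N rounded up to a word boundary;
+ * n0[], the low word(s) of -N^-1 modulo the (double) word size; and
+ * RR = R^2 mod N, used to convert values into Montgomery form. */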
+
+BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, BN_MONT_CTX *from)
+ {
+ if (to == from) return(to);
+
+ if (!BN_copy(&(to->RR),&(from->RR))) return NULL;
+ if (!BN_copy(&(to->N),&(from->N))) return NULL;
+ if (!BN_copy(&(to->Ni),&(from->Ni))) return NULL;
+ to->ri=from->ri;
+ to->n0[0]=from->n0[0];
+ to->n0[1]=from->n0[1];
+ return(to);
+ }
+
+BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, int lock,
+ const BIGNUM *mod, BN_CTX *ctx)
+ {
+ BN_MONT_CTX *ret;
+
+ CRYPTO_r_lock(lock);
+ ret = *pmont;
+ CRYPTO_r_unlock(lock);
+ if (ret)
+ return ret;
+
+ /* We don't want to serialise globally while doing our lazy-init math in
+ * BN_MONT_CTX_set. That punishes threads that are doing independent
+ * things. Instead, punish the case where more than one thread tries to
+ * lazy-init the same 'pmont', by having each do the lazy-init math work
+ * independently and only use the one from the thread that wins the race
+ * (the losers throw away the work they've done). */
+ ret = BN_MONT_CTX_new();
+ if (!ret)
+ return NULL;
+ if (!BN_MONT_CTX_set(ret, mod, ctx))
+ {
+ BN_MONT_CTX_free(ret);
+ return NULL;
+ }
+
+ /* The locked compare-and-set, after the local work is done. */
+ CRYPTO_w_lock(lock);
+ if (*pmont)
+ {
+ BN_MONT_CTX_free(ret);
+ ret = *pmont;
+ }
+ else
+ *pmont = ret;
+ CRYPTO_w_unlock(lock);
+ return ret;
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mpi.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mpi.c
new file mode 100644
index 00000000..a054d21a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mpi.c
@@ -0,0 +1,130 @@
+/* crypto/bn/bn_mpi.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+int BN_bn2mpi(const BIGNUM *a, unsigned char *d)
+ {
+ int bits;
+ int num=0;
+ int ext=0;
+ long l;
+
+ bits=BN_num_bits(a);
+ num=(bits+7)/8;
+ if (bits > 0)
+ {
+ ext=((bits & 0x07) == 0);
+ }
+ if (d == NULL)
+ return(num+4+ext);
+
+ l=num+ext;
+ d[0]=(unsigned char)(l>>24)&0xff;
+ d[1]=(unsigned char)(l>>16)&0xff;
+ d[2]=(unsigned char)(l>> 8)&0xff;
+ d[3]=(unsigned char)(l )&0xff;
+ if (ext) d[4]=0;
+ num=BN_bn2bin(a,&(d[4+ext]));
+ if (a->neg)
+ d[4]|=0x80;
+ return(num+4+ext);
+ }
+
+BIGNUM *BN_mpi2bn(const unsigned char *d, int n, BIGNUM *a)
+ {
+ long len;
+ int neg=0;
+
+ if (n < 4)
+ {
+ BNerr(BN_F_BN_MPI2BN,BN_R_INVALID_LENGTH);
+ return(NULL);
+ }
+ len=((long)d[0]<<24)|((long)d[1]<<16)|((int)d[2]<<8)|(int)d[3];
+ if ((len+4) != n)
+ {
+ BNerr(BN_F_BN_MPI2BN,BN_R_ENCODING_ERROR);
+ return(NULL);
+ }
+
+ if (a == NULL) a=BN_new();
+ if (a == NULL) return(NULL);
+
+ if (len == 0)
+ {
+ a->neg=0;
+ a->top=0;
+ return(a);
+ }
+ d+=4;
+ if ((*d) & 0x80)
+ neg=1;
+ if (BN_bin2bn(d,(int)len,a) == NULL)
+ return(NULL);
+ a->neg=neg;
+ if (neg)
+ {
+ BN_clear_bit(a,BN_num_bits(a)-1);
+ }
+ bn_check_top(a);
+ return(a);
+ }
+
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mul.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mul.c
new file mode 100644
index 00000000..12e5be80
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_mul.c
@@ -0,0 +1,1166 @@
+/* crypto/bn/bn_mul.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef BN_DEBUG
+# undef NDEBUG /* avoid conflicting definitions */
+# define NDEBUG
+#endif
+
+#include <stdio.h>
+#include <assert.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+#if defined(OPENSSL_NO_ASM) || !defined(OPENSSL_BN_ASM_PART_WORDS)
+/* Here follow specialised variants of bn_add_words() and
+   bn_sub_words().  They have the property of performing operations on
+   arrays of different sizes.  The sizes of those arrays are expressed through
+   cl, which is the common length (basically, min(len(a),len(b))), and dl,
+   which is the delta between the two lengths, calculated as len(a)-len(b).
+   All lengths are the number of BN_ULONGs...  For the operations that require
+   a result array as parameter, it must have the length cl+abs(dl).
+   These functions should probably end up in bn_asm.c as soon as there are
+   assembler counterparts for the systems that use assembler files. */
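+/* For example, with len(a) = 7 and len(b) = 5 words, cl = 5 and dl = 2,
+ * and the result array must hold cl+abs(dl) = 7 words. */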
+
+BN_ULONG bn_sub_part_words(BN_ULONG *r,
+ const BN_ULONG *a, const BN_ULONG *b,
+ int cl, int dl)
+ {
+ BN_ULONG c, t;
+
+ assert(cl >= 0);
+ c = bn_sub_words(r, a, b, cl);
+
+ if (dl == 0)
+ return c;
+
+ r += cl;
+ a += cl;
+ b += cl;
+
+ if (dl < 0)
+ {
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_sub_part_words %d + %d (dl < 0, c = %d)\n", cl, dl, c);
+#endif
+ for (;;)
+ {
+ t = b[0];
+ r[0] = (0-t-c)&BN_MASK2;
+ if (t != 0) c=1;
+ if (++dl >= 0) break;
+
+ t = b[1];
+ r[1] = (0-t-c)&BN_MASK2;
+ if (t != 0) c=1;
+ if (++dl >= 0) break;
+
+ t = b[2];
+ r[2] = (0-t-c)&BN_MASK2;
+ if (t != 0) c=1;
+ if (++dl >= 0) break;
+
+ t = b[3];
+ r[3] = (0-t-c)&BN_MASK2;
+ if (t != 0) c=1;
+ if (++dl >= 0) break;
+
+ b += 4;
+ r += 4;
+ }
+ }
+ else
+ {
+ int save_dl = dl;
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_sub_part_words %d + %d (dl > 0, c = %d)\n", cl, dl, c);
+#endif
+ while(c)
+ {
+ t = a[0];
+ r[0] = (t-c)&BN_MASK2;
+ if (t != 0) c=0;
+ if (--dl <= 0) break;
+
+ t = a[1];
+ r[1] = (t-c)&BN_MASK2;
+ if (t != 0) c=0;
+ if (--dl <= 0) break;
+
+ t = a[2];
+ r[2] = (t-c)&BN_MASK2;
+ if (t != 0) c=0;
+ if (--dl <= 0) break;
+
+ t = a[3];
+ r[3] = (t-c)&BN_MASK2;
+ if (t != 0) c=0;
+ if (--dl <= 0) break;
+
+ save_dl = dl;
+ a += 4;
+ r += 4;
+ }
+ if (dl > 0)
+ {
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_sub_part_words %d + %d (dl > 0, c == 0)\n", cl, dl);
+#endif
+ if (save_dl > dl)
+ {
+ switch (save_dl - dl)
+ {
+ case 1:
+ r[1] = a[1];
+ if (--dl <= 0) break;
+ case 2:
+ r[2] = a[2];
+ if (--dl <= 0) break;
+ case 3:
+ r[3] = a[3];
+ if (--dl <= 0) break;
+ }
+ a += 4;
+ r += 4;
+ }
+ }
+ if (dl > 0)
+ {
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_sub_part_words %d + %d (dl > 0, copy)\n", cl, dl);
+#endif
+ for(;;)
+ {
+ r[0] = a[0];
+ if (--dl <= 0) break;
+ r[1] = a[1];
+ if (--dl <= 0) break;
+ r[2] = a[2];
+ if (--dl <= 0) break;
+ r[3] = a[3];
+ if (--dl <= 0) break;
+
+ a += 4;
+ r += 4;
+ }
+ }
+ }
+ return c;
+ }
+#endif
+
+BN_ULONG bn_add_part_words(BN_ULONG *r,
+ const BN_ULONG *a, const BN_ULONG *b,
+ int cl, int dl)
+ {
+ BN_ULONG c, l, t;
+
+ assert(cl >= 0);
+ c = bn_add_words(r, a, b, cl);
+
+ if (dl == 0)
+ return c;
+
+ r += cl;
+ a += cl;
+ b += cl;
+
+ if (dl < 0)
+ {
+ int save_dl = dl;
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_add_part_words %d + %d (dl < 0, c = %d)\n", cl, dl, c);
+#endif
+ while (c)
+ {
+ l=(c+b[0])&BN_MASK2;
+ c=(l < c);
+ r[0]=l;
+ if (++dl >= 0) break;
+
+ l=(c+b[1])&BN_MASK2;
+ c=(l < c);
+ r[1]=l;
+ if (++dl >= 0) break;
+
+ l=(c+b[2])&BN_MASK2;
+ c=(l < c);
+ r[2]=l;
+ if (++dl >= 0) break;
+
+ l=(c+b[3])&BN_MASK2;
+ c=(l < c);
+ r[3]=l;
+ if (++dl >= 0) break;
+
+ save_dl = dl;
+ b+=4;
+ r+=4;
+ }
+ if (dl < 0)
+ {
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_add_part_words %d + %d (dl < 0, c == 0)\n", cl, dl);
+#endif
+ if (save_dl < dl)
+ {
+ switch (dl - save_dl)
+ {
+ case 1:
+ r[1] = b[1];
+ if (++dl >= 0) break;
+ case 2:
+ r[2] = b[2];
+ if (++dl >= 0) break;
+ case 3:
+ r[3] = b[3];
+ if (++dl >= 0) break;
+ }
+ b += 4;
+ r += 4;
+ }
+ }
+ if (dl < 0)
+ {
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_add_part_words %d + %d (dl < 0, copy)\n", cl, dl);
+#endif
+ for(;;)
+ {
+ r[0] = b[0];
+ if (++dl >= 0) break;
+ r[1] = b[1];
+ if (++dl >= 0) break;
+ r[2] = b[2];
+ if (++dl >= 0) break;
+ r[3] = b[3];
+ if (++dl >= 0) break;
+
+ b += 4;
+ r += 4;
+ }
+ }
+ }
+ else
+ {
+ int save_dl = dl;
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_add_part_words %d + %d (dl > 0)\n", cl, dl);
+#endif
+ while (c)
+ {
+ t=(a[0]+c)&BN_MASK2;
+ c=(t < c);
+ r[0]=t;
+ if (--dl <= 0) break;
+
+ t=(a[1]+c)&BN_MASK2;
+ c=(t < c);
+ r[1]=t;
+ if (--dl <= 0) break;
+
+ t=(a[2]+c)&BN_MASK2;
+ c=(t < c);
+ r[2]=t;
+ if (--dl <= 0) break;
+
+ t=(a[3]+c)&BN_MASK2;
+ c=(t < c);
+ r[3]=t;
+ if (--dl <= 0) break;
+
+ save_dl = dl;
+ a+=4;
+ r+=4;
+ }
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_add_part_words %d + %d (dl > 0, c == 0)\n", cl, dl);
+#endif
+ if (dl > 0)
+ {
+ if (save_dl > dl)
+ {
+ switch (save_dl - dl)
+ {
+ case 1:
+ r[1] = a[1];
+ if (--dl <= 0) break;
+ case 2:
+ r[2] = a[2];
+ if (--dl <= 0) break;
+ case 3:
+ r[3] = a[3];
+ if (--dl <= 0) break;
+ }
+ a += 4;
+ r += 4;
+ }
+ }
+ if (dl > 0)
+ {
+#ifdef BN_COUNT
+ fprintf(stderr, " bn_add_part_words %d + %d (dl > 0, copy)\n", cl, dl);
+#endif
+ for(;;)
+ {
+ r[0] = a[0];
+ if (--dl <= 0) break;
+ r[1] = a[1];
+ if (--dl <= 0) break;
+ r[2] = a[2];
+ if (--dl <= 0) break;
+ r[3] = a[3];
+ if (--dl <= 0) break;
+
+ a += 4;
+ r += 4;
+ }
+ }
+ }
+ return c;
+ }
+
+#ifdef BN_RECURSION
+/* Karatsuba recursive multiplication algorithm
+ * (cf. Knuth, The Art of Computer Programming, Vol. 2) */
+
+/* r is 2*n2 words in size,
+ * a and b are both n2 words in size.
+ * n2 must be a power of 2.
+ * We multiply and return the result.
+ * t must be 2*n2 words in size
+ * We calculate
+ * a[0]*b[0]
+ * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
+ * a[1]*b[1]
+ */
+/* dnX may not be positive, but n2/2+dnX has to be */
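+/* With n = n2/2 and B = 2^(n*BN_BITS2), writing a = a1*B + a0 and
+ * b = b1*B + b0, the product is recombined as
+ *   a*b = a1*b1*B^2 + (a0*b0 + a1*b1 + (a0-a1)*(b1-b0))*B + a0*b0,
+ * so only three half-size multiplications are needed. */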
+void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
+ int dna, int dnb, BN_ULONG *t)
+ {
+ int n=n2/2,c1,c2;
+ int tna=n+dna, tnb=n+dnb;
+ unsigned int neg,zero;
+ BN_ULONG ln,lo,*p;
+
+# ifdef BN_COUNT
+ fprintf(stderr," bn_mul_recursive %d%+d * %d%+d\n",n2,dna,n2,dnb);
+# endif
+# ifdef BN_MUL_COMBA
+# if 0
+ if (n2 == 4)
+ {
+ bn_mul_comba4(r,a,b);
+ return;
+ }
+# endif
+ /* Only call bn_mul_comba8 if n2 == 8 and the
+ * two arrays are complete [steve]
+ */
+ if (n2 == 8 && dna == 0 && dnb == 0)
+ {
+ bn_mul_comba8(r,a,b);
+ return;
+ }
+# endif /* BN_MUL_COMBA */
+ /* Else do normal multiply */
+ if (n2 < BN_MUL_RECURSIVE_SIZE_NORMAL)
+ {
+ bn_mul_normal(r,a,n2+dna,b,n2+dnb);
+ if ((dna + dnb) < 0)
+ memset(&r[2*n2 + dna + dnb], 0,
+ sizeof(BN_ULONG) * -(dna + dnb));
+ return;
+ }
+ /* r=(a[0]-a[1])*(b[1]-b[0]) */
+ c1=bn_cmp_part_words(a,&(a[n]),tna,n-tna);
+ c2=bn_cmp_part_words(&(b[n]),b,tnb,tnb-n);
+ zero=neg=0;
+ switch (c1*3+c2)
+ {
+ case -4:
+ bn_sub_part_words(t, &(a[n]),a, tna,tna-n); /* - */
+ bn_sub_part_words(&(t[n]),b, &(b[n]),tnb,n-tnb); /* - */
+ break;
+ case -3:
+ zero=1;
+ break;
+ case -2:
+ bn_sub_part_words(t, &(a[n]),a, tna,tna-n); /* - */
+ bn_sub_part_words(&(t[n]),&(b[n]),b, tnb,tnb-n); /* + */
+ neg=1;
+ break;
+ case -1:
+ case 0:
+ case 1:
+ zero=1;
+ break;
+ case 2:
+ bn_sub_part_words(t, a, &(a[n]),tna,n-tna); /* + */
+ bn_sub_part_words(&(t[n]),b, &(b[n]),tnb,n-tnb); /* - */
+ neg=1;
+ break;
+ case 3:
+ zero=1;
+ break;
+ case 4:
+ bn_sub_part_words(t, a, &(a[n]),tna,n-tna);
+ bn_sub_part_words(&(t[n]),&(b[n]),b, tnb,tnb-n);
+ break;
+ }
+
+# ifdef BN_MUL_COMBA
+ if (n == 4 && dna == 0 && dnb == 0) /* XXX: bn_mul_comba4 could take
+ extra args to do this well */
+ {
+ if (!zero)
+ bn_mul_comba4(&(t[n2]),t,&(t[n]));
+ else
+ memset(&(t[n2]),0,8*sizeof(BN_ULONG));
+
+ bn_mul_comba4(r,a,b);
+ bn_mul_comba4(&(r[n2]),&(a[n]),&(b[n]));
+ }
+ else if (n == 8 && dna == 0 && dnb == 0) /* XXX: bn_mul_comba8 could
+ take extra args to do this
+ well */
+ {
+ if (!zero)
+ bn_mul_comba8(&(t[n2]),t,&(t[n]));
+ else
+ memset(&(t[n2]),0,16*sizeof(BN_ULONG));
+
+ bn_mul_comba8(r,a,b);
+ bn_mul_comba8(&(r[n2]),&(a[n]),&(b[n]));
+ }
+ else
+# endif /* BN_MUL_COMBA */
+ {
+ p= &(t[n2*2]);
+ if (!zero)
+ bn_mul_recursive(&(t[n2]),t,&(t[n]),n,0,0,p);
+ else
+ memset(&(t[n2]),0,n2*sizeof(BN_ULONG));
+ bn_mul_recursive(r,a,b,n,0,0,p);
+ bn_mul_recursive(&(r[n2]),&(a[n]),&(b[n]),n,dna,dnb,p);
+ }
+
+ /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (a[1]*b[1])
+ */
+
+ c1=(int)(bn_add_words(t,r,&(r[n2]),n2));
+
+ if (neg) /* if t[32] is negative */
+ {
+ c1-=(int)(bn_sub_words(&(t[n2]),t,&(t[n2]),n2));
+ }
+ else
+ {
+ /* Might have a carry */
+ c1+=(int)(bn_add_words(&(t[n2]),&(t[n2]),t,n2));
+ }
+
+ /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (a[1]*b[1])
+ * c1 holds the carry bits
+ */
+ c1+=(int)(bn_add_words(&(r[n]),&(r[n]),&(t[n2]),n2));
+ if (c1)
+ {
+ p= &(r[n+n2]);
+ lo= *p;
+ ln=(lo+c1)&BN_MASK2;
+ *p=ln;
+
+ /* The overflow will stop before we overwrite
+ * words we should not overwrite */
+ if (ln < (BN_ULONG)c1)
+ {
+ do {
+ p++;
+ lo= *p;
+ ln=(lo+1)&BN_MASK2;
+ *p=ln;
+ } while (ln == 0);
+ }
+ }
+ }
+
+/* n+tna and n+tnb are the word lengths of a and b;
+ * t needs to be n*4 in size, as does r */
+/* tnX must be non-negative and less than n */
+void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
+ int tna, int tnb, BN_ULONG *t)
+ {
+ int i,j,n2=n*2;
+ int c1,c2,neg;
+ BN_ULONG ln,lo,*p;
+
+# ifdef BN_COUNT
+ fprintf(stderr," bn_mul_part_recursive (%d%+d) * (%d%+d)\n",
+ n, tna, n, tnb);
+# endif
+ if (n < 8)
+ {
+ bn_mul_normal(r,a,n+tna,b,n+tnb);
+ return;
+ }
+
+ /* r=(a[0]-a[1])*(b[1]-b[0]) */
+ c1=bn_cmp_part_words(a,&(a[n]),tna,n-tna);
+ c2=bn_cmp_part_words(&(b[n]),b,tnb,tnb-n);
+ neg=0;
+ switch (c1*3+c2)
+ {
+ case -4:
+ bn_sub_part_words(t, &(a[n]),a, tna,tna-n); /* - */
+ bn_sub_part_words(&(t[n]),b, &(b[n]),tnb,n-tnb); /* - */
+ break;
+ case -3:
+ /* break; */
+ case -2:
+ bn_sub_part_words(t, &(a[n]),a, tna,tna-n); /* - */
+ bn_sub_part_words(&(t[n]),&(b[n]),b, tnb,tnb-n); /* + */
+ neg=1;
+ break;
+ case -1:
+ case 0:
+ case 1:
+ /* break; */
+ case 2:
+ bn_sub_part_words(t, a, &(a[n]),tna,n-tna); /* + */
+ bn_sub_part_words(&(t[n]),b, &(b[n]),tnb,n-tnb); /* - */
+ neg=1;
+ break;
+ case 3:
+ /* break; */
+ case 4:
+ bn_sub_part_words(t, a, &(a[n]),tna,n-tna);
+ bn_sub_part_words(&(t[n]),&(b[n]),b, tnb,tnb-n);
+ break;
+ }
+ /* The zero case isn't yet implemented here. The speedup
+ would probably be negligible. */
+# if 0
+ if (n == 4)
+ {
+ bn_mul_comba4(&(t[n2]),t,&(t[n]));
+ bn_mul_comba4(r,a,b);
+ bn_mul_normal(&(r[n2]),&(a[n]),tn,&(b[n]),tn);
+ memset(&(r[n2+tn*2]),0,sizeof(BN_ULONG)*(n2-tn*2));
+ }
+ else
+# endif
+ if (n == 8)
+ {
+ bn_mul_comba8(&(t[n2]),t,&(t[n]));
+ bn_mul_comba8(r,a,b);
+ bn_mul_normal(&(r[n2]),&(a[n]),tna,&(b[n]),tnb);
+ memset(&(r[n2+tna+tnb]),0,sizeof(BN_ULONG)*(n2-tna-tnb));
+ }
+ else
+ {
+ p= &(t[n2*2]);
+ bn_mul_recursive(&(t[n2]),t,&(t[n]),n,0,0,p);
+ bn_mul_recursive(r,a,b,n,0,0,p);
+ i=n/2;
+ /* If there is only a bottom half to the number,
+ * just do it */
+ if (tna > tnb)
+ j = tna - i;
+ else
+ j = tnb - i;
+ if (j == 0)
+ {
+ bn_mul_recursive(&(r[n2]),&(a[n]),&(b[n]),
+ i,tna-i,tnb-i,p);
+ memset(&(r[n2+i*2]),0,sizeof(BN_ULONG)*(n2-i*2));
+ }
+ else if (j > 0) /* eg, n == 16, i == 8 and tn == 11 */
+ {
+ bn_mul_part_recursive(&(r[n2]),&(a[n]),&(b[n]),
+ i,tna-i,tnb-i,p);
+ memset(&(r[n2+tna+tnb]),0,
+ sizeof(BN_ULONG)*(n2-tna-tnb));
+ }
+ else /* (j < 0) eg, n == 16, i == 8 and tn == 5 */
+ {
+ memset(&(r[n2]),0,sizeof(BN_ULONG)*n2);
+ if (tna < BN_MUL_RECURSIVE_SIZE_NORMAL
+ && tnb < BN_MUL_RECURSIVE_SIZE_NORMAL)
+ {
+ bn_mul_normal(&(r[n2]),&(a[n]),tna,&(b[n]),tnb);
+ }
+ else
+ {
+ for (;;)
+ {
+ i/=2;
+ /* these simplified conditions work
+ * exclusively because the difference
+ * between tna and tnb is 1 or 0 */
+ if (i < tna || i < tnb)
+ {
+ bn_mul_part_recursive(&(r[n2]),
+ &(a[n]),&(b[n]),
+ i,tna-i,tnb-i,p);
+ break;
+ }
+ else if (i == tna || i == tnb)
+ {
+ bn_mul_recursive(&(r[n2]),
+ &(a[n]),&(b[n]),
+ i,tna-i,tnb-i,p);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (a[1]*b[1])
+ */
+
+ c1=(int)(bn_add_words(t,r,&(r[n2]),n2));
+
+ if (neg) /* if t[32] is negative */
+ {
+ c1-=(int)(bn_sub_words(&(t[n2]),t,&(t[n2]),n2));
+ }
+ else
+ {
+ /* Might have a carry */
+ c1+=(int)(bn_add_words(&(t[n2]),&(t[n2]),t,n2));
+ }
+
+ /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (a[1]*b[1])
+ * c1 holds the carry bits
+ */
+ c1+=(int)(bn_add_words(&(r[n]),&(r[n]),&(t[n2]),n2));
+ if (c1)
+ {
+ p= &(r[n+n2]);
+ lo= *p;
+ ln=(lo+c1)&BN_MASK2;
+ *p=ln;
+
+ /* The overflow will stop before we overwrite
+ * words we should not overwrite */
+ if (ln < (BN_ULONG)c1)
+ {
+ do {
+ p++;
+ lo= *p;
+ ln=(lo+1)&BN_MASK2;
+ *p=ln;
+ } while (ln == 0);
+ }
+ }
+ }
+
+/* a and b must be the same size, which is n2.
+ * r needs to be n2 words and t needs to be n2*2
+ */
+void bn_mul_low_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
+ BN_ULONG *t)
+ {
+ int n=n2/2;
+
+# ifdef BN_COUNT
+ fprintf(stderr," bn_mul_low_recursive %d * %d\n",n2,n2);
+# endif
+
+ bn_mul_recursive(r,a,b,n,0,0,&(t[0]));
+ if (n >= BN_MUL_LOW_RECURSIVE_SIZE_NORMAL)
+ {
+ bn_mul_low_recursive(&(t[0]),&(a[0]),&(b[n]),n,&(t[n2]));
+ bn_add_words(&(r[n]),&(r[n]),&(t[0]),n);
+ bn_mul_low_recursive(&(t[0]),&(a[n]),&(b[0]),n,&(t[n2]));
+ bn_add_words(&(r[n]),&(r[n]),&(t[0]),n);
+ }
+ else
+ {
+ bn_mul_low_normal(&(t[0]),&(a[0]),&(b[n]),n);
+ bn_mul_low_normal(&(t[n]),&(a[n]),&(b[0]),n);
+ bn_add_words(&(r[n]),&(r[n]),&(t[0]),n);
+ bn_add_words(&(r[n]),&(r[n]),&(t[n]),n);
+ }
+ }
+
+/* a and b must be the same size, which is n2.
+ * r needs to be n2 words, l holds the low n2 words of the product,
+ * and t needs to be n2*3 words of temporary space.
+ */
+void bn_mul_high(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, BN_ULONG *l, int n2,
+ BN_ULONG *t)
+ {
+ int i,n;
+ int c1,c2;
+ int neg,oneg,zero;
+ BN_ULONG ll,lc,*lp,*mp;
+
+# ifdef BN_COUNT
+ fprintf(stderr," bn_mul_high %d * %d\n",n2,n2);
+# endif
+ n=n2/2;
+
+ /* Calculate (al-ah)*(bh-bl) */
+ neg=zero=0;
+ c1=bn_cmp_words(&(a[0]),&(a[n]),n);
+ c2=bn_cmp_words(&(b[n]),&(b[0]),n);
+ switch (c1*3+c2)
+ {
+ case -4:
+ bn_sub_words(&(r[0]),&(a[n]),&(a[0]),n);
+ bn_sub_words(&(r[n]),&(b[0]),&(b[n]),n);
+ break;
+ case -3:
+ zero=1;
+ break;
+ case -2:
+ bn_sub_words(&(r[0]),&(a[n]),&(a[0]),n);
+ bn_sub_words(&(r[n]),&(b[n]),&(b[0]),n);
+ neg=1;
+ break;
+ case -1:
+ case 0:
+ case 1:
+ zero=1;
+ break;
+ case 2:
+ bn_sub_words(&(r[0]),&(a[0]),&(a[n]),n);
+ bn_sub_words(&(r[n]),&(b[0]),&(b[n]),n);
+ neg=1;
+ break;
+ case 3:
+ zero=1;
+ break;
+ case 4:
+ bn_sub_words(&(r[0]),&(a[0]),&(a[n]),n);
+ bn_sub_words(&(r[n]),&(b[n]),&(b[0]),n);
+ break;
+ }
+
+ oneg=neg;
+ /* t[10] = (a[0]-a[1])*(b[1]-b[0]) */
+ /* r[10] = (a[1]*b[1]) */
+# ifdef BN_MUL_COMBA
+ if (n == 8)
+ {
+ bn_mul_comba8(&(t[0]),&(r[0]),&(r[n]));
+ bn_mul_comba8(r,&(a[n]),&(b[n]));
+ }
+ else
+# endif
+ {
+ bn_mul_recursive(&(t[0]),&(r[0]),&(r[n]),n,0,0,&(t[n2]));
+ bn_mul_recursive(r,&(a[n]),&(b[n]),n,0,0,&(t[n2]));
+ }
+
+ /* s0 == low(al*bl)
+ * s1 == low(ah*bh)+low((al-ah)*(bh-bl))+low(al*bl)+high(al*bl)
+ * We know s0 and s1 so the only unknown is high(al*bl)
+ * high(al*bl) == s1 - low(ah*bh+s0+(al-ah)*(bh-bl))
+ * high(al*bl) == s1 - (r[0]+l[0]+t[0])
+ */
+ if (l != NULL)
+ {
+ lp= &(t[n2+n]);
+ c1=(int)(bn_add_words(lp,&(r[0]),&(l[0]),n));
+ }
+ else
+ {
+ c1=0;
+ lp= &(r[0]);
+ }
+
+ if (neg)
+ neg=(int)(bn_sub_words(&(t[n2]),lp,&(t[0]),n));
+ else
+ {
+ bn_add_words(&(t[n2]),lp,&(t[0]),n);
+ neg=0;
+ }
+
+ if (l != NULL)
+ {
+ bn_sub_words(&(t[n2+n]),&(l[n]),&(t[n2]),n);
+ }
+ else
+ {
+ lp= &(t[n2+n]);
+ mp= &(t[n2]);
+ for (i=0; i<n; i++)
+ lp[i]=((~mp[i])+1)&BN_MASK2;
+ }
+
+ /* s[0] = low(al*bl)
+ * t[3] = high(al*bl)
+ * t[10] = (a[0]-a[1])*(b[1]-b[0]) neg is the sign
+ * r[10] = (a[1]*b[1])
+ */
+ /* R[10] = al*bl
+ * R[21] = al*bl + ah*bh + (a[0]-a[1])*(b[1]-b[0])
+ * R[32] = ah*bh
+ */
+ /* R[1]=t[3]+l[0]+r[0](+-)t[0] (have carry/borrow)
+ * R[2]=r[0]+t[3]+r[1](+-)t[1] (have carry/borrow)
+ * R[3]=r[1]+(carry/borrow)
+ */
+ if (l != NULL)
+ {
+ lp= &(t[n2]);
+ c1= (int)(bn_add_words(lp,&(t[n2+n]),&(l[0]),n));
+ }
+ else
+ {
+ lp= &(t[n2+n]);
+ c1=0;
+ }
+ c1+=(int)(bn_add_words(&(t[n2]),lp, &(r[0]),n));
+ if (oneg)
+ c1-=(int)(bn_sub_words(&(t[n2]),&(t[n2]),&(t[0]),n));
+ else
+ c1+=(int)(bn_add_words(&(t[n2]),&(t[n2]),&(t[0]),n));
+
+ c2 =(int)(bn_add_words(&(r[0]),&(r[0]),&(t[n2+n]),n));
+ c2+=(int)(bn_add_words(&(r[0]),&(r[0]),&(r[n]),n));
+ if (oneg)
+ c2-=(int)(bn_sub_words(&(r[0]),&(r[0]),&(t[n]),n));
+ else
+ c2+=(int)(bn_add_words(&(r[0]),&(r[0]),&(t[n]),n));
+
+ if (c1 != 0) /* Add starting at r[0], could be +ve or -ve */
+ {
+ i=0;
+ if (c1 > 0)
+ {
+ lc=c1;
+ do {
+ ll=(r[i]+lc)&BN_MASK2;
+ r[i++]=ll;
+ lc=(lc > ll);
+ } while (lc);
+ }
+ else
+ {
+ lc= -c1;
+ do {
+ ll=r[i];
+ r[i++]=(ll-lc)&BN_MASK2;
+ lc=(lc > ll);
+ } while (lc);
+ }
+ }
+ if (c2 != 0) /* Add starting at r[1] */
+ {
+ i=n;
+ if (c2 > 0)
+ {
+ lc=c2;
+ do {
+ ll=(r[i]+lc)&BN_MASK2;
+ r[i++]=ll;
+ lc=(lc > ll);
+ } while (lc);
+ }
+ else
+ {
+ lc= -c2;
+ do {
+ ll=r[i];
+ r[i++]=(ll-lc)&BN_MASK2;
+ lc=(lc > ll);
+ } while (lc);
+ }
+ }
+ }
+#endif /* BN_RECURSION */
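+
+/* BN_mul() below picks a strategy from the operand sizes: the fixed-size
+ * comba code when both operands are exactly 8 words, Karatsuba recursion
+ * for large operands whose lengths differ by at most one word, and the
+ * schoolbook bn_mul_normal() otherwise. */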
+
+int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
+ {
+ int ret=0;
+ int top,al,bl;
+ BIGNUM *rr;
+#if defined(BN_MUL_COMBA) || defined(BN_RECURSION)
+ int i;
+#endif
+#ifdef BN_RECURSION
+ BIGNUM *t=NULL;
+ int j=0,k;
+#endif
+
+#ifdef BN_COUNT
+ fprintf(stderr,"BN_mul %d * %d\n",a->top,b->top);
+#endif
+
+ bn_check_top(a);
+ bn_check_top(b);
+ bn_check_top(r);
+
+ al=a->top;
+ bl=b->top;
+
+ if ((al == 0) || (bl == 0))
+ {
+ BN_zero(r);
+ return(1);
+ }
+ top=al+bl;
+
+ BN_CTX_start(ctx);
+ if ((r == a) || (r == b))
+ {
+ if ((rr = BN_CTX_get(ctx)) == NULL) goto err;
+ }
+ else
+ rr = r;
+ rr->neg=a->neg^b->neg;
+
+#if defined(BN_MUL_COMBA) || defined(BN_RECURSION)
+ i = al-bl;
+#endif
+#ifdef BN_MUL_COMBA
+ if (i == 0)
+ {
+# if 0
+ if (al == 4)
+ {
+ if (bn_wexpand(rr,8) == NULL) goto err;
+ rr->top=8;
+ bn_mul_comba4(rr->d,a->d,b->d);
+ goto end;
+ }
+# endif
+ if (al == 8)
+ {
+ if (bn_wexpand(rr,16) == NULL) goto err;
+ rr->top=16;
+ bn_mul_comba8(rr->d,a->d,b->d);
+ goto end;
+ }
+ }
+#endif /* BN_MUL_COMBA */
+#ifdef BN_RECURSION
+ if ((al >= BN_MULL_SIZE_NORMAL) && (bl >= BN_MULL_SIZE_NORMAL))
+ {
+ if (i >= -1 && i <= 1)
+ {
+ /* Find out the power of two lower or equal
+ to the longest of the two numbers */
+ if (i >= 0)
+ {
+ j = BN_num_bits_word((BN_ULONG)al);
+ }
+ if (i == -1)
+ {
+ j = BN_num_bits_word((BN_ULONG)bl);
+ }
+ j = 1<<(j-1);
+ assert(j <= al || j <= bl);
+ k = j+j;
+ t = BN_CTX_get(ctx);
+ if (t == NULL)
+ goto err;
+ if (al > j || bl > j)
+ {
+ if (bn_wexpand(t,k*4) == NULL) goto err;
+ if (bn_wexpand(rr,k*4) == NULL) goto err;
+ bn_mul_part_recursive(rr->d,a->d,b->d,
+ j,al-j,bl-j,t->d);
+ }
+ else /* al <= j || bl <= j */
+ {
+ if (bn_wexpand(t,k*2) == NULL) goto err;
+ if (bn_wexpand(rr,k*2) == NULL) goto err;
+ bn_mul_recursive(rr->d,a->d,b->d,
+ j,al-j,bl-j,t->d);
+ }
+ rr->top=top;
+ goto end;
+ }
+#if 0
+ if (i == 1 && !BN_get_flags(b,BN_FLG_STATIC_DATA))
+ {
+ BIGNUM *tmp_bn = (BIGNUM *)b;
+ if (bn_wexpand(tmp_bn,al) == NULL) goto err;
+ tmp_bn->d[bl]=0;
+ bl++;
+ i--;
+ }
+ else if (i == -1 && !BN_get_flags(a,BN_FLG_STATIC_DATA))
+ {
+ BIGNUM *tmp_bn = (BIGNUM *)a;
+ if (bn_wexpand(tmp_bn,bl) == NULL) goto err;
+ tmp_bn->d[al]=0;
+ al++;
+ i++;
+ }
+ if (i == 0)
+ {
+ /* symmetric and > 4 */
+ /* 16 or larger */
+ j=BN_num_bits_word((BN_ULONG)al);
+ j=1<<(j-1);
+ k=j+j;
+ t = BN_CTX_get(ctx);
+ if (al == j) /* exact multiple */
+ {
+ if (bn_wexpand(t,k*2) == NULL) goto err;
+ if (bn_wexpand(rr,k*2) == NULL) goto err;
+ bn_mul_recursive(rr->d,a->d,b->d,al,t->d);
+ }
+ else
+ {
+ if (bn_wexpand(t,k*4) == NULL) goto err;
+ if (bn_wexpand(rr,k*4) == NULL) goto err;
+ bn_mul_part_recursive(rr->d,a->d,b->d,al-j,j,t->d);
+ }
+ rr->top=top;
+ goto end;
+ }
+#endif
+ }
+#endif /* BN_RECURSION */
+ if (bn_wexpand(rr,top) == NULL) goto err;
+ rr->top=top;
+ bn_mul_normal(rr->d,a->d,al,b->d,bl);
+
+#if defined(BN_MUL_COMBA) || defined(BN_RECURSION)
+end:
+#endif
+ bn_correct_top(rr);
+ if (r != rr) BN_copy(r,rr);
+ ret=1;
+err:
+ bn_check_top(r);
+ BN_CTX_end(ctx);
+ return(ret);
+ }
+
+void bn_mul_normal(BN_ULONG *r, BN_ULONG *a, int na, BN_ULONG *b, int nb)
+ {
+ BN_ULONG *rr;
+
+#ifdef BN_COUNT
+ fprintf(stderr," bn_mul_normal %d * %d\n",na,nb);
+#endif
+
+ if (na < nb)
+ {
+ int itmp;
+ BN_ULONG *ltmp;
+
+ itmp=na; na=nb; nb=itmp;
+ ltmp=a; a=b; b=ltmp;
+
+ }
+ rr= &(r[na]);
+ if (nb <= 0)
+ {
+ (void)bn_mul_words(r,a,na,0);
+ return;
+ }
+ else
+ rr[0]=bn_mul_words(r,a,na,b[0]);
+
+ for (;;)
+ {
+ if (--nb <= 0) return;
+ rr[1]=bn_mul_add_words(&(r[1]),a,na,b[1]);
+ if (--nb <= 0) return;
+ rr[2]=bn_mul_add_words(&(r[2]),a,na,b[2]);
+ if (--nb <= 0) return;
+ rr[3]=bn_mul_add_words(&(r[3]),a,na,b[3]);
+ if (--nb <= 0) return;
+ rr[4]=bn_mul_add_words(&(r[4]),a,na,b[4]);
+ rr+=4;
+ r+=4;
+ b+=4;
+ }
+ }
+
+void bn_mul_low_normal(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+ {
+#ifdef BN_COUNT
+ fprintf(stderr," bn_mul_low_normal %d * %d\n",n,n);
+#endif
+ bn_mul_words(r,a,n,b[0]);
+
+ for (;;)
+ {
+ if (--n <= 0) return;
+ bn_mul_add_words(&(r[1]),a,n,b[1]);
+ if (--n <= 0) return;
+ bn_mul_add_words(&(r[2]),a,n,b[2]);
+ if (--n <= 0) return;
+ bn_mul_add_words(&(r[3]),a,n,b[3]);
+ if (--n <= 0) return;
+ bn_mul_add_words(&(r[4]),a,n,b[4]);
+ r+=4;
+ b+=4;
+ }
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_nist.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_nist.c
new file mode 100644
index 00000000..e22968d4
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_nist.c
@@ -0,0 +1,1109 @@
+/* crypto/bn/bn_nist.c */
+/*
+ * Written by Nils Larsch for the OpenSSL project
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include "bn_lcl.h"
+#include "cryptlib.h"
+
+
+#define BN_NIST_192_TOP (192+BN_BITS2-1)/BN_BITS2
+#define BN_NIST_224_TOP (224+BN_BITS2-1)/BN_BITS2
+#define BN_NIST_256_TOP (256+BN_BITS2-1)/BN_BITS2
+#define BN_NIST_384_TOP (384+BN_BITS2-1)/BN_BITS2
+#define BN_NIST_521_TOP (521+BN_BITS2-1)/BN_BITS2
+
+/* pre-computed tables are "carry-less" values of modulus*(i+1) */
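+/* _nist_p_xxx[i] is (i+1) times the corresponding prime with the top carry
+ * word dropped, so several surplus multiples of the modulus can be removed
+ * with a single bn_sub_words() call; _nist_p_xxx_sqr is the square of the
+ * prime, the bound below which the fast reductions below apply. */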
+#if BN_BITS2 == 64
+static const BN_ULONG _nist_p_192[][BN_NIST_192_TOP] = {
+ {0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFEULL,0xFFFFFFFFFFFFFFFFULL},
+ {0xFFFFFFFFFFFFFFFEULL,0xFFFFFFFFFFFFFFFDULL,0xFFFFFFFFFFFFFFFFULL},
+ {0xFFFFFFFFFFFFFFFDULL,0xFFFFFFFFFFFFFFFCULL,0xFFFFFFFFFFFFFFFFULL}
+ };
+static const BN_ULONG _nist_p_192_sqr[] = {
+ 0x0000000000000001ULL,0x0000000000000002ULL,0x0000000000000001ULL,
+ 0xFFFFFFFFFFFFFFFEULL,0xFFFFFFFFFFFFFFFDULL,0xFFFFFFFFFFFFFFFFULL
+ };
+static const BN_ULONG _nist_p_224[][BN_NIST_224_TOP] = {
+ {0x0000000000000001ULL,0xFFFFFFFF00000000ULL,
+ 0xFFFFFFFFFFFFFFFFULL,0x00000000FFFFFFFFULL},
+ {0x0000000000000002ULL,0xFFFFFFFE00000000ULL,
+ 0xFFFFFFFFFFFFFFFFULL,0x00000001FFFFFFFFULL} /* this one is "carry-full" */
+ };
+static const BN_ULONG _nist_p_224_sqr[] = {
+ 0x0000000000000001ULL,0xFFFFFFFE00000000ULL,
+ 0xFFFFFFFFFFFFFFFFULL,0x0000000200000000ULL,
+ 0x0000000000000000ULL,0xFFFFFFFFFFFFFFFEULL,
+ 0xFFFFFFFFFFFFFFFFULL
+ };
+static const BN_ULONG _nist_p_256[][BN_NIST_256_TOP] = {
+ {0xFFFFFFFFFFFFFFFFULL,0x00000000FFFFFFFFULL,
+ 0x0000000000000000ULL,0xFFFFFFFF00000001ULL},
+ {0xFFFFFFFFFFFFFFFEULL,0x00000001FFFFFFFFULL,
+ 0x0000000000000000ULL,0xFFFFFFFE00000002ULL},
+ {0xFFFFFFFFFFFFFFFDULL,0x00000002FFFFFFFFULL,
+ 0x0000000000000000ULL,0xFFFFFFFD00000003ULL},
+ {0xFFFFFFFFFFFFFFFCULL,0x00000003FFFFFFFFULL,
+ 0x0000000000000000ULL,0xFFFFFFFC00000004ULL},
+ {0xFFFFFFFFFFFFFFFBULL,0x00000004FFFFFFFFULL,
+ 0x0000000000000000ULL,0xFFFFFFFB00000005ULL},
+ };
+static const BN_ULONG _nist_p_256_sqr[] = {
+ 0x0000000000000001ULL,0xFFFFFFFE00000000ULL,
+ 0xFFFFFFFFFFFFFFFFULL,0x00000001FFFFFFFEULL,
+ 0x00000001FFFFFFFEULL,0x00000001FFFFFFFEULL,
+ 0xFFFFFFFE00000001ULL,0xFFFFFFFE00000002ULL
+ };
+static const BN_ULONG _nist_p_384[][BN_NIST_384_TOP] = {
+ {0x00000000FFFFFFFFULL,0xFFFFFFFF00000000ULL,0xFFFFFFFFFFFFFFFEULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL},
+ {0x00000001FFFFFFFEULL,0xFFFFFFFE00000000ULL,0xFFFFFFFFFFFFFFFDULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL},
+ {0x00000002FFFFFFFDULL,0xFFFFFFFD00000000ULL,0xFFFFFFFFFFFFFFFCULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL},
+ {0x00000003FFFFFFFCULL,0xFFFFFFFC00000000ULL,0xFFFFFFFFFFFFFFFBULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL},
+ {0x00000004FFFFFFFBULL,0xFFFFFFFB00000000ULL,0xFFFFFFFFFFFFFFFAULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL},
+ };
+static const BN_ULONG _nist_p_384_sqr[] = {
+ 0xFFFFFFFE00000001ULL,0x0000000200000000ULL,0xFFFFFFFE00000000ULL,
+ 0x0000000200000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,
+ 0x00000001FFFFFFFEULL,0xFFFFFFFE00000000ULL,0xFFFFFFFFFFFFFFFDULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL
+ };
+static const BN_ULONG _nist_p_521[] =
+ {0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,
+ 0x00000000000001FFULL};
+static const BN_ULONG _nist_p_521_sqr[] = {
+ 0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL,
+ 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,
+ 0x0000000000000000ULL,0x0000000000000000ULL,0xFFFFFFFFFFFFFC00ULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,
+ 0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,0xFFFFFFFFFFFFFFFFULL,
+ 0xFFFFFFFFFFFFFFFFULL,0x000000000003FFFFULL
+ };
+#elif BN_BITS2 == 32
+static const BN_ULONG _nist_p_192[][BN_NIST_192_TOP] = {
+ {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFE,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ {0xFFFFFFFE,0xFFFFFFFF,0xFFFFFFFD,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ {0xFFFFFFFD,0xFFFFFFFF,0xFFFFFFFC,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}
+ };
+static const BN_ULONG _nist_p_192_sqr[] = {
+ 0x00000001,0x00000000,0x00000002,0x00000000,0x00000001,0x00000000,
+ 0xFFFFFFFE,0xFFFFFFFF,0xFFFFFFFD,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF
+ };
+static const BN_ULONG _nist_p_224[][BN_NIST_224_TOP] = {
+ {0x00000001,0x00000000,0x00000000,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ {0x00000002,0x00000000,0x00000000,0xFFFFFFFE,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}
+ };
+static const BN_ULONG _nist_p_224_sqr[] = {
+ 0x00000001,0x00000000,0x00000000,0xFFFFFFFE,
+ 0xFFFFFFFF,0xFFFFFFFF,0x00000000,0x00000002,
+ 0x00000000,0x00000000,0xFFFFFFFE,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF
+ };
+static const BN_ULONG _nist_p_256[][BN_NIST_256_TOP] = {
+ {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0x00000000,
+ 0x00000000,0x00000000,0x00000001,0xFFFFFFFF},
+ {0xFFFFFFFE,0xFFFFFFFF,0xFFFFFFFF,0x00000001,
+ 0x00000000,0x00000000,0x00000002,0xFFFFFFFE},
+ {0xFFFFFFFD,0xFFFFFFFF,0xFFFFFFFF,0x00000002,
+ 0x00000000,0x00000000,0x00000003,0xFFFFFFFD},
+ {0xFFFFFFFC,0xFFFFFFFF,0xFFFFFFFF,0x00000003,
+ 0x00000000,0x00000000,0x00000004,0xFFFFFFFC},
+ {0xFFFFFFFB,0xFFFFFFFF,0xFFFFFFFF,0x00000004,
+ 0x00000000,0x00000000,0x00000005,0xFFFFFFFB},
+ };
+static const BN_ULONG _nist_p_256_sqr[] = {
+ 0x00000001,0x00000000,0x00000000,0xFFFFFFFE,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFE,0x00000001,
+ 0xFFFFFFFE,0x00000001,0xFFFFFFFE,0x00000001,
+ 0x00000001,0xFFFFFFFE,0x00000002,0xFFFFFFFE
+ };
+static const BN_ULONG _nist_p_384[][BN_NIST_384_TOP] = {
+ {0xFFFFFFFF,0x00000000,0x00000000,0xFFFFFFFF,0xFFFFFFFE,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ {0xFFFFFFFE,0x00000001,0x00000000,0xFFFFFFFE,0xFFFFFFFD,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ {0xFFFFFFFD,0x00000002,0x00000000,0xFFFFFFFD,0xFFFFFFFC,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ {0xFFFFFFFC,0x00000003,0x00000000,0xFFFFFFFC,0xFFFFFFFB,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ {0xFFFFFFFB,0x00000004,0x00000000,0xFFFFFFFB,0xFFFFFFFA,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF},
+ };
+static const BN_ULONG _nist_p_384_sqr[] = {
+ 0x00000001,0xFFFFFFFE,0x00000000,0x00000002,0x00000000,0xFFFFFFFE,
+ 0x00000000,0x00000002,0x00000001,0x00000000,0x00000000,0x00000000,
+ 0xFFFFFFFE,0x00000001,0x00000000,0xFFFFFFFE,0xFFFFFFFD,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF
+ };
+static const BN_ULONG _nist_p_521[] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0x000001FF};
+static const BN_ULONG _nist_p_521_sqr[] = {
+ 0x00000001,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000000,0x00000000,0x00000000,0x00000000,0xFFFFFC00,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0x0003FFFF
+ };
+#else
+#error "unsupported BN_BITS2"
+#endif
+
+
+static const BIGNUM _bignum_nist_p_192 =
+ {
+ (BN_ULONG *)_nist_p_192[0],
+ BN_NIST_192_TOP,
+ BN_NIST_192_TOP,
+ 0,
+ BN_FLG_STATIC_DATA
+ };
+
+static const BIGNUM _bignum_nist_p_224 =
+ {
+ (BN_ULONG *)_nist_p_224[0],
+ BN_NIST_224_TOP,
+ BN_NIST_224_TOP,
+ 0,
+ BN_FLG_STATIC_DATA
+ };
+
+static const BIGNUM _bignum_nist_p_256 =
+ {
+ (BN_ULONG *)_nist_p_256[0],
+ BN_NIST_256_TOP,
+ BN_NIST_256_TOP,
+ 0,
+ BN_FLG_STATIC_DATA
+ };
+
+static const BIGNUM _bignum_nist_p_384 =
+ {
+ (BN_ULONG *)_nist_p_384[0],
+ BN_NIST_384_TOP,
+ BN_NIST_384_TOP,
+ 0,
+ BN_FLG_STATIC_DATA
+ };
+
+static const BIGNUM _bignum_nist_p_521 =
+ {
+ (BN_ULONG *)_nist_p_521,
+ BN_NIST_521_TOP,
+ BN_NIST_521_TOP,
+ 0,
+ BN_FLG_STATIC_DATA
+ };
+
+
+const BIGNUM *BN_get0_nist_prime_192(void)
+ {
+ return &_bignum_nist_p_192;
+ }
+
+const BIGNUM *BN_get0_nist_prime_224(void)
+ {
+ return &_bignum_nist_p_224;
+ }
+
+const BIGNUM *BN_get0_nist_prime_256(void)
+ {
+ return &_bignum_nist_p_256;
+ }
+
+const BIGNUM *BN_get0_nist_prime_384(void)
+ {
+ return &_bignum_nist_p_384;
+ }
+
+const BIGNUM *BN_get0_nist_prime_521(void)
+ {
+ return &_bignum_nist_p_521;
+ }
+
+
+static void nist_cp_bn_0(BN_ULONG *dst, const BN_ULONG *src, int top, int max)
+ {
+ int i;
+
+#ifdef BN_DEBUG
+ OPENSSL_assert(top <= max);
+#endif
+ for (i = 0; i < top; i++)
+ dst[i] = src[i];
+ for (; i < max; i++)
+ dst[i] = 0;
+ }
+
+static void nist_cp_bn(BN_ULONG *dst, const BN_ULONG *src, int top)
+ {
+ int i;
+
+ for (i = 0; i < top; i++)
+ dst[i] = src[i];
+ }
+
+#if BN_BITS2 == 64
+#define bn_cp_64(to, n, from, m) (to)[n] = (m>=0)?((from)[m]):0;
+#define bn_64_set_0(to, n) (to)[n] = (BN_ULONG)0;
+/*
+ * the two following macros are implemented under the assumption that they
+ * are called in a sequence with *ascending* n, i.e. as they are...
+ */
+#define bn_cp_32_naked(to, n, from, m) (((n)&1)?(to[(n)/2]|=((m)&1)?(from[(m)/2]&BN_MASK2h):(from[(m)/2]<<32))\
+ :(to[(n)/2] =((m)&1)?(from[(m)/2]>>32):(from[(m)/2]&BN_MASK2l)))
+#define bn_32_set_0(to, n) (((n)&1)?(to[(n)/2]&=BN_MASK2l):(to[(n)/2]=0));
+#define bn_cp_32(to,n,from,m) ((m)>=0)?bn_cp_32_naked(to,n,from,m):bn_32_set_0(to,n)
+# if defined(L_ENDIAN)
+# if defined(__arch64__)
+# define NIST_INT64 long
+# else
+# define NIST_INT64 long long
+# endif
+# endif
+#else
+#define bn_cp_64(to, n, from, m) \
+ { \
+ bn_cp_32(to, (n)*2, from, (m)*2); \
+ bn_cp_32(to, (n)*2+1, from, (m)*2+1); \
+ }
+#define bn_64_set_0(to, n) \
+ { \
+ bn_32_set_0(to, (n)*2); \
+ bn_32_set_0(to, (n)*2+1); \
+ }
+#define bn_cp_32(to, n, from, m) (to)[n] = (m>=0)?((from)[m]):0;
+#define bn_32_set_0(to, n) (to)[n] = (BN_ULONG)0;
+# if defined(_WIN32) && !defined(__GNUC__)
+# define NIST_INT64 __int64
+# elif defined(BN_LLONG)
+# define NIST_INT64 long long
+# endif
+#endif /* BN_BITS2 != 64 */
+
+#define nist_set_192(to, from, a1, a2, a3) \
+ { \
+ bn_cp_64(to, 0, from, (a3) - 3) \
+ bn_cp_64(to, 1, from, (a2) - 3) \
+ bn_cp_64(to, 2, from, (a1) - 3) \
+ }
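+
+/* For P-192, 2^192 = 2^64 + 1 (mod p), so a value a < p^2, split into
+ * 64-bit words A5..A0, reduces to
+ *   (A2||A1||A0) + (0||A3||A3) + (A4||A4||0) + (A5||A5||A5)  (mod p),
+ * which is what the three nist_set_192()/bn_add_words() steps (or the
+ * NIST_INT64 accumulator variant) compute before the final subtractions. */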
+
+int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
+ BN_CTX *ctx)
+ {
+ int top = a->top, i;
+ int carry;
+ register BN_ULONG *r_d, *a_d = a->d;
+ union {
+ BN_ULONG bn[BN_NIST_192_TOP];
+ unsigned int ui[BN_NIST_192_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_192_TOP],
+ *res;
+ PTR_SIZE_INT mask;
+ static const BIGNUM _bignum_nist_p_192_sqr = {
+ (BN_ULONG *)_nist_p_192_sqr,
+ sizeof(_nist_p_192_sqr)/sizeof(_nist_p_192_sqr[0]),
+ sizeof(_nist_p_192_sqr)/sizeof(_nist_p_192_sqr[0]),
+ 0,BN_FLG_STATIC_DATA };
+
+ field = &_bignum_nist_p_192; /* just to make sure */
+
+ if (BN_is_negative(a) || BN_ucmp(a,&_bignum_nist_p_192_sqr)>=0)
+ return BN_nnmod(r, a, field, ctx);
+
+ i = BN_ucmp(field, a);
+ if (i == 0)
+ {
+ BN_zero(r);
+ return 1;
+ }
+ else if (i > 0)
+ return (r == a) ? 1 : (BN_copy(r ,a) != NULL);
+
+ if (r != a)
+ {
+ if (!bn_wexpand(r, BN_NIST_192_TOP))
+ return 0;
+ r_d = r->d;
+ nist_cp_bn(r_d, a_d, BN_NIST_192_TOP);
+ }
+ else
+ r_d = a_d;
+
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_192_TOP, top - BN_NIST_192_TOP, BN_NIST_192_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[3*2-6];
+ acc += bp[5*2-6]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[3*2-5];
+ acc += bp[5*2-5]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc += bp[3*2-6];
+ acc += bp[4*2-6];
+ acc += bp[5*2-6]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[3*2-5];
+ acc += bp[4*2-5];
+ acc += bp[5*2-5]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[4*2-6];
+ acc += bp[5*2-6]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[4*2-5];
+ acc += bp[5*2-5]; rp[5] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_192_TOP];
+
+ nist_set_192(t_d, buf.bn, 0, 3, 3);
+ carry = (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
+ nist_set_192(t_d, buf.bn, 4, 4, 0);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
+ nist_set_192(t_d, buf.bn, 5, 5, 5);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_192_TOP);
+ }
+#endif
+ if (carry > 0)
+ carry = (int)bn_sub_words(r_d,r_d,_nist_p_192[carry-1],BN_NIST_192_TOP);
+ else
+ carry = 1;
+
+ /*
+ * we need 'if (carry==0 || result>=modulus) result-=modulus;'
+ * as comparison implies subtraction, we can write
+ * 'tmp=result-modulus; if (!carry || !borrow) result=tmp;'
+ * this is what happens below, but without an explicit 'if' :-)
+ */
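+ /*
+ * sketch of the branchless select below: 'mask' ends up all-ones
+ * exactly when the trial subtraction borrows (r_d < modulus) and
+ * 'carry' is non-zero, in which case the original r_d is kept;
+ * otherwise the already-reduced c_d = r_d - modulus is used. The
+ * choice is made by masking the two pointers, so there is no
+ * data-dependent branch.
+ */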
+ mask = 0-(PTR_SIZE_INT)bn_sub_words(c_d,r_d,_nist_p_192[0],BN_NIST_192_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = c_d;
+ res = (BN_ULONG *)
+ (((PTR_SIZE_INT)res&~mask) | ((PTR_SIZE_INT)r_d&mask));
+ nist_cp_bn(r_d, res, BN_NIST_192_TOP);
+ r->top = BN_NIST_192_TOP;
+ bn_correct_top(r);
+
+ return 1;
+ }
+
+typedef BN_ULONG (*bn_addsub_f)(BN_ULONG *,const BN_ULONG *,const BN_ULONG *,int);
+
+#define nist_set_224(to, from, a1, a2, a3, a4, a5, a6, a7) \
+ { \
+ bn_cp_32(to, 0, from, (a7) - 7) \
+ bn_cp_32(to, 1, from, (a6) - 7) \
+ bn_cp_32(to, 2, from, (a5) - 7) \
+ bn_cp_32(to, 3, from, (a4) - 7) \
+ bn_cp_32(to, 4, from, (a3) - 7) \
+ bn_cp_32(to, 5, from, (a2) - 7) \
+ bn_cp_32(to, 6, from, (a1) - 7) \
+ }
+
+int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
+ BN_CTX *ctx)
+ {
+ int top = a->top, i;
+ int carry;
+ BN_ULONG *r_d, *a_d = a->d;
+ union {
+ BN_ULONG bn[BN_NIST_224_TOP];
+ unsigned int ui[BN_NIST_224_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_224_TOP],
+ *res;
+ PTR_SIZE_INT mask;
+ union { bn_addsub_f f; PTR_SIZE_INT p; } u;
+ static const BIGNUM _bignum_nist_p_224_sqr = {
+ (BN_ULONG *)_nist_p_224_sqr,
+ sizeof(_nist_p_224_sqr)/sizeof(_nist_p_224_sqr[0]),
+ sizeof(_nist_p_224_sqr)/sizeof(_nist_p_224_sqr[0]),
+ 0,BN_FLG_STATIC_DATA };
+
+
+ field = &_bignum_nist_p_224; /* just to make sure */
+
+ if (BN_is_negative(a) || BN_ucmp(a,&_bignum_nist_p_224_sqr)>=0)
+ return BN_nnmod(r, a, field, ctx);
+
+ i = BN_ucmp(field, a);
+ if (i == 0)
+ {
+ BN_zero(r);
+ return 1;
+ }
+ else if (i > 0)
+ return (r == a)? 1 : (BN_copy(r ,a) != NULL);
+
+ if (r != a)
+ {
+ if (!bn_wexpand(r, BN_NIST_224_TOP))
+ return 0;
+ r_d = r->d;
+ nist_cp_bn(r_d, a_d, BN_NIST_224_TOP);
+ }
+ else
+ r_d = a_d;
+
+#if BN_BITS2==64
+ /* copy the upper 256 bits of the 448-bit number ... */
+ nist_cp_bn_0(c_d, a_d + (BN_NIST_224_TOP-1), top - (BN_NIST_224_TOP-1), BN_NIST_224_TOP);
+ /* ... and right shift by 32 to obtain upper 224 bits */
+ nist_set_224(buf.bn, c_d, 14, 13, 12, 11, 10, 9, 8);
+ /* truncate lower part to 224 bits too */
+ r_d[BN_NIST_224_TOP-1] &= BN_MASK2l;
+#else
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_224_TOP, top - BN_NIST_224_TOP, BN_NIST_224_TOP);
+#endif
+
+#if defined(NIST_INT64) && BN_BITS2!=64
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc -= bp[7-7];
+ acc -= bp[11-7]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc -= bp[8-7];
+ acc -= bp[12-7]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc -= bp[9-7];
+ acc -= bp[13-7]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[7-7];
+ acc += bp[11-7];
+ acc -= bp[10-7]; rp[3] = (unsigned int)acc; acc>>= 32;
+
+ acc += rp[4]; acc += bp[8-7];
+ acc += bp[12-7];
+ acc -= bp[11-7]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[9-7];
+ acc += bp[13-7];
+ acc -= bp[12-7]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[10-7];
+ acc -= bp[13-7]; rp[6] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+# if BN_BITS2==64
+ rp[7] = carry;
+# endif
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_224_TOP];
+
+ nist_set_224(t_d, buf.bn, 10, 9, 8, 7, 0, 0, 0);
+ carry = (int)bn_add_words(r_d, r_d, t_d, BN_NIST_224_TOP);
+ nist_set_224(t_d, buf.bn, 0, 13, 12, 11, 0, 0, 0);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_224_TOP);
+ nist_set_224(t_d, buf.bn, 13, 12, 11, 10, 9, 8, 7);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_224_TOP);
+ nist_set_224(t_d, buf.bn, 0, 0, 0, 0, 13, 12, 11);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_224_TOP);
+
+#if BN_BITS2==64
+ carry = (int)(r_d[BN_NIST_224_TOP-1]>>32);
+#endif
+ }
+#endif
+ u.f = bn_sub_words;
+ if (carry > 0)
+ {
+ carry = (int)bn_sub_words(r_d,r_d,_nist_p_224[carry-1],BN_NIST_224_TOP);
+#if BN_BITS2==64
+ carry=(int)(~(r_d[BN_NIST_224_TOP-1]>>32))&1;
+#endif
+ }
+ else if (carry < 0)
+ {
+ /* the logic is a bit more complicated in this case:
+ * if bn_add_words yields no carry, then the result
+ * has to be adjusted by unconditionally *adding*
+ * the modulus; but if it does carry, then the result
+ * has to be compared to the modulus and conditionally
+ * adjusted by *subtracting* the latter. */
+ carry = (int)bn_add_words(r_d,r_d,_nist_p_224[-carry-1],BN_NIST_224_TOP);
+ mask = 0-(PTR_SIZE_INT)carry;
+ u.p = ((PTR_SIZE_INT)bn_sub_words&mask) |
+ ((PTR_SIZE_INT)bn_add_words&~mask);
+ }
+ else
+ carry = 1;
+
+ /* otherwise it's effectively the same as in BN_nist_mod_192... */
+ mask = 0-(PTR_SIZE_INT)(*u.f)(c_d,r_d,_nist_p_224[0],BN_NIST_224_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = c_d;
+ res = (BN_ULONG *)(((PTR_SIZE_INT)res&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
+ nist_cp_bn(r_d, res, BN_NIST_224_TOP);
+ r->top = BN_NIST_224_TOP;
+ bn_correct_top(r);
+
+ return 1;
+ }
+
+#define nist_set_256(to, from, a1, a2, a3, a4, a5, a6, a7, a8) \
+ { \
+ bn_cp_32(to, 0, from, (a8) - 8) \
+ bn_cp_32(to, 1, from, (a7) - 8) \
+ bn_cp_32(to, 2, from, (a6) - 8) \
+ bn_cp_32(to, 3, from, (a5) - 8) \
+ bn_cp_32(to, 4, from, (a4) - 8) \
+ bn_cp_32(to, 5, from, (a3) - 8) \
+ bn_cp_32(to, 6, from, (a2) - 8) \
+ bn_cp_32(to, 7, from, (a1) - 8) \
+ }
+
+int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
+ BN_CTX *ctx)
+ {
+ int i, top = a->top;
+ int carry = 0;
+ register BN_ULONG *a_d = a->d, *r_d;
+ union {
+ BN_ULONG bn[BN_NIST_256_TOP];
+ unsigned int ui[BN_NIST_256_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_256_TOP],
+ *res;
+ PTR_SIZE_INT mask;
+ union { bn_addsub_f f; PTR_SIZE_INT p; } u;
+ static const BIGNUM _bignum_nist_p_256_sqr = {
+ (BN_ULONG *)_nist_p_256_sqr,
+ sizeof(_nist_p_256_sqr)/sizeof(_nist_p_256_sqr[0]),
+ sizeof(_nist_p_256_sqr)/sizeof(_nist_p_256_sqr[0]),
+ 0,BN_FLG_STATIC_DATA };
+
+ field = &_bignum_nist_p_256; /* just to make sure */
+
+ if (BN_is_negative(a) || BN_ucmp(a,&_bignum_nist_p_256_sqr)>=0)
+ return BN_nnmod(r, a, field, ctx);
+
+ i = BN_ucmp(field, a);
+ if (i == 0)
+ {
+ BN_zero(r);
+ return 1;
+ }
+ else if (i > 0)
+ return (r == a)? 1 : (BN_copy(r ,a) != NULL);
+
+ if (r != a)
+ {
+ if (!bn_wexpand(r, BN_NIST_256_TOP))
+ return 0;
+ r_d = r->d;
+ nist_cp_bn(r_d, a_d, BN_NIST_256_TOP);
+ }
+ else
+ r_d = a_d;
+
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_256_TOP, top - BN_NIST_256_TOP, BN_NIST_256_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[8-8];
+ acc += bp[9-8];
+ acc -= bp[11-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[9-8];
+ acc += bp[10-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8];
+ acc -= bp[15-8]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc += bp[10-8];
+ acc += bp[11-8];
+ acc -= bp[13-8];
+ acc -= bp[14-8];
+ acc -= bp[15-8]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[11-8];
+ acc += bp[11-8];
+ acc += bp[12-8];
+ acc += bp[12-8];
+ acc += bp[13-8];
+ acc -= bp[15-8];
+ acc -= bp[8-8];
+ acc -= bp[9-8]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[12-8];
+ acc += bp[12-8];
+ acc += bp[13-8];
+ acc += bp[13-8];
+ acc += bp[14-8];
+ acc -= bp[9-8];
+ acc -= bp[10-8]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[13-8];
+ acc += bp[13-8];
+ acc += bp[14-8];
+ acc += bp[14-8];
+ acc += bp[15-8];
+ acc -= bp[10-8];
+ acc -= bp[11-8]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[14-8];
+ acc += bp[14-8];
+ acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[14-8];
+ acc += bp[13-8];
+ acc -= bp[8-8];
+ acc -= bp[9-8]; rp[6] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[7]; acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[15-8];
+ acc += bp[8 -8];
+ acc -= bp[10-8];
+ acc -= bp[11-8];
+ acc -= bp[12-8];
+ acc -= bp[13-8]; rp[7] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_256_TOP];
+
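+ /*
+ * sketch of the accumulation below: each labelled term is a
+ * 256-bit value assembled from 32-bit words of the upper half
+ * of 'a', and r_d ends up holding
+ *     a_lo + 2*(S1 + S2) + S3 + S4 - D1 - D2 - D3 - D4,
+ * with 'carry' tracking the running over/underflow.
+ */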
+ /*S1*/
+ nist_set_256(t_d, buf.bn, 15, 14, 13, 12, 11, 0, 0, 0);
+ /*S2*/
+ nist_set_256(c_d, buf.bn, 0, 15, 14, 13, 12, 0, 0, 0);
+ carry = (int)bn_add_words(t_d, t_d, c_d, BN_NIST_256_TOP);
+ /* left shift */
+ {
+ register BN_ULONG *ap,t,c;
+ ap = t_d;
+ c=0;
+ for (i = BN_NIST_256_TOP; i != 0; --i)
+ {
+ t= *ap;
+ *(ap++)=((t<<1)|c)&BN_MASK2;
+ c=(t & BN_TBIT)?1:0;
+ }
+ carry <<= 1;
+ carry |= c;
+ }
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ /*S3*/
+ nist_set_256(t_d, buf.bn, 15, 14, 0, 0, 0, 10, 9, 8);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ /*S4*/
+ nist_set_256(t_d, buf.bn, 8, 13, 15, 14, 13, 11, 10, 9);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ /*D1*/
+ nist_set_256(t_d, buf.bn, 10, 8, 0, 0, 0, 13, 12, 11);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ /*D2*/
+ nist_set_256(t_d, buf.bn, 11, 9, 0, 0, 15, 14, 13, 12);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ /*D3*/
+ nist_set_256(t_d, buf.bn, 12, 0, 10, 9, 8, 15, 14, 13);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+ /*D4*/
+ nist_set_256(t_d, buf.bn, 13, 0, 11, 10, 9, 0, 15, 14);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_256_TOP);
+
+ }
+#endif
+ /* see BN_nist_mod_224 for explanation */
+ u.f = bn_sub_words;
+ if (carry > 0)
+ carry = (int)bn_sub_words(r_d,r_d,_nist_p_256[carry-1],BN_NIST_256_TOP);
+ else if (carry < 0)
+ {
+ carry = (int)bn_add_words(r_d,r_d,_nist_p_256[-carry-1],BN_NIST_256_TOP);
+ mask = 0-(PTR_SIZE_INT)carry;
+ u.p = ((PTR_SIZE_INT)bn_sub_words&mask) |
+ ((PTR_SIZE_INT)bn_add_words&~mask);
+ }
+ else
+ carry = 1;
+
+ mask = 0-(PTR_SIZE_INT)(*u.f)(c_d,r_d,_nist_p_256[0],BN_NIST_256_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = c_d;
+ res = (BN_ULONG *)(((PTR_SIZE_INT)res&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
+ nist_cp_bn(r_d, res, BN_NIST_256_TOP);
+ r->top = BN_NIST_256_TOP;
+ bn_correct_top(r);
+
+ return 1;
+ }
+
+#define nist_set_384(to,from,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12) \
+ { \
+ bn_cp_32(to, 0, from, (a12) - 12) \
+ bn_cp_32(to, 1, from, (a11) - 12) \
+ bn_cp_32(to, 2, from, (a10) - 12) \
+ bn_cp_32(to, 3, from, (a9) - 12) \
+ bn_cp_32(to, 4, from, (a8) - 12) \
+ bn_cp_32(to, 5, from, (a7) - 12) \
+ bn_cp_32(to, 6, from, (a6) - 12) \
+ bn_cp_32(to, 7, from, (a5) - 12) \
+ bn_cp_32(to, 8, from, (a4) - 12) \
+ bn_cp_32(to, 9, from, (a3) - 12) \
+ bn_cp_32(to, 10, from, (a2) - 12) \
+ bn_cp_32(to, 11, from, (a1) - 12) \
+ }
+
+int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
+ BN_CTX *ctx)
+ {
+ int i, top = a->top;
+ int carry = 0;
+ register BN_ULONG *r_d, *a_d = a->d;
+ union {
+ BN_ULONG bn[BN_NIST_384_TOP];
+ unsigned int ui[BN_NIST_384_TOP*sizeof(BN_ULONG)/sizeof(unsigned int)];
+ } buf;
+ BN_ULONG c_d[BN_NIST_384_TOP],
+ *res;
+ PTR_SIZE_INT mask;
+ union { bn_addsub_f f; PTR_SIZE_INT p; } u;
+ static const BIGNUM _bignum_nist_p_384_sqr = {
+ (BN_ULONG *)_nist_p_384_sqr,
+ sizeof(_nist_p_384_sqr)/sizeof(_nist_p_384_sqr[0]),
+ sizeof(_nist_p_384_sqr)/sizeof(_nist_p_384_sqr[0]),
+ 0,BN_FLG_STATIC_DATA };
+
+
+ field = &_bignum_nist_p_384; /* just to make sure */
+
+ if (BN_is_negative(a) || BN_ucmp(a,&_bignum_nist_p_384_sqr)>=0)
+ return BN_nnmod(r, a, field, ctx);
+
+ i = BN_ucmp(field, a);
+ if (i == 0)
+ {
+ BN_zero(r);
+ return 1;
+ }
+ else if (i > 0)
+ return (r == a)? 1 : (BN_copy(r ,a) != NULL);
+
+ if (r != a)
+ {
+ if (!bn_wexpand(r, BN_NIST_384_TOP))
+ return 0;
+ r_d = r->d;
+ nist_cp_bn(r_d, a_d, BN_NIST_384_TOP);
+ }
+ else
+ r_d = a_d;
+
+ nist_cp_bn_0(buf.bn, a_d + BN_NIST_384_TOP, top - BN_NIST_384_TOP, BN_NIST_384_TOP);
+
+#if defined(NIST_INT64)
+ {
+ NIST_INT64 acc; /* accumulator */
+ unsigned int *rp=(unsigned int *)r_d;
+ const unsigned int *bp=(const unsigned int *)buf.ui;
+
+ acc = rp[0]; acc += bp[12-12];
+ acc += bp[21-12];
+ acc += bp[20-12];
+ acc -= bp[23-12]; rp[0] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[1]; acc += bp[13-12];
+ acc += bp[22-12];
+ acc += bp[23-12];
+ acc -= bp[12-12];
+ acc -= bp[20-12]; rp[1] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[2]; acc += bp[14-12];
+ acc += bp[23-12];
+ acc -= bp[13-12];
+ acc -= bp[21-12]; rp[2] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[3]; acc += bp[15-12];
+ acc += bp[12-12];
+ acc += bp[20-12];
+ acc += bp[21-12];
+ acc -= bp[14-12];
+ acc -= bp[22-12];
+ acc -= bp[23-12]; rp[3] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[4]; acc += bp[21-12];
+ acc += bp[21-12];
+ acc += bp[16-12];
+ acc += bp[13-12];
+ acc += bp[12-12];
+ acc += bp[20-12];
+ acc += bp[22-12];
+ acc -= bp[15-12];
+ acc -= bp[23-12];
+ acc -= bp[23-12]; rp[4] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[5]; acc += bp[22-12];
+ acc += bp[22-12];
+ acc += bp[17-12];
+ acc += bp[14-12];
+ acc += bp[13-12];
+ acc += bp[21-12];
+ acc += bp[23-12];
+ acc -= bp[16-12]; rp[5] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[6]; acc += bp[23-12];
+ acc += bp[23-12];
+ acc += bp[18-12];
+ acc += bp[15-12];
+ acc += bp[14-12];
+ acc += bp[22-12];
+ acc -= bp[17-12]; rp[6] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[7]; acc += bp[19-12];
+ acc += bp[16-12];
+ acc += bp[15-12];
+ acc += bp[23-12];
+ acc -= bp[18-12]; rp[7] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[8]; acc += bp[20-12];
+ acc += bp[17-12];
+ acc += bp[16-12];
+ acc -= bp[19-12]; rp[8] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[9]; acc += bp[21-12];
+ acc += bp[18-12];
+ acc += bp[17-12];
+ acc -= bp[20-12]; rp[9] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[10]; acc += bp[22-12];
+ acc += bp[19-12];
+ acc += bp[18-12];
+ acc -= bp[21-12]; rp[10] = (unsigned int)acc; acc >>= 32;
+
+ acc += rp[11]; acc += bp[23-12];
+ acc += bp[20-12];
+ acc += bp[19-12];
+ acc -= bp[22-12]; rp[11] = (unsigned int)acc;
+
+ carry = (int)(acc>>32);
+ }
+#else
+ {
+ BN_ULONG t_d[BN_NIST_384_TOP];
+
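+ /*
+ * sketch of the accumulation below: S1 is doubled and added at a
+ * 128-bit offset, S2 is simply the upper half of 'a', and r_d
+ * ends up holding
+ *     a_lo + 2*S1*2^128 + S2 + S3 + S4 + S5 + S6 - D1 - D2 - D3,
+ * with 'carry' tracking the running over/underflow.
+ */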
+ /*S1*/
+ nist_set_256(t_d, buf.bn, 0, 0, 0, 0, 0, 23-4, 22-4, 21-4);
+ /* left shift */
+ {
+ register BN_ULONG *ap,t,c;
+ ap = t_d;
+ c=0;
+ for (i = 3; i != 0; --i)
+ {
+ t= *ap;
+ *(ap++)=((t<<1)|c)&BN_MASK2;
+ c=(t & BN_TBIT)?1:0;
+ }
+ *ap=c;
+ }
+ carry = (int)bn_add_words(r_d+(128/BN_BITS2), r_d+(128/BN_BITS2),
+ t_d, BN_NIST_256_TOP);
+ /*S2*/
+ carry += (int)bn_add_words(r_d, r_d, buf.bn, BN_NIST_384_TOP);
+ /*S3*/
+ nist_set_384(t_d,buf.bn,20,19,18,17,16,15,14,13,12,23,22,21);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ /*S4*/
+ nist_set_384(t_d,buf.bn,19,18,17,16,15,14,13,12,20,0,23,0);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ /*S5*/
+ nist_set_384(t_d, buf.bn,0,0,0,0,23,22,21,20,0,0,0,0);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ /*S6*/
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,23,22,21,0,0,20);
+ carry += (int)bn_add_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ /*D1*/
+ nist_set_384(t_d,buf.bn,22,21,20,19,18,17,16,15,14,13,12,23);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ /*D2*/
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,0,23,22,21,20,0);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+ /*D3*/
+ nist_set_384(t_d,buf.bn,0,0,0,0,0,0,0,23,23,0,0,0);
+ carry -= (int)bn_sub_words(r_d, r_d, t_d, BN_NIST_384_TOP);
+
+ }
+#endif
+ /* see BN_nist_mod_224 for explanation */
+ u.f = bn_sub_words;
+ if (carry > 0)
+ carry = (int)bn_sub_words(r_d,r_d,_nist_p_384[carry-1],BN_NIST_384_TOP);
+ else if (carry < 0)
+ {
+ carry = (int)bn_add_words(r_d,r_d,_nist_p_384[-carry-1],BN_NIST_384_TOP);
+ mask = 0-(PTR_SIZE_INT)carry;
+ u.p = ((PTR_SIZE_INT)bn_sub_words&mask) |
+ ((PTR_SIZE_INT)bn_add_words&~mask);
+ }
+ else
+ carry = 1;
+
+ mask = 0-(PTR_SIZE_INT)(*u.f)(c_d,r_d,_nist_p_384[0],BN_NIST_384_TOP);
+ mask &= 0-(PTR_SIZE_INT)carry;
+ res = c_d;
+ res = (BN_ULONG *)(((PTR_SIZE_INT)res&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
+ nist_cp_bn(r_d, res, BN_NIST_384_TOP);
+ r->top = BN_NIST_384_TOP;
+ bn_correct_top(r);
+
+ return 1;
+ }
+
+#define BN_NIST_521_RSHIFT (521%BN_BITS2)
+#define BN_NIST_521_LSHIFT (BN_BITS2-BN_NIST_521_RSHIFT)
+#define BN_NIST_521_TOP_MASK ((BN_ULONG)BN_MASK2>>BN_NIST_521_LSHIFT)
+
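+/*
+ * sketch of the reduction below: because the modulus is 2^521 - 1, any
+ * a < p^2 can be written a = hi*2^521 + lo, and a == hi + lo (mod p); so a
+ * single 521-bit shift, one addition and at most one conditional
+ * subtraction of p suffice.
+ */
+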
+int BN_nist_mod_521(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
+ BN_CTX *ctx)
+ {
+ int top = a->top, i;
+ BN_ULONG *r_d, *a_d = a->d,
+ t_d[BN_NIST_521_TOP],
+ val,tmp,*res;
+ PTR_SIZE_INT mask;
+ static const BIGNUM _bignum_nist_p_521_sqr = {
+ (BN_ULONG *)_nist_p_521_sqr,
+ sizeof(_nist_p_521_sqr)/sizeof(_nist_p_521_sqr[0]),
+ sizeof(_nist_p_521_sqr)/sizeof(_nist_p_521_sqr[0]),
+ 0,BN_FLG_STATIC_DATA };
+
+ field = &_bignum_nist_p_521; /* just to make sure */
+
+ if (BN_is_negative(a) || BN_ucmp(a,&_bignum_nist_p_521_sqr)>=0)
+ return BN_nnmod(r, a, field, ctx);
+
+ i = BN_ucmp(field, a);
+ if (i == 0)
+ {
+ BN_zero(r);
+ return 1;
+ }
+ else if (i > 0)
+ return (r == a)? 1 : (BN_copy(r ,a) != NULL);
+
+ if (r != a)
+ {
+ if (!bn_wexpand(r,BN_NIST_521_TOP))
+ return 0;
+ r_d = r->d;
+ nist_cp_bn(r_d,a_d, BN_NIST_521_TOP);
+ }
+ else
+ r_d = a_d;
+
+ /* upper 521 bits, copy ... */
+ nist_cp_bn_0(t_d,a_d + (BN_NIST_521_TOP-1), top - (BN_NIST_521_TOP-1),BN_NIST_521_TOP);
+ /* ... and right shift */
+ for (val=t_d[0],i=0; i<BN_NIST_521_TOP-1; i++)
+ {
+ tmp = val>>BN_NIST_521_RSHIFT;
+ val = t_d[i+1];
+ t_d[i] = (tmp | val<<BN_NIST_521_LSHIFT) & BN_MASK2;
+ }
+ t_d[i] = val>>BN_NIST_521_RSHIFT;
+ /* lower 521 bits */
+ r_d[i] &= BN_NIST_521_TOP_MASK;
+
+ bn_add_words(r_d,r_d,t_d,BN_NIST_521_TOP);
+ mask = 0-(PTR_SIZE_INT)bn_sub_words(t_d,r_d,_nist_p_521,BN_NIST_521_TOP);
+ res = t_d;
+ res = (BN_ULONG *)(((PTR_SIZE_INT)res&~mask) |
+ ((PTR_SIZE_INT)r_d&mask));
+ nist_cp_bn(r_d,res,BN_NIST_521_TOP);
+ r->top = BN_NIST_521_TOP;
+ bn_correct_top(r);
+
+ return 1;
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.c
new file mode 100644
index 00000000..7b25979d
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.c
@@ -0,0 +1,494 @@
+/* crypto/bn/bn_prime.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+#include <openssl/rand.h>
+
+/* NB: these functions have been "upgraded"; the deprecated versions (which are
+ * compatibility wrappers using these functions) are in bn_depr.c.
+ * - Geoff
+ */
+
+/* The quick sieve algorithm approach to weeding out primes is
+ * Philip Zimmermann's, as implemented in PGP. I have had a read of
+ * his comments and implemented my own version.
+ */
+#include "bn_prime.h"
+
+static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1,
+ const BIGNUM *a1_odd, int k, BN_CTX *ctx, BN_MONT_CTX *mont);
+static int probable_prime(BIGNUM *rnd, int bits);
+static int probable_prime_dh(BIGNUM *rnd, int bits,
+ const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx);
+static int probable_prime_dh_safe(BIGNUM *rnd, int bits,
+ const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx);
+
+int BN_GENCB_call(BN_GENCB *cb, int a, int b)
+ {
+ /* No callback means continue */
+ if(!cb) return 1;
+ switch(cb->ver)
+ {
+ case 1:
+ /* Deprecated-style callbacks */
+ if(!cb->cb.cb_1)
+ return 1;
+ cb->cb.cb_1(a, b, cb->arg);
+ return 1;
+ case 2:
+ /* New-style callbacks */
+ return cb->cb.cb_2(a, b, cb);
+ default:
+ break;
+ }
+ /* Unrecognised callback type */
+ return 0;
+ }
+
+int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe,
+ const BIGNUM *add, const BIGNUM *rem, BN_GENCB *cb)
+ {
+ BIGNUM *t;
+ int found=0;
+ int i,j,c1=0;
+ BN_CTX *ctx;
+ int checks = BN_prime_checks_for_size(bits);
+
+ ctx=BN_CTX_new();
+ if (ctx == NULL) goto err;
+ BN_CTX_start(ctx);
+ t = BN_CTX_get(ctx);
+ if(!t) goto err;
+loop:
+ /* make a random number and set the top and bottom bits */
+ if (add == NULL)
+ {
+ if (!probable_prime(ret,bits)) goto err;
+ }
+ else
+ {
+ if (safe)
+ {
+ if (!probable_prime_dh_safe(ret,bits,add,rem,ctx))
+ goto err;
+ }
+ else
+ {
+ if (!probable_prime_dh(ret,bits,add,rem,ctx))
+ goto err;
+ }
+ }
+ /* if (BN_mod_word(ret,(BN_ULONG)3) == 1) goto loop; */
+ if(!BN_GENCB_call(cb, 0, c1++))
+ /* aborted */
+ goto err;
+
+ if (!safe)
+ {
+ i=BN_is_prime_fasttest_ex(ret,checks,ctx,0,cb);
+ if (i == -1) goto err;
+ if (i == 0) goto loop;
+ }
+ else
+ {
+ /* for "safe prime" generation,
+ * check that (p-1)/2 is prime.
+ * Since a prime is odd, we just
+ * need to divide by 2 */
+ if (!BN_rshift1(t,ret)) goto err;
+
+ for (i=0; i<checks; i++)
+ {
+ j=BN_is_prime_fasttest_ex(ret,1,ctx,0,cb);
+ if (j == -1) goto err;
+ if (j == 0) goto loop;
+
+ j=BN_is_prime_fasttest_ex(t,1,ctx,0,cb);
+ if (j == -1) goto err;
+ if (j == 0) goto loop;
+
+ if(!BN_GENCB_call(cb, 2, c1-1))
+ goto err;
+ /* We have a safe prime test pass */
+ }
+ }
+ /* we have a prime :-) */
+ found = 1;
+err:
+ if (ctx != NULL)
+ {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ }
+ bn_check_top(ret);
+ return found;
+ }
+
+int BN_is_prime_ex(const BIGNUM *a, int checks, BN_CTX *ctx_passed, BN_GENCB *cb)
+ {
+ return BN_is_prime_fasttest_ex(a, checks, ctx_passed, 0, cb);
+ }
+
+int BN_is_prime_fasttest_ex(const BIGNUM *a, int checks, BN_CTX *ctx_passed,
+ int do_trial_division, BN_GENCB *cb)
+ {
+ int i, j, ret = -1;
+ int k;
+ BN_CTX *ctx = NULL;
+ BIGNUM *A1, *A1_odd, *check; /* taken from ctx */
+ BN_MONT_CTX *mont = NULL;
+ const BIGNUM *A = NULL;
+
+ if (BN_cmp(a, BN_value_one()) <= 0)
+ return 0;
+
+ if (checks == BN_prime_checks)
+ checks = BN_prime_checks_for_size(BN_num_bits(a));
+
+ /* first look for small factors */
+ if (!BN_is_odd(a))
+ /* a is even => a is prime if and only if a == 2 */
+ return BN_is_word(a, 2);
+ if (do_trial_division)
+ {
+ for (i = 1; i < NUMPRIMES; i++)
+ if (BN_mod_word(a, primes[i]) == 0)
+ return 0;
+ if(!BN_GENCB_call(cb, 1, -1))
+ goto err;
+ }
+
+ if (ctx_passed != NULL)
+ ctx = ctx_passed;
+ else
+ if ((ctx=BN_CTX_new()) == NULL)
+ goto err;
+ BN_CTX_start(ctx);
+
+ /* A := abs(a) */
+ if (a->neg)
+ {
+ BIGNUM *t;
+ if ((t = BN_CTX_get(ctx)) == NULL) goto err;
+ BN_copy(t, a);
+ t->neg = 0;
+ A = t;
+ }
+ else
+ A = a;
+ A1 = BN_CTX_get(ctx);
+ A1_odd = BN_CTX_get(ctx);
+ check = BN_CTX_get(ctx);
+ if (check == NULL) goto err;
+
+ /* compute A1 := A - 1 */
+ if (!BN_copy(A1, A))
+ goto err;
+ if (!BN_sub_word(A1, 1))
+ goto err;
+ if (BN_is_zero(A1))
+ {
+ ret = 0;
+ goto err;
+ }
+
+ /* write A1 as A1_odd * 2^k */
+ k = 1;
+ while (!BN_is_bit_set(A1, k))
+ k++;
+ if (!BN_rshift(A1_odd, A1, k))
+ goto err;
+
+ /* Montgomery setup for computations mod A */
+ mont = BN_MONT_CTX_new();
+ if (mont == NULL)
+ goto err;
+ if (!BN_MONT_CTX_set(mont, A, ctx))
+ goto err;
+
+ for (i = 0; i < checks; i++)
+ {
+ if (!BN_pseudo_rand_range(check, A1))
+ goto err;
+ if (!BN_add_word(check, 1))
+ goto err;
+ /* now 1 <= check < A */
+
+ j = witness(check, A, A1, A1_odd, k, ctx, mont);
+ if (j == -1) goto err;
+ if (j)
+ {
+ ret=0;
+ goto err;
+ }
+ if(!BN_GENCB_call(cb, 1, i))
+ goto err;
+ }
+ ret=1;
+err:
+ if (ctx != NULL)
+ {
+ BN_CTX_end(ctx);
+ if (ctx_passed == NULL)
+ BN_CTX_free(ctx);
+ }
+ if (mont != NULL)
+ BN_MONT_CTX_free(mont);
+
+ return(ret);
+ }
+
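+/*
+ * Miller-Rabin witness (sketch): with a-1 = a1_odd * 2^k, compute
+ * w = w^a1_odd mod a and then square it up to k-1 times; if the sequence
+ * never reaches 1 or a-1 in an allowed position, 'w' proves 'a' composite
+ * and 1 is returned, otherwise 0 ("probably prime"). -1 means an error.
+ */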
+static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1,
+ const BIGNUM *a1_odd, int k, BN_CTX *ctx, BN_MONT_CTX *mont)
+ {
+ if (!BN_mod_exp_mont(w, w, a1_odd, a, ctx, mont)) /* w := w^a1_odd mod a */
+ return -1;
+ if (BN_is_one(w))
+ return 0; /* probably prime */
+ if (BN_cmp(w, a1) == 0)
+ return 0; /* w == -1 (mod a), 'a' is probably prime */
+ while (--k)
+ {
+ if (!BN_mod_mul(w, w, w, a, ctx)) /* w := w^2 mod a */
+ return -1;
+ if (BN_is_one(w))
+ return 1; /* 'a' is composite, otherwise a previous 'w' would
+ * have been == -1 (mod 'a') */
+ if (BN_cmp(w, a1) == 0)
+ return 0; /* w == -1 (mod a), 'a' is probably prime */
+ }
+ /* If we get here, 'w' is the (a-1)/2-th power of the original 'w',
+ * and it is neither -1 nor +1 -- so 'a' cannot be prime */
+ bn_check_top(w);
+ return 1;
+ }
+
+static int probable_prime(BIGNUM *rnd, int bits)
+ {
+ int i;
+ prime_t mods[NUMPRIMES];
+ BN_ULONG delta,maxdelta;
+
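+ /*
+ * sieve sketch: record the candidate's residue modulo every small
+ * prime once, then step the candidate by an even 'delta' and re-test
+ * divisibility from the cached residues as (mods[i]+delta)%primes[i],
+ * so BN_mod_word is called only once per small prime for each
+ * random draw.
+ */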
+again:
+ if (!BN_rand(rnd,bits,1,1)) return(0);
+ /* we now have a random number 'rnd' to test. */
+ for (i=1; i<NUMPRIMES; i++)
+ mods[i]=(prime_t)BN_mod_word(rnd,(BN_ULONG)primes[i]);
+ maxdelta=BN_MASK2 - primes[NUMPRIMES-1];
+ delta=0;
+ loop: for (i=1; i<NUMPRIMES; i++)
+ {
+ /* check that rnd is not divisible by any of the small
+ * primes and that gcd(rnd-1,primes) == 1 (except for 2) */
+ if (((mods[i]+delta)%primes[i]) <= 1)
+ {
+ delta+=2;
+ if (delta > maxdelta) goto again;
+ goto loop;
+ }
+ }
+ if (!BN_add_word(rnd,delta)) return(0);
+ bn_check_top(rnd);
+ return(1);
+ }
+
+static int probable_prime_dh(BIGNUM *rnd, int bits,
+ const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx)
+ {
+ int i,ret=0;
+ BIGNUM *t1;
+
+ BN_CTX_start(ctx);
+ if ((t1 = BN_CTX_get(ctx)) == NULL) goto err;
+
+ if (!BN_rand(rnd,bits,0,1)) goto err;
+
+ /* we need ((rnd-rem) % add) == 0 */
+
+ if (!BN_mod(t1,rnd,add,ctx)) goto err;
+ if (!BN_sub(rnd,rnd,t1)) goto err;
+ if (rem == NULL)
+ { if (!BN_add_word(rnd,1)) goto err; }
+ else
+ { if (!BN_add(rnd,rnd,rem)) goto err; }
+
+ /* we now have a random number 'rnd' to test. */
+
+ loop: for (i=1; i<NUMPRIMES; i++)
+ {
+ /* check that rnd is not divisible by any small prime
+ * and that gcd(rnd-1,primes) == 1 (except for 2) */
+ if (BN_mod_word(rnd,(BN_ULONG)primes[i]) <= 1)
+ {
+ if (!BN_add(rnd,rnd,add)) goto err;
+ goto loop;
+ }
+ }
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ bn_check_top(rnd);
+ return(ret);
+ }
+
+static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd,
+ const BIGNUM *rem, BN_CTX *ctx)
+ {
+ int i,ret=0;
+ BIGNUM *t1,*qadd,*q;
+
+ bits--;
+ BN_CTX_start(ctx);
+ t1 = BN_CTX_get(ctx);
+ q = BN_CTX_get(ctx);
+ qadd = BN_CTX_get(ctx);
+ if (qadd == NULL) goto err;
+
+ if (!BN_rshift1(qadd,padd)) goto err;
+
+ if (!BN_rand(q,bits,0,1)) goto err;
+
+ /* we need ((q - rem/2) % qadd) == 0 */
+ if (!BN_mod(t1,q,qadd,ctx)) goto err;
+ if (!BN_sub(q,q,t1)) goto err;
+ if (rem == NULL)
+ { if (!BN_add_word(q,1)) goto err; }
+ else
+ {
+ if (!BN_rshift1(t1,rem)) goto err;
+ if (!BN_add(q,q,t1)) goto err;
+ }
+
+ /* we now have a random 'q'; form p = 2*q + 1 and test both. */
+ if (!BN_lshift1(p,q)) goto err;
+ if (!BN_add_word(p,1)) goto err;
+
+ loop: for (i=1; i<NUMPRIMES; i++)
+ {
+ /* check that neither p nor q is divisible by a small prime;
+ * since p-1 == 2*q, this also gives
+ * gcd(p-1,primes) == 1 (except for 2) */
+ if ( (BN_mod_word(p,(BN_ULONG)primes[i]) == 0) ||
+ (BN_mod_word(q,(BN_ULONG)primes[i]) == 0))
+ {
+ if (!BN_add(p,p,padd)) goto err;
+ if (!BN_add(q,q,qadd)) goto err;
+ goto loop;
+ }
+ }
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ bn_check_top(p);
+ return(ret);
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.h b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.h
new file mode 100644
index 00000000..51d2194f
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.h
@@ -0,0 +1,327 @@
+/* Auto generated by bn_prime.pl */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef EIGHT_BIT
+#define NUMPRIMES 2048
+typedef unsigned short prime_t;
+#else
+#define NUMPRIMES 54
+typedef unsigned char prime_t;
+#endif
+static const prime_t primes[NUMPRIMES]=
+ {
+ 2, 3, 5, 7, 11, 13, 17, 19,
+ 23, 29, 31, 37, 41, 43, 47, 53,
+ 59, 61, 67, 71, 73, 79, 83, 89,
+ 97, 101, 103, 107, 109, 113, 127, 131,
+ 137, 139, 149, 151, 157, 163, 167, 173,
+ 179, 181, 191, 193, 197, 199, 211, 223,
+ 227, 229, 233, 239, 241, 251,
+#ifndef EIGHT_BIT
+ 257, 263,
+ 269, 271, 277, 281, 283, 293, 307, 311,
+ 313, 317, 331, 337, 347, 349, 353, 359,
+ 367, 373, 379, 383, 389, 397, 401, 409,
+ 419, 421, 431, 433, 439, 443, 449, 457,
+ 461, 463, 467, 479, 487, 491, 499, 503,
+ 509, 521, 523, 541, 547, 557, 563, 569,
+ 571, 577, 587, 593, 599, 601, 607, 613,
+ 617, 619, 631, 641, 643, 647, 653, 659,
+ 661, 673, 677, 683, 691, 701, 709, 719,
+ 727, 733, 739, 743, 751, 757, 761, 769,
+ 773, 787, 797, 809, 811, 821, 823, 827,
+ 829, 839, 853, 857, 859, 863, 877, 881,
+ 883, 887, 907, 911, 919, 929, 937, 941,
+ 947, 953, 967, 971, 977, 983, 991, 997,
+ 1009,1013,1019,1021,1031,1033,1039,1049,
+ 1051,1061,1063,1069,1087,1091,1093,1097,
+ 1103,1109,1117,1123,1129,1151,1153,1163,
+ 1171,1181,1187,1193,1201,1213,1217,1223,
+ 1229,1231,1237,1249,1259,1277,1279,1283,
+ 1289,1291,1297,1301,1303,1307,1319,1321,
+ 1327,1361,1367,1373,1381,1399,1409,1423,
+ 1427,1429,1433,1439,1447,1451,1453,1459,
+ 1471,1481,1483,1487,1489,1493,1499,1511,
+ 1523,1531,1543,1549,1553,1559,1567,1571,
+ 1579,1583,1597,1601,1607,1609,1613,1619,
+ 1621,1627,1637,1657,1663,1667,1669,1693,
+ 1697,1699,1709,1721,1723,1733,1741,1747,
+ 1753,1759,1777,1783,1787,1789,1801,1811,
+ 1823,1831,1847,1861,1867,1871,1873,1877,
+ 1879,1889,1901,1907,1913,1931,1933,1949,
+ 1951,1973,1979,1987,1993,1997,1999,2003,
+ 2011,2017,2027,2029,2039,2053,2063,2069,
+ 2081,2083,2087,2089,2099,2111,2113,2129,
+ 2131,2137,2141,2143,2153,2161,2179,2203,
+ 2207,2213,2221,2237,2239,2243,2251,2267,
+ 2269,2273,2281,2287,2293,2297,2309,2311,
+ 2333,2339,2341,2347,2351,2357,2371,2377,
+ 2381,2383,2389,2393,2399,2411,2417,2423,
+ 2437,2441,2447,2459,2467,2473,2477,2503,
+ 2521,2531,2539,2543,2549,2551,2557,2579,
+ 2591,2593,2609,2617,2621,2633,2647,2657,
+ 2659,2663,2671,2677,2683,2687,2689,2693,
+ 2699,2707,2711,2713,2719,2729,2731,2741,
+ 2749,2753,2767,2777,2789,2791,2797,2801,
+ 2803,2819,2833,2837,2843,2851,2857,2861,
+ 2879,2887,2897,2903,2909,2917,2927,2939,
+ 2953,2957,2963,2969,2971,2999,3001,3011,
+ 3019,3023,3037,3041,3049,3061,3067,3079,
+ 3083,3089,3109,3119,3121,3137,3163,3167,
+ 3169,3181,3187,3191,3203,3209,3217,3221,
+ 3229,3251,3253,3257,3259,3271,3299,3301,
+ 3307,3313,3319,3323,3329,3331,3343,3347,
+ 3359,3361,3371,3373,3389,3391,3407,3413,
+ 3433,3449,3457,3461,3463,3467,3469,3491,
+ 3499,3511,3517,3527,3529,3533,3539,3541,
+ 3547,3557,3559,3571,3581,3583,3593,3607,
+ 3613,3617,3623,3631,3637,3643,3659,3671,
+ 3673,3677,3691,3697,3701,3709,3719,3727,
+ 3733,3739,3761,3767,3769,3779,3793,3797,
+ 3803,3821,3823,3833,3847,3851,3853,3863,
+ 3877,3881,3889,3907,3911,3917,3919,3923,
+ 3929,3931,3943,3947,3967,3989,4001,4003,
+ 4007,4013,4019,4021,4027,4049,4051,4057,
+ 4073,4079,4091,4093,4099,4111,4127,4129,
+ 4133,4139,4153,4157,4159,4177,4201,4211,
+ 4217,4219,4229,4231,4241,4243,4253,4259,
+ 4261,4271,4273,4283,4289,4297,4327,4337,
+ 4339,4349,4357,4363,4373,4391,4397,4409,
+ 4421,4423,4441,4447,4451,4457,4463,4481,
+ 4483,4493,4507,4513,4517,4519,4523,4547,
+ 4549,4561,4567,4583,4591,4597,4603,4621,
+ 4637,4639,4643,4649,4651,4657,4663,4673,
+ 4679,4691,4703,4721,4723,4729,4733,4751,
+ 4759,4783,4787,4789,4793,4799,4801,4813,
+ 4817,4831,4861,4871,4877,4889,4903,4909,
+ 4919,4931,4933,4937,4943,4951,4957,4967,
+ 4969,4973,4987,4993,4999,5003,5009,5011,
+ 5021,5023,5039,5051,5059,5077,5081,5087,
+ 5099,5101,5107,5113,5119,5147,5153,5167,
+ 5171,5179,5189,5197,5209,5227,5231,5233,
+ 5237,5261,5273,5279,5281,5297,5303,5309,
+ 5323,5333,5347,5351,5381,5387,5393,5399,
+ 5407,5413,5417,5419,5431,5437,5441,5443,
+ 5449,5471,5477,5479,5483,5501,5503,5507,
+ 5519,5521,5527,5531,5557,5563,5569,5573,
+ 5581,5591,5623,5639,5641,5647,5651,5653,
+ 5657,5659,5669,5683,5689,5693,5701,5711,
+ 5717,5737,5741,5743,5749,5779,5783,5791,
+ 5801,5807,5813,5821,5827,5839,5843,5849,
+ 5851,5857,5861,5867,5869,5879,5881,5897,
+ 5903,5923,5927,5939,5953,5981,5987,6007,
+ 6011,6029,6037,6043,6047,6053,6067,6073,
+ 6079,6089,6091,6101,6113,6121,6131,6133,
+ 6143,6151,6163,6173,6197,6199,6203,6211,
+ 6217,6221,6229,6247,6257,6263,6269,6271,
+ 6277,6287,6299,6301,6311,6317,6323,6329,
+ 6337,6343,6353,6359,6361,6367,6373,6379,
+ 6389,6397,6421,6427,6449,6451,6469,6473,
+ 6481,6491,6521,6529,6547,6551,6553,6563,
+ 6569,6571,6577,6581,6599,6607,6619,6637,
+ 6653,6659,6661,6673,6679,6689,6691,6701,
+ 6703,6709,6719,6733,6737,6761,6763,6779,
+ 6781,6791,6793,6803,6823,6827,6829,6833,
+ 6841,6857,6863,6869,6871,6883,6899,6907,
+ 6911,6917,6947,6949,6959,6961,6967,6971,
+ 6977,6983,6991,6997,7001,7013,7019,7027,
+ 7039,7043,7057,7069,7079,7103,7109,7121,
+ 7127,7129,7151,7159,7177,7187,7193,7207,
+ 7211,7213,7219,7229,7237,7243,7247,7253,
+ 7283,7297,7307,7309,7321,7331,7333,7349,
+ 7351,7369,7393,7411,7417,7433,7451,7457,
+ 7459,7477,7481,7487,7489,7499,7507,7517,
+ 7523,7529,7537,7541,7547,7549,7559,7561,
+ 7573,7577,7583,7589,7591,7603,7607,7621,
+ 7639,7643,7649,7669,7673,7681,7687,7691,
+ 7699,7703,7717,7723,7727,7741,7753,7757,
+ 7759,7789,7793,7817,7823,7829,7841,7853,
+ 7867,7873,7877,7879,7883,7901,7907,7919,
+ 7927,7933,7937,7949,7951,7963,7993,8009,
+ 8011,8017,8039,8053,8059,8069,8081,8087,
+ 8089,8093,8101,8111,8117,8123,8147,8161,
+ 8167,8171,8179,8191,8209,8219,8221,8231,
+ 8233,8237,8243,8263,8269,8273,8287,8291,
+ 8293,8297,8311,8317,8329,8353,8363,8369,
+ 8377,8387,8389,8419,8423,8429,8431,8443,
+ 8447,8461,8467,8501,8513,8521,8527,8537,
+ 8539,8543,8563,8573,8581,8597,8599,8609,
+ 8623,8627,8629,8641,8647,8663,8669,8677,
+ 8681,8689,8693,8699,8707,8713,8719,8731,
+ 8737,8741,8747,8753,8761,8779,8783,8803,
+ 8807,8819,8821,8831,8837,8839,8849,8861,
+ 8863,8867,8887,8893,8923,8929,8933,8941,
+ 8951,8963,8969,8971,8999,9001,9007,9011,
+ 9013,9029,9041,9043,9049,9059,9067,9091,
+ 9103,9109,9127,9133,9137,9151,9157,9161,
+ 9173,9181,9187,9199,9203,9209,9221,9227,
+ 9239,9241,9257,9277,9281,9283,9293,9311,
+ 9319,9323,9337,9341,9343,9349,9371,9377,
+ 9391,9397,9403,9413,9419,9421,9431,9433,
+ 9437,9439,9461,9463,9467,9473,9479,9491,
+ 9497,9511,9521,9533,9539,9547,9551,9587,
+ 9601,9613,9619,9623,9629,9631,9643,9649,
+ 9661,9677,9679,9689,9697,9719,9721,9733,
+ 9739,9743,9749,9767,9769,9781,9787,9791,
+ 9803,9811,9817,9829,9833,9839,9851,9857,
+ 9859,9871,9883,9887,9901,9907,9923,9929,
+ 9931,9941,9949,9967,9973,10007,10009,10037,
+ 10039,10061,10067,10069,10079,10091,10093,10099,
+ 10103,10111,10133,10139,10141,10151,10159,10163,
+ 10169,10177,10181,10193,10211,10223,10243,10247,
+ 10253,10259,10267,10271,10273,10289,10301,10303,
+ 10313,10321,10331,10333,10337,10343,10357,10369,
+ 10391,10399,10427,10429,10433,10453,10457,10459,
+ 10463,10477,10487,10499,10501,10513,10529,10531,
+ 10559,10567,10589,10597,10601,10607,10613,10627,
+ 10631,10639,10651,10657,10663,10667,10687,10691,
+ 10709,10711,10723,10729,10733,10739,10753,10771,
+ 10781,10789,10799,10831,10837,10847,10853,10859,
+ 10861,10867,10883,10889,10891,10903,10909,10937,
+ 10939,10949,10957,10973,10979,10987,10993,11003,
+ 11027,11047,11057,11059,11069,11071,11083,11087,
+ 11093,11113,11117,11119,11131,11149,11159,11161,
+ 11171,11173,11177,11197,11213,11239,11243,11251,
+ 11257,11261,11273,11279,11287,11299,11311,11317,
+ 11321,11329,11351,11353,11369,11383,11393,11399,
+ 11411,11423,11437,11443,11447,11467,11471,11483,
+ 11489,11491,11497,11503,11519,11527,11549,11551,
+ 11579,11587,11593,11597,11617,11621,11633,11657,
+ 11677,11681,11689,11699,11701,11717,11719,11731,
+ 11743,11777,11779,11783,11789,11801,11807,11813,
+ 11821,11827,11831,11833,11839,11863,11867,11887,
+ 11897,11903,11909,11923,11927,11933,11939,11941,
+ 11953,11959,11969,11971,11981,11987,12007,12011,
+ 12037,12041,12043,12049,12071,12073,12097,12101,
+ 12107,12109,12113,12119,12143,12149,12157,12161,
+ 12163,12197,12203,12211,12227,12239,12241,12251,
+ 12253,12263,12269,12277,12281,12289,12301,12323,
+ 12329,12343,12347,12373,12377,12379,12391,12401,
+ 12409,12413,12421,12433,12437,12451,12457,12473,
+ 12479,12487,12491,12497,12503,12511,12517,12527,
+ 12539,12541,12547,12553,12569,12577,12583,12589,
+ 12601,12611,12613,12619,12637,12641,12647,12653,
+ 12659,12671,12689,12697,12703,12713,12721,12739,
+ 12743,12757,12763,12781,12791,12799,12809,12821,
+ 12823,12829,12841,12853,12889,12893,12899,12907,
+ 12911,12917,12919,12923,12941,12953,12959,12967,
+ 12973,12979,12983,13001,13003,13007,13009,13033,
+ 13037,13043,13049,13063,13093,13099,13103,13109,
+ 13121,13127,13147,13151,13159,13163,13171,13177,
+ 13183,13187,13217,13219,13229,13241,13249,13259,
+ 13267,13291,13297,13309,13313,13327,13331,13337,
+ 13339,13367,13381,13397,13399,13411,13417,13421,
+ 13441,13451,13457,13463,13469,13477,13487,13499,
+ 13513,13523,13537,13553,13567,13577,13591,13597,
+ 13613,13619,13627,13633,13649,13669,13679,13681,
+ 13687,13691,13693,13697,13709,13711,13721,13723,
+ 13729,13751,13757,13759,13763,13781,13789,13799,
+ 13807,13829,13831,13841,13859,13873,13877,13879,
+ 13883,13901,13903,13907,13913,13921,13931,13933,
+ 13963,13967,13997,13999,14009,14011,14029,14033,
+ 14051,14057,14071,14081,14083,14087,14107,14143,
+ 14149,14153,14159,14173,14177,14197,14207,14221,
+ 14243,14249,14251,14281,14293,14303,14321,14323,
+ 14327,14341,14347,14369,14387,14389,14401,14407,
+ 14411,14419,14423,14431,14437,14447,14449,14461,
+ 14479,14489,14503,14519,14533,14537,14543,14549,
+ 14551,14557,14561,14563,14591,14593,14621,14627,
+ 14629,14633,14639,14653,14657,14669,14683,14699,
+ 14713,14717,14723,14731,14737,14741,14747,14753,
+ 14759,14767,14771,14779,14783,14797,14813,14821,
+ 14827,14831,14843,14851,14867,14869,14879,14887,
+ 14891,14897,14923,14929,14939,14947,14951,14957,
+ 14969,14983,15013,15017,15031,15053,15061,15073,
+ 15077,15083,15091,15101,15107,15121,15131,15137,
+ 15139,15149,15161,15173,15187,15193,15199,15217,
+ 15227,15233,15241,15259,15263,15269,15271,15277,
+ 15287,15289,15299,15307,15313,15319,15329,15331,
+ 15349,15359,15361,15373,15377,15383,15391,15401,
+ 15413,15427,15439,15443,15451,15461,15467,15473,
+ 15493,15497,15511,15527,15541,15551,15559,15569,
+ 15581,15583,15601,15607,15619,15629,15641,15643,
+ 15647,15649,15661,15667,15671,15679,15683,15727,
+ 15731,15733,15737,15739,15749,15761,15767,15773,
+ 15787,15791,15797,15803,15809,15817,15823,15859,
+ 15877,15881,15887,15889,15901,15907,15913,15919,
+ 15923,15937,15959,15971,15973,15991,16001,16007,
+ 16033,16057,16061,16063,16067,16069,16073,16087,
+ 16091,16097,16103,16111,16127,16139,16141,16183,
+ 16187,16189,16193,16217,16223,16229,16231,16249,
+ 16253,16267,16273,16301,16319,16333,16339,16349,
+ 16361,16363,16369,16381,16411,16417,16421,16427,
+ 16433,16447,16451,16453,16477,16481,16487,16493,
+ 16519,16529,16547,16553,16561,16567,16573,16603,
+ 16607,16619,16631,16633,16649,16651,16657,16661,
+ 16673,16691,16693,16699,16703,16729,16741,16747,
+ 16759,16763,16787,16811,16823,16829,16831,16843,
+ 16871,16879,16883,16889,16901,16903,16921,16927,
+ 16931,16937,16943,16963,16979,16981,16987,16993,
+ 17011,17021,17027,17029,17033,17041,17047,17053,
+ 17077,17093,17099,17107,17117,17123,17137,17159,
+ 17167,17183,17189,17191,17203,17207,17209,17231,
+ 17239,17257,17291,17293,17299,17317,17321,17327,
+ 17333,17341,17351,17359,17377,17383,17387,17389,
+ 17393,17401,17417,17419,17431,17443,17449,17467,
+ 17471,17477,17483,17489,17491,17497,17509,17519,
+ 17539,17551,17569,17573,17579,17581,17597,17599,
+ 17609,17623,17627,17657,17659,17669,17681,17683,
+ 17707,17713,17729,17737,17747,17749,17761,17783,
+ 17789,17791,17807,17827,17837,17839,17851,17863,
+#endif
+ };
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.pl b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.pl
new file mode 100644
index 00000000..3fafb6f3
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_prime.pl
@@ -0,0 +1,119 @@
+#!/usr/local/bin/perl
+# bn_prime.pl
+
+$num=2048;
+$num=$ARGV[0] if ($#ARGV >= 0);
+
+push(@primes,2);
+$p=1;
+loop: while ($#primes < $num-1)
+ {
+ $p+=2;
+ $s=int(sqrt($p));
+
+ for ($i=0; defined($primes[$i]) && $primes[$i]<=$s; $i++)
+ {
+ next loop if (($p%$primes[$i]) == 0);
+ }
+ push(@primes,$p);
+ }
+
+# print <<"EOF";
+# /* Auto generated by bn_prime.pl */
+# /* Copyright (C) 1995-1997 Eric Young (eay\@mincom.oz.au).
+# * All rights reserved.
+# * Copyright remains Eric Young's, and as such any Copyright notices in
+# * the code are not to be removed.
+# * See the COPYRIGHT file in the SSLeay distribution for more details.
+# */
+#
+# EOF
+
+print <<\EOF;
+/* Auto generated by bn_prime.pl */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+EOF
+
+for ($i=0; $i <= $#primes; $i++)
+ {
+ if ($primes[$i] > 256)
+ {
+ $eight=$i;
+ last;
+ }
+ }
+
+printf "#ifndef EIGHT_BIT\n";
+printf "#define NUMPRIMES %d\n",$num;
+printf "typedef unsigned short prime_t;\n";
+printf "#else\n";
+printf "#define NUMPRIMES %d\n",$eight;
+printf "typedef unsigned char prime_t;\n";
+printf "#endif\n";
+print "static const prime_t primes[NUMPRIMES]=\n\t{\n\t";
+$init=0;
+for ($i=0; $i <= $#primes; $i++)
+ {
+ printf "\n#ifndef EIGHT_BIT\n\t" if ($primes[$i] > 256) && !($init++);
+ printf("\n\t") if (($i%8) == 0) && ($i != 0);
+ printf("%4d,",$primes[$i]);
+ }
+print "\n#endif\n\t};\n";
+
+
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_print.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_print.c
new file mode 100644
index 00000000..1743b6a7
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_print.c
@@ -0,0 +1,378 @@
+/* crypto/bn/bn_print.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <ctype.h>
+#include "cryptlib.h"
+#include <openssl/buffer.h>
+#include "bn_lcl.h"
+
+static const char Hex[]="0123456789ABCDEF";
+
+/* Must 'OPENSSL_free' the returned data */
+char *BN_bn2hex(const BIGNUM *a)
+ {
+ int i,j,v,z=0;
+ char *buf;
+ char *p;
+
+ buf=(char *)OPENSSL_malloc(a->top*BN_BYTES*2+2);
+ if (buf == NULL)
+ {
+ BNerr(BN_F_BN_BN2HEX,ERR_R_MALLOC_FAILURE);
+ goto err;
+ }
+ p=buf;
+ if (a->neg) *(p++)='-';
+ if (BN_is_zero(a)) *(p++)='0';
+ for (i=a->top-1; i >=0; i--)
+ {
+ for (j=BN_BITS2-8; j >= 0; j-=8)
+ {
+ /* strip leading zeros */
+ v=((int)(a->d[i]>>(long)j))&0xff;
+ if (z || (v != 0))
+ {
+ *(p++)=Hex[v>>4];
+ *(p++)=Hex[v&0x0f];
+ z=1;
+ }
+ }
+ }
+ *p='\0';
+err:
+ return(buf);
+ }
+
+/* Must 'OPENSSL_free' the returned data */
+char *BN_bn2dec(const BIGNUM *a)
+ {
+ int i=0,num, ok = 0;
+ char *buf=NULL;
+ char *p;
+ BIGNUM *t=NULL;
+ BN_ULONG *bn_data=NULL,*lp;
+
+ /* get an upper bound for the length of the decimal integer
+ * num <= (BN_num_bits(a) + 1) * log(2)
+ * <= 3 * BN_num_bits(a) * 0.1001 + log(2) + 1 (rounding error)
+ * <= BN_num_bits(a)/10 + BN_num_bits/1000 + 1 + 1
+ */
+ i=BN_num_bits(a)*3;
+ num=(i/10+i/1000+1)+1;
+ bn_data=(BN_ULONG *)OPENSSL_malloc((num/BN_DEC_NUM+1)*sizeof(BN_ULONG));
+ buf=(char *)OPENSSL_malloc(num+3);
+ if ((buf == NULL) || (bn_data == NULL))
+ {
+ BNerr(BN_F_BN_BN2DEC,ERR_R_MALLOC_FAILURE);
+ goto err;
+ }
+ if ((t=BN_dup(a)) == NULL) goto err;
+
+#define BUF_REMAIN (num+3 - (size_t)(p - buf))
+ p=buf;
+ lp=bn_data;
+ if (BN_is_zero(t))
+ {
+ *(p++)='0';
+ *(p++)='\0';
+ }
+ else
+ {
+ if (BN_is_negative(t))
+ *p++ = '-';
+
+ i=0;
+ while (!BN_is_zero(t))
+ {
+ *lp=BN_div_word(t,BN_DEC_CONV);
+ lp++;
+ }
+ lp--;
+ /* We now have a series of blocks, BN_DEC_NUM chars
+ * in length, where the last one needs truncation.
+ * The blocks need to be reversed in order. */
+ BIO_snprintf(p,BUF_REMAIN,BN_DEC_FMT1,*lp);
+ while (*p) p++;
+ while (lp != bn_data)
+ {
+ lp--;
+ BIO_snprintf(p,BUF_REMAIN,BN_DEC_FMT2,*lp);
+ while (*p) p++;
+ }
+ }
+ ok = 1;
+err:
+ if (bn_data != NULL) OPENSSL_free(bn_data);
+ if (t != NULL) BN_free(t);
+ if (!ok && buf)
+ {
+ OPENSSL_free(buf);
+ buf = NULL;
+ }
+
+ return(buf);
+ }
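[Editor's note: a minimal usage sketch for the two conversion routines above, not part of the patch itself; it assumes an OpenSSL 1.0-style build where <openssl/bn.h> is available.]

#include <stdio.h>
#include <openssl/bn.h>
#include <openssl/crypto.h>

int main(void)
{
    BIGNUM *n = BN_new();
    char *hex, *dec;

    if (n == NULL || !BN_set_word(n, 255))
        return 1;
    hex = BN_bn2hex(n);   /* "FF"  -- caller must OPENSSL_free() it */
    dec = BN_bn2dec(n);   /* "255" -- caller must OPENSSL_free() it */
    if (hex != NULL && dec != NULL)
        printf("hex=%s dec=%s\n", hex, dec);
    if (hex != NULL) OPENSSL_free(hex);
    if (dec != NULL) OPENSSL_free(dec);
    BN_free(n);
    return 0;
}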
+
+int BN_hex2bn(BIGNUM **bn, const char *a)
+ {
+ BIGNUM *ret=NULL;
+ BN_ULONG l=0;
+ int neg=0,h,m,i,j,k,c;
+ int num;
+
+ if ((a == NULL) || (*a == '\0')) return(0);
+
+ if (*a == '-') { neg=1; a++; }
+
+ for (i=0; isxdigit((unsigned char) a[i]); i++)
+ ;
+
+ num=i+neg;
+ if (bn == NULL) return(num);
+
+ /* a is the start of the hex digits, and it is 'i' long */
+ if (*bn == NULL)
+ {
+ if ((ret=BN_new()) == NULL) return(0);
+ }
+ else
+ {
+ ret= *bn;
+ BN_zero(ret);
+ }
+
+	/* i is the number of hex digits */
+ if (bn_expand(ret,i*4) == NULL) goto err;
+
+ j=i; /* least significant 'hex' */
+ m=0;
+ h=0;
+ while (j > 0)
+ {
+ m=((BN_BYTES*2) <= j)?(BN_BYTES*2):j;
+ l=0;
+ for (;;)
+ {
+ c=a[j-m];
+ if ((c >= '0') && (c <= '9')) k=c-'0';
+ else if ((c >= 'a') && (c <= 'f')) k=c-'a'+10;
+ else if ((c >= 'A') && (c <= 'F')) k=c-'A'+10;
+ else k=0; /* paranoia */
+ l=(l<<4)|k;
+
+ if (--m <= 0)
+ {
+ ret->d[h++]=l;
+ break;
+ }
+ }
+ j-=(BN_BYTES*2);
+ }
+ ret->top=h;
+ bn_correct_top(ret);
+ ret->neg=neg;
+
+ *bn=ret;
+ bn_check_top(ret);
+ return(num);
+err:
+ if (*bn == NULL) BN_free(ret);
+ return(0);
+ }
+
+int BN_dec2bn(BIGNUM **bn, const char *a)
+ {
+ BIGNUM *ret=NULL;
+ BN_ULONG l=0;
+ int neg=0,i,j;
+ int num;
+
+ if ((a == NULL) || (*a == '\0')) return(0);
+ if (*a == '-') { neg=1; a++; }
+
+ for (i=0; isdigit((unsigned char) a[i]); i++)
+ ;
+
+ num=i+neg;
+ if (bn == NULL) return(num);
+
+ /* a is the start of the digits, and it is 'i' long.
+ * We chop it into BN_DEC_NUM digits at a time */
+ if (*bn == NULL)
+ {
+ if ((ret=BN_new()) == NULL) return(0);
+ }
+ else
+ {
+ ret= *bn;
+ BN_zero(ret);
+ }
+
+	/* i is the number of decimal digits; this over-expands a bit */
+ if (bn_expand(ret,i*4) == NULL) goto err;
+
+ j=BN_DEC_NUM-(i%BN_DEC_NUM);
+ if (j == BN_DEC_NUM) j=0;
+ l=0;
+ while (*a)
+ {
+ l*=10;
+ l+= *a-'0';
+ a++;
+ if (++j == BN_DEC_NUM)
+ {
+ BN_mul_word(ret,BN_DEC_CONV);
+ BN_add_word(ret,l);
+ l=0;
+ j=0;
+ }
+ }
+ ret->neg=neg;
+
+ bn_correct_top(ret);
+ *bn=ret;
+ bn_check_top(ret);
+ return(num);
+err:
+ if (*bn == NULL) BN_free(ret);
+ return(0);
+ }
+
+int BN_asc2bn(BIGNUM **bn, const char *a)
+ {
+ const char *p = a;
+ if (*p == '-')
+ p++;
+
+ if (p[0] == '0' && (p[1] == 'X' || p[1] == 'x'))
+ {
+ if (!BN_hex2bn(bn, p + 2))
+ return 0;
+ }
+ else
+ {
+ if (!BN_dec2bn(bn, p))
+ return 0;
+ }
+ if (*a == '-')
+ (*bn)->neg = 1;
+ return 1;
+ }
+
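[Editor's note: for the parsing direction, a small sketch of how BN_hex2bn/BN_dec2bn/BN_asc2bn above are typically called; not part of the diff. The first two return the number of characters consumed, while BN_asc2bn returns 1 or 0.]

#include <stdio.h>
#include <openssl/bn.h>

int main(void)
{
    BIGNUM *h = NULL, *d = NULL, *a = NULL;

    /* returns the number of hex characters parsed (4 here), 0 on error */
    if (!BN_hex2bn(&h, "1A2B"))
        return 1;
    /* returns the number of decimal characters parsed (6 here, incl. '-') */
    if (!BN_dec2bn(&d, "-12345"))
        return 1;
    /* accepts either form: a "0x"/"0X" prefix means hex, otherwise decimal */
    if (!BN_asc2bn(&a, "0xDEADBEEF"))
        return 1;

    BN_print_fp(stdout, a);   /* prints DEADBEEF */
    printf("\n");

    BN_free(h);
    BN_free(d);
    BN_free(a);
    return 0;
}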
+#ifndef OPENSSL_NO_BIO
+#ifndef OPENSSL_NO_FP_API
+int BN_print_fp(FILE *fp, const BIGNUM *a)
+ {
+ BIO *b;
+ int ret;
+
+ if ((b=BIO_new(BIO_s_file())) == NULL)
+ return(0);
+ BIO_set_fp(b,fp,BIO_NOCLOSE);
+ ret=BN_print(b,a);
+ BIO_free(b);
+ return(ret);
+ }
+#endif
+
+int BN_print(BIO *bp, const BIGNUM *a)
+ {
+ int i,j,v,z=0;
+ int ret=0;
+
+ if ((a->neg) && (BIO_write(bp,"-",1) != 1)) goto end;
+ if (BN_is_zero(a) && (BIO_write(bp,"0",1) != 1)) goto end;
+ for (i=a->top-1; i >=0; i--)
+ {
+ for (j=BN_BITS2-4; j >= 0; j-=4)
+ {
+ /* strip leading zeros */
+ v=((int)(a->d[i]>>(long)j))&0x0f;
+ if (z || (v != 0))
+ {
+ if (BIO_write(bp,&(Hex[v]),1) != 1)
+ goto end;
+ z=1;
+ }
+ }
+ }
+ ret=1;
+end:
+ return(ret);
+ }
+#endif
+
+char *BN_options(void)
+ {
+ static int init=0;
+ static char data[16];
+
+ if (!init)
+ {
+ init++;
+#ifdef BN_LLONG
+ BIO_snprintf(data,sizeof data,"bn(%d,%d)",
+ (int)sizeof(BN_ULLONG)*8,(int)sizeof(BN_ULONG)*8);
+#else
+ BIO_snprintf(data,sizeof data,"bn(%d,%d)",
+ (int)sizeof(BN_ULONG)*8,(int)sizeof(BN_ULONG)*8);
+#endif
+ }
+ return(data);
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_rand.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_rand.c
new file mode 100644
index 00000000..55676f07
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_rand.c
@@ -0,0 +1,375 @@
+/* crypto/bn/bn_rand.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+#include <openssl/rand.h>
+#include <openssl/sha.h>
+
+static int bnrand(int pseudorand, BIGNUM *rnd, int bits, int top, int bottom)
+ {
+ unsigned char *buf=NULL;
+ int ret=0,bit,bytes,mask;
+ time_t tim;
+
+ if (bits == 0)
+ {
+ BN_zero(rnd);
+ return 1;
+ }
+
+ bytes=(bits+7)/8;
+ bit=(bits-1)%8;
+ mask=0xff<<(bit+1);
+
+ buf=(unsigned char *)OPENSSL_malloc(bytes);
+ if (buf == NULL)
+ {
+ BNerr(BN_F_BNRAND,ERR_R_MALLOC_FAILURE);
+ goto err;
+ }
+
+ /* make a random number and set the top and bottom bits */
+ time(&tim);
+ RAND_add(&tim,sizeof(tim),0.0);
+
+ if (pseudorand)
+ {
+ if (RAND_pseudo_bytes(buf, bytes) == -1)
+ goto err;
+ }
+ else
+ {
+ if (RAND_bytes(buf, bytes) <= 0)
+ goto err;
+ }
+
+#if 1
+ if (pseudorand == 2)
+ {
+ /* generate patterns that are more likely to trigger BN
+ library bugs */
+ int i;
+ unsigned char c;
+
+ for (i = 0; i < bytes; i++)
+ {
+ RAND_pseudo_bytes(&c, 1);
+ if (c >= 128 && i > 0)
+ buf[i] = buf[i-1];
+ else if (c < 42)
+ buf[i] = 0;
+ else if (c < 84)
+ buf[i] = 255;
+ }
+ }
+#endif
+
+ if (top != -1)
+ {
+ if (top)
+ {
+ if (bit == 0)
+ {
+ buf[0]=1;
+ buf[1]|=0x80;
+ }
+ else
+ {
+ buf[0]|=(3<<(bit-1));
+ }
+ }
+ else
+ {
+ buf[0]|=(1<<bit);
+ }
+ }
+ buf[0] &= ~mask;
+ if (bottom) /* set bottom bit if requested */
+ buf[bytes-1]|=1;
+ if (!BN_bin2bn(buf,bytes,rnd)) goto err;
+ ret=1;
+err:
+ if (buf != NULL)
+ {
+ OPENSSL_cleanse(buf,bytes);
+ OPENSSL_free(buf);
+ }
+ bn_check_top(rnd);
+ return(ret);
+ }
+
+int BN_rand(BIGNUM *rnd, int bits, int top, int bottom)
+ {
+ return bnrand(0, rnd, bits, top, bottom);
+ }
+
+int BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom)
+ {
+ return bnrand(1, rnd, bits, top, bottom);
+ }
+
+#if 1
+int BN_bntest_rand(BIGNUM *rnd, int bits, int top, int bottom)
+ {
+ return bnrand(2, rnd, bits, top, bottom);
+ }
+#endif
+
+
+/* random number r: 0 <= r < range */
+static int bn_rand_range(int pseudo, BIGNUM *r, const BIGNUM *range)
+ {
+ int (*bn_rand)(BIGNUM *, int, int, int) = pseudo ? BN_pseudo_rand : BN_rand;
+ int n;
+ int count = 100;
+
+ if (range->neg || BN_is_zero(range))
+ {
+ BNerr(BN_F_BN_RAND_RANGE, BN_R_INVALID_RANGE);
+ return 0;
+ }
+
+ n = BN_num_bits(range); /* n > 0 */
+
+ /* BN_is_bit_set(range, n - 1) always holds */
+
+ if (n == 1)
+ BN_zero(r);
+ else if (!BN_is_bit_set(range, n - 2) && !BN_is_bit_set(range, n - 3))
+ {
+ /* range = 100..._2,
+ * so 3*range (= 11..._2) is exactly one bit longer than range */
+ do
+ {
+ if (!bn_rand(r, n + 1, -1, 0)) return 0;
+ /* If r < 3*range, use r := r MOD range
+ * (which is either r, r - range, or r - 2*range).
+ * Otherwise, iterate once more.
+ * Since 3*range = 11..._2, each iteration succeeds with
+ * probability >= .75. */
+ if (BN_cmp(r ,range) >= 0)
+ {
+ if (!BN_sub(r, r, range)) return 0;
+ if (BN_cmp(r, range) >= 0)
+ if (!BN_sub(r, r, range)) return 0;
+ }
+
+ if (!--count)
+ {
+ BNerr(BN_F_BN_RAND_RANGE, BN_R_TOO_MANY_ITERATIONS);
+ return 0;
+ }
+
+ }
+ while (BN_cmp(r, range) >= 0);
+ }
+ else
+ {
+ do
+ {
+ /* range = 11..._2 or range = 101..._2 */
+ if (!bn_rand(r, n, -1, 0)) return 0;
+
+ if (!--count)
+ {
+ BNerr(BN_F_BN_RAND_RANGE, BN_R_TOO_MANY_ITERATIONS);
+ return 0;
+ }
+ }
+ while (BN_cmp(r, range) >= 0);
+ }
+
+ bn_check_top(r);
+ return 1;
+ }
+
+
+int BN_rand_range(BIGNUM *r, const BIGNUM *range)
+ {
+ return bn_rand_range(0, r, range);
+ }
+
+int BN_pseudo_rand_range(BIGNUM *r, const BIGNUM *range)
+ {
+ return bn_rand_range(1, r, range);
+ }
+
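[Editor's note: a short sketch of the public entry point BN_rand_range defined above, not part of the patch. The rejection loop in bn_rand_range makes the result uniform; this assumes the library's RNG can seed itself, e.g. from /dev/urandom.]

#include <stdio.h>
#include <openssl/bn.h>

int main(void)
{
    BIGNUM *range = BN_new();
    BIGNUM *r = BN_new();
    int ok = 0;

    if (range != NULL && r != NULL &&
        BN_set_word(range, 1000003) &&      /* any positive upper bound */
        BN_rand_range(r, range))            /* uniform 0 <= r < range */
        ok = 1;

    if (ok) {
        printf("r = ");
        BN_print_fp(stdout, r);
        printf("\n");
    }
    BN_free(r);
    BN_free(range);
    return ok ? 0 : 1;
}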
+#ifndef OPENSSL_NO_SHA512
+/* BN_generate_dsa_nonce generates a random number 0 <= out < range. Unlike
+ * BN_rand_range, it also includes the contents of |priv| and |message| in the
+ * generation so that an RNG failure isn't fatal as long as |priv| remains
+ * secret. This is intended for use in DSA and ECDSA where an RNG weakness
+ * leads directly to private key exposure unless this function is used. */
+int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM* priv,
+ const unsigned char *message, size_t message_len,
+ BN_CTX *ctx)
+ {
+ SHA512_CTX sha;
+ /* We use 512 bits of random data per iteration to
+ * ensure that we have at least |range| bits of randomness. */
+ unsigned char random_bytes[64];
+ unsigned char digest[SHA512_DIGEST_LENGTH];
+ unsigned done, todo;
+ /* We generate |range|+8 bytes of random output. */
+ const unsigned num_k_bytes = BN_num_bytes(range) + 8;
+ unsigned char private_bytes[96];
+ unsigned char *k_bytes;
+ int ret = 0;
+
+ k_bytes = OPENSSL_malloc(num_k_bytes);
+ if (!k_bytes)
+ goto err;
+
+ /* We copy |priv| into a local buffer to avoid exposing its length. */
+ todo = sizeof(priv->d[0])*priv->top;
+ if (todo > sizeof(private_bytes))
+ {
+ /* No reasonable DSA or ECDSA key should have a private key
+ * this large and we don't handle this case in order to avoid
+ * leaking the length of the private key. */
+ BNerr(BN_F_BN_GENERATE_DSA_NONCE, BN_R_PRIVATE_KEY_TOO_LARGE);
+ goto err;
+ }
+ memcpy(private_bytes, priv->d, todo);
+ memset(private_bytes + todo, 0, sizeof(private_bytes) - todo);
+
+ for (done = 0; done < num_k_bytes;) {
+ if (RAND_bytes(random_bytes, sizeof(random_bytes)) != 1)
+ goto err;
+ SHA512_Init(&sha);
+ SHA512_Update(&sha, &done, sizeof(done));
+ SHA512_Update(&sha, private_bytes, sizeof(private_bytes));
+ SHA512_Update(&sha, message, message_len);
+ SHA512_Update(&sha, random_bytes, sizeof(random_bytes));
+ SHA512_Final(digest, &sha);
+
+ todo = num_k_bytes - done;
+ if (todo > SHA512_DIGEST_LENGTH)
+ todo = SHA512_DIGEST_LENGTH;
+ memcpy(k_bytes + done, digest, todo);
+ done += todo;
+ }
+
+ if (!BN_bin2bn(k_bytes, num_k_bytes, out))
+ goto err;
+ if (BN_mod(out, out, range, ctx) != 1)
+ goto err;
+ ret = 1;
+
+err:
+ if (k_bytes)
+ OPENSSL_free(k_bytes);
+ return ret;
+ }
+#endif /* OPENSSL_NO_SHA512 */
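[Editor's note: a hedged sketch of how a (EC)DSA signer in this tree might call BN_generate_dsa_nonce above; the names order, priv and msg are placeholders for the caller's group order, private key and message digest, not identifiers from the patch.]

#include <openssl/bn.h>

/* Returns a fresh nonce k with 0 <= k < order, or NULL on failure. */
static BIGNUM *make_nonce(const BIGNUM *order, const BIGNUM *priv,
                          const unsigned char *msg, size_t msg_len)
{
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *k = BN_new();

    if (ctx == NULL || k == NULL ||
        !BN_generate_dsa_nonce(k, order, priv, msg, msg_len, ctx)) {
        BN_free(k);     /* BN_free(NULL) is a no-op */
        k = NULL;
    }
    BN_CTX_free(ctx);
    return k;
}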
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_recp.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_recp.c
new file mode 100644
index 00000000..2e8efb8d
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_recp.c
@@ -0,0 +1,234 @@
+/* crypto/bn/bn_recp.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+void BN_RECP_CTX_init(BN_RECP_CTX *recp)
+ {
+ BN_init(&(recp->N));
+ BN_init(&(recp->Nr));
+ recp->num_bits=0;
+ recp->flags=0;
+ }
+
+BN_RECP_CTX *BN_RECP_CTX_new(void)
+ {
+ BN_RECP_CTX *ret;
+
+ if ((ret=(BN_RECP_CTX *)OPENSSL_malloc(sizeof(BN_RECP_CTX))) == NULL)
+ return(NULL);
+
+ BN_RECP_CTX_init(ret);
+ ret->flags=BN_FLG_MALLOCED;
+ return(ret);
+ }
+
+void BN_RECP_CTX_free(BN_RECP_CTX *recp)
+ {
+ if(recp == NULL)
+ return;
+
+ BN_free(&(recp->N));
+ BN_free(&(recp->Nr));
+ if (recp->flags & BN_FLG_MALLOCED)
+ OPENSSL_free(recp);
+ }
+
+int BN_RECP_CTX_set(BN_RECP_CTX *recp, const BIGNUM *d, BN_CTX *ctx)
+ {
+ if (!BN_copy(&(recp->N),d)) return 0;
+ BN_zero(&(recp->Nr));
+ recp->num_bits=BN_num_bits(d);
+ recp->shift=0;
+ return(1);
+ }
+
+int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y,
+ BN_RECP_CTX *recp, BN_CTX *ctx)
+ {
+ int ret=0;
+ BIGNUM *a;
+ const BIGNUM *ca;
+
+ BN_CTX_start(ctx);
+ if ((a = BN_CTX_get(ctx)) == NULL) goto err;
+ if (y != NULL)
+ {
+ if (x == y)
+ { if (!BN_sqr(a,x,ctx)) goto err; }
+ else
+ { if (!BN_mul(a,x,y,ctx)) goto err; }
+ ca = a;
+ }
+ else
+ ca=x; /* Just do the mod */
+
+ ret = BN_div_recp(NULL,r,ca,recp,ctx);
+err:
+ BN_CTX_end(ctx);
+ bn_check_top(r);
+ return(ret);
+ }
+
+int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m,
+ BN_RECP_CTX *recp, BN_CTX *ctx)
+ {
+ int i,j,ret=0;
+ BIGNUM *a,*b,*d,*r;
+
+ BN_CTX_start(ctx);
+ a=BN_CTX_get(ctx);
+ b=BN_CTX_get(ctx);
+ if (dv != NULL)
+ d=dv;
+ else
+ d=BN_CTX_get(ctx);
+ if (rem != NULL)
+ r=rem;
+ else
+ r=BN_CTX_get(ctx);
+ if (a == NULL || b == NULL || d == NULL || r == NULL) goto err;
+
+ if (BN_ucmp(m,&(recp->N)) < 0)
+ {
+ BN_zero(d);
+ if (!BN_copy(r,m)) return 0;
+ BN_CTX_end(ctx);
+ return(1);
+ }
+
+ /* We want the remainder
+ * Given input of ABCDEF / ab
+	 * we need to multiply ABCDEF by 3 digits of the reciprocal of ab
+ *
+ */
+
+ /* i := max(BN_num_bits(m), 2*BN_num_bits(N)) */
+ i=BN_num_bits(m);
+ j=recp->num_bits<<1;
+ if (j>i) i=j;
+
+ /* Nr := round(2^i / N) */
+ if (i != recp->shift)
+ recp->shift=BN_reciprocal(&(recp->Nr),&(recp->N),
+ i,ctx); /* BN_reciprocal returns i, or -1 for an error */
+ if (recp->shift == -1) goto err;
+
+ /* d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i - BN_num_bits(N)))|
+ * = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i - BN_num_bits(N)))|
+ * <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)|
+ * = |m/N|
+ */
+ if (!BN_rshift(a,m,recp->num_bits)) goto err;
+ if (!BN_mul(b,a,&(recp->Nr),ctx)) goto err;
+ if (!BN_rshift(d,b,i-recp->num_bits)) goto err;
+ d->neg=0;
+
+ if (!BN_mul(b,&(recp->N),d,ctx)) goto err;
+ if (!BN_usub(r,m,b)) goto err;
+ r->neg=0;
+
+#if 1
+ j=0;
+ while (BN_ucmp(r,&(recp->N)) >= 0)
+ {
+ if (j++ > 2)
+ {
+ BNerr(BN_F_BN_DIV_RECP,BN_R_BAD_RECIPROCAL);
+ goto err;
+ }
+ if (!BN_usub(r,r,&(recp->N))) goto err;
+ if (!BN_add_word(d,1)) goto err;
+ }
+#endif
+
+ r->neg=BN_is_zero(r)?0:m->neg;
+ d->neg=m->neg^recp->N.neg;
+ ret=1;
+err:
+ BN_CTX_end(ctx);
+ bn_check_top(dv);
+ bn_check_top(rem);
+ return(ret);
+ }
+
+/* len is the expected size of the result
+ * We actually calculate with an extra word of precision, so
+ * we can do faster division if the remainder is not required.
+ */
+/* r := 2^len / m */
+int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx)
+ {
+ int ret= -1;
+ BIGNUM *t;
+
+ BN_CTX_start(ctx);
+ if((t = BN_CTX_get(ctx)) == NULL) goto err;
+
+ if (!BN_set_bit(t,len)) goto err;
+
+ if (!BN_div(r,NULL,t,m,ctx)) goto err;
+
+ ret=len;
+err:
+ bn_check_top(r);
+ BN_CTX_end(ctx);
+ return(ret);
+ }
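[Editor's note: a small usage sketch for the reciprocal machinery above, not part of the patch. BN_RECP_CTX_set precomputes an approximation of 1/m once, after which BN_mod_mul_reciprocal does repeated modular multiplications against the same modulus without a full division each time. In a loop over many x,y pairs the BN_RECP_CTX would of course be set up once outside the loop.]

#include <openssl/bn.h>

/* r = (x * y) mod m, reusing a precomputed reciprocal of m. */
static int mod_mul_fixed_modulus(BIGNUM *r, const BIGNUM *x, const BIGNUM *y,
                                 const BIGNUM *m, BN_CTX *ctx)
{
    BN_RECP_CTX recp;
    int ok;

    BN_RECP_CTX_init(&recp);
    ok = BN_RECP_CTX_set(&recp, m, ctx) &&
         BN_mod_mul_reciprocal(r, x, y, &recp, ctx);
    BN_RECP_CTX_free(&recp);   /* stack-allocated: frees only the embedded BIGNUMs */
    return ok;
}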
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_shift.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_shift.c
new file mode 100644
index 00000000..a6fca2c4
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_shift.c
@@ -0,0 +1,223 @@
+/* crypto/bn/bn_shift.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+int BN_lshift1(BIGNUM *r, const BIGNUM *a)
+ {
+ register BN_ULONG *ap,*rp,t,c;
+ int i;
+
+ bn_check_top(r);
+ bn_check_top(a);
+
+ if (r != a)
+ {
+ r->neg=a->neg;
+ if (bn_wexpand(r,a->top+1) == NULL) return(0);
+ r->top=a->top;
+ }
+ else
+ {
+ if (bn_wexpand(r,a->top+1) == NULL) return(0);
+ }
+ ap=a->d;
+ rp=r->d;
+ c=0;
+ for (i=0; i<a->top; i++)
+ {
+ t= *(ap++);
+ *(rp++)=((t<<1)|c)&BN_MASK2;
+ c=(t & BN_TBIT)?1:0;
+ }
+ if (c)
+ {
+ *rp=1;
+ r->top++;
+ }
+ bn_check_top(r);
+ return(1);
+ }
+
+int BN_rshift1(BIGNUM *r, const BIGNUM *a)
+ {
+ BN_ULONG *ap,*rp,t,c;
+ int i,j;
+
+ bn_check_top(r);
+ bn_check_top(a);
+
+ if (BN_is_zero(a))
+ {
+ BN_zero(r);
+ return(1);
+ }
+ i = a->top;
+ ap= a->d;
+ j = i-(ap[i-1]==1);
+ if (a != r)
+ {
+ if (bn_wexpand(r,j) == NULL) return(0);
+ r->neg=a->neg;
+ }
+ rp=r->d;
+ t=ap[--i];
+ c=(t&1)?BN_TBIT:0;
+ if (t>>=1) rp[i]=t;
+ while (i>0)
+ {
+ t=ap[--i];
+ rp[i]=((t>>1)&BN_MASK2)|c;
+ c=(t&1)?BN_TBIT:0;
+ }
+ r->top=j;
+ bn_check_top(r);
+ return(1);
+ }
+
+int BN_lshift(BIGNUM *r, const BIGNUM *a, int n)
+ {
+ int i,nw,lb,rb;
+ BN_ULONG *t,*f;
+ BN_ULONG l;
+
+ bn_check_top(r);
+ bn_check_top(a);
+
+ r->neg=a->neg;
+ nw=n/BN_BITS2;
+ if (bn_wexpand(r,a->top+nw+1) == NULL) return(0);
+ lb=n%BN_BITS2;
+ rb=BN_BITS2-lb;
+ f=a->d;
+ t=r->d;
+ t[a->top+nw]=0;
+ if (lb == 0)
+ for (i=a->top-1; i>=0; i--)
+ t[nw+i]=f[i];
+ else
+ for (i=a->top-1; i>=0; i--)
+ {
+ l=f[i];
+ t[nw+i+1]|=(l>>rb)&BN_MASK2;
+ t[nw+i]=(l<<lb)&BN_MASK2;
+ }
+ memset(t,0,nw*sizeof(t[0]));
+/* for (i=0; i<nw; i++)
+ t[i]=0;*/
+ r->top=a->top+nw+1;
+ bn_correct_top(r);
+ bn_check_top(r);
+ return(1);
+ }
+
+int BN_rshift(BIGNUM *r, const BIGNUM *a, int n)
+ {
+ int i,j,nw,lb,rb;
+ BN_ULONG *t,*f;
+ BN_ULONG l,tmp;
+
+ bn_check_top(r);
+ bn_check_top(a);
+
+ nw=n/BN_BITS2;
+ rb=n%BN_BITS2;
+ lb=BN_BITS2-rb;
+ if (nw >= a->top || a->top == 0)
+ {
+ BN_zero(r);
+ return(1);
+ }
+ i = (BN_num_bits(a)-n+(BN_BITS2-1))/BN_BITS2;
+ if (r != a)
+ {
+ r->neg=a->neg;
+ if (bn_wexpand(r,i) == NULL) return(0);
+ }
+ else
+ {
+ if (n == 0)
+ return 1; /* or the copying loop will go berserk */
+ }
+
+ f= &(a->d[nw]);
+ t=r->d;
+ j=a->top-nw;
+ r->top=i;
+
+ if (rb == 0)
+ {
+ for (i=j; i != 0; i--)
+ *(t++)= *(f++);
+ }
+ else
+ {
+ l= *(f++);
+ for (i=j-1; i != 0; i--)
+ {
+ tmp =(l>>rb)&BN_MASK2;
+ l= *(f++);
+ *(t++) =(tmp|(l<<lb))&BN_MASK2;
+ }
+ if ((l = (l>>rb)&BN_MASK2)) *(t) = l;
+ }
+ bn_check_top(r);
+ return(1);
+ }
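[Editor's note: for non-negative inputs the shifts above behave like multiplication and floor division by powers of two; a quick standalone check, not part of the patch.]

#include <assert.h>
#include <openssl/bn.h>

int main(void)
{
    BIGNUM *a = BN_new(), *l = BN_new(), *r = BN_new();

    assert(a && l && r);
    assert(BN_set_word(a, 1000));
    assert(BN_lshift(l, a, 5));          /* l = 1000 * 2^5 = 32000 */
    assert(BN_rshift(r, a, 3));          /* r = floor(1000 / 2^3) = 125 */
    assert(BN_is_word(l, 32000));
    assert(BN_is_word(r, 125));

    BN_free(a); BN_free(l); BN_free(r);
    return 0;
}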
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqr.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqr.c
new file mode 100644
index 00000000..270d0cd3
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqr.c
@@ -0,0 +1,294 @@
+/* crypto/bn/bn_sqr.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+/* r must not be a */
+/* I've just gone over this and it is now 20% faster on x86 - eay - 27 Jun 96 */
+int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx)
+ {
+ int max,al;
+ int ret = 0;
+ BIGNUM *tmp,*rr;
+
+#ifdef BN_COUNT
+ fprintf(stderr,"BN_sqr %d * %d\n",a->top,a->top);
+#endif
+ bn_check_top(a);
+
+ al=a->top;
+ if (al <= 0)
+ {
+ r->top=0;
+ return 1;
+ }
+
+ BN_CTX_start(ctx);
+ rr=(a != r) ? r : BN_CTX_get(ctx);
+ tmp=BN_CTX_get(ctx);
+ if (!rr || !tmp) goto err;
+
+ max = 2 * al; /* Non-zero (from above) */
+ if (bn_wexpand(rr,max) == NULL) goto err;
+
+ if (al == 4)
+ {
+#ifndef BN_SQR_COMBA
+ BN_ULONG t[8];
+ bn_sqr_normal(rr->d,a->d,4,t);
+#else
+ bn_sqr_comba4(rr->d,a->d);
+#endif
+ }
+ else if (al == 8)
+ {
+#ifndef BN_SQR_COMBA
+ BN_ULONG t[16];
+ bn_sqr_normal(rr->d,a->d,8,t);
+#else
+ bn_sqr_comba8(rr->d,a->d);
+#endif
+ }
+ else
+ {
+#if defined(BN_RECURSION)
+ if (al < BN_SQR_RECURSIVE_SIZE_NORMAL)
+ {
+ BN_ULONG t[BN_SQR_RECURSIVE_SIZE_NORMAL*2];
+ bn_sqr_normal(rr->d,a->d,al,t);
+ }
+ else
+ {
+ int j,k;
+
+ j=BN_num_bits_word((BN_ULONG)al);
+ j=1<<(j-1);
+ k=j+j;
+ if (al == j)
+ {
+ if (bn_wexpand(tmp,k*2) == NULL) goto err;
+ bn_sqr_recursive(rr->d,a->d,al,tmp->d);
+ }
+ else
+ {
+ if (bn_wexpand(tmp,max) == NULL) goto err;
+ bn_sqr_normal(rr->d,a->d,al,tmp->d);
+ }
+ }
+#else
+ if (bn_wexpand(tmp,max) == NULL) goto err;
+ bn_sqr_normal(rr->d,a->d,al,tmp->d);
+#endif
+ }
+
+ rr->neg=0;
+ /* If the most-significant half of the top word of 'a' is zero, then
+	 * the square of 'a' will take up max-1 words. */
+ if(a->d[al - 1] == (a->d[al - 1] & BN_MASK2l))
+ rr->top = max - 1;
+ else
+ rr->top = max;
+ if (rr != r) BN_copy(r,rr);
+ ret = 1;
+ err:
+ bn_check_top(rr);
+ bn_check_top(tmp);
+ BN_CTX_end(ctx);
+ return(ret);
+ }
+
+/* tmp must have 2*n words */
+void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp)
+ {
+ int i,j,max;
+ const BN_ULONG *ap;
+ BN_ULONG *rp;
+
+ max=n*2;
+ ap=a;
+ rp=r;
+ rp[0]=rp[max-1]=0;
+ rp++;
+ j=n;
+
+ if (--j > 0)
+ {
+ ap++;
+ rp[j]=bn_mul_words(rp,ap,j,ap[-1]);
+ rp+=2;
+ }
+
+ for (i=n-2; i>0; i--)
+ {
+ j--;
+ ap++;
+ rp[j]=bn_mul_add_words(rp,ap,j,ap[-1]);
+ rp+=2;
+ }
+
+ bn_add_words(r,r,r,max);
+
+ /* There will not be a carry */
+
+ bn_sqr_words(tmp,a,n);
+
+ bn_add_words(r,r,tmp,max);
+ }
+
+#ifdef BN_RECURSION
+/* r is 2*n words in size,
+ * a and b are both n words in size. (There's not actually a 'b' here ...)
+ * n must be a power of 2.
+ * We multiply and return the result.
+ * t must be 2*n words in size
+ * We calculate
+ * a[0]*b[0]
+ * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0])
+ * a[1]*b[1]
+ */
+void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t)
+ {
+ int n=n2/2;
+ int zero,c1;
+ BN_ULONG ln,lo,*p;
+
+#ifdef BN_COUNT
+ fprintf(stderr," bn_sqr_recursive %d * %d\n",n2,n2);
+#endif
+ if (n2 == 4)
+ {
+#ifndef BN_SQR_COMBA
+ bn_sqr_normal(r,a,4,t);
+#else
+ bn_sqr_comba4(r,a);
+#endif
+ return;
+ }
+ else if (n2 == 8)
+ {
+#ifndef BN_SQR_COMBA
+ bn_sqr_normal(r,a,8,t);
+#else
+ bn_sqr_comba8(r,a);
+#endif
+ return;
+ }
+ if (n2 < BN_SQR_RECURSIVE_SIZE_NORMAL)
+ {
+ bn_sqr_normal(r,a,n2,t);
+ return;
+ }
+ /* r=(a[0]-a[1])*(a[1]-a[0]) */
+ c1=bn_cmp_words(a,&(a[n]),n);
+ zero=0;
+ if (c1 > 0)
+ bn_sub_words(t,a,&(a[n]),n);
+ else if (c1 < 0)
+ bn_sub_words(t,&(a[n]),a,n);
+ else
+ zero=1;
+
+ /* The result will always be negative unless it is zero */
+ p= &(t[n2*2]);
+
+ if (!zero)
+ bn_sqr_recursive(&(t[n2]),t,n,p);
+ else
+ memset(&(t[n2]),0,n2*sizeof(BN_ULONG));
+ bn_sqr_recursive(r,a,n,p);
+ bn_sqr_recursive(&(r[n2]),&(a[n]),n,p);
+
+	/* t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
+	 * r[10] holds (a[0]*a[0])
+	 * r[32] holds (a[1]*a[1])
+	 */
+
+ c1=(int)(bn_add_words(t,r,&(r[n2]),n2));
+
+ /* t[32] is negative */
+ c1-=(int)(bn_sub_words(&(t[n2]),t,&(t[n2]),n2));
+
+ /* t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
+ * r[10] holds (a[0]*a[0])
+ * r[32] holds (a[1]*a[1])
+ * c1 holds the carry bits
+ */
+ c1+=(int)(bn_add_words(&(r[n]),&(r[n]),&(t[n2]),n2));
+ if (c1)
+ {
+ p= &(r[n+n2]);
+ lo= *p;
+ ln=(lo+c1)&BN_MASK2;
+ *p=ln;
+
+		/* The carry propagation will stop before we overwrite
+		 * words we should not touch */
+ if (ln < (BN_ULONG)c1)
+ {
+ do {
+ p++;
+ lo= *p;
+ ln=(lo+1)&BN_MASK2;
+ *p=ln;
+ } while (ln == 0);
+ }
+ }
+ }
+#endif
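[Editor's note: the recursion above relies on the identity 2*a0*a1 = a0^2 + a1^2 - (a0-a1)^2, which turns one full-size squaring into three half-size squarings. A standalone integer check of that identity follows; plain C, not OpenSSL code and not part of the patch.]

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t B  = UINT64_C(1) << 16;      /* half-word base */
    const uint64_t a  = 0x12345678;             /* a = hi*B + lo */
    const uint64_t hi = a >> 16, lo = a & 0xffff;
    const uint64_t d  = hi > lo ? hi - lo : lo - hi;

    /* cross term 2*hi*lo recovered from three squarings */
    const uint64_t cross = hi*hi + lo*lo - d*d;
    const uint64_t sq    = hi*hi*B*B + cross*B + lo*lo;

    assert(cross == 2*hi*lo);
    assert(sq == a*a);
    return 0;
}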
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqrt.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqrt.c
new file mode 100644
index 00000000..6beaf9e5
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_sqrt.c
@@ -0,0 +1,393 @@
+/* crypto/bn/bn_sqrt.c */
+/* Written by Lenka Fibikova <fibikova@exp-math.uni-essen.de>
+ * and Bodo Moeller for the OpenSSL project. */
+/* ====================================================================
+ * Copyright (c) 1998-2000 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+
+BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
+/* Returns 'ret' such that
+ * ret^2 == a (mod p),
+ * using the Tonelli/Shanks algorithm (cf. Henri Cohen, "A Course
+ * in Algebraic Computational Number Theory", algorithm 1.5.1).
+ * 'p' must be prime!
+ */
+ {
+ BIGNUM *ret = in;
+ int err = 1;
+ int r;
+ BIGNUM *A, *b, *q, *t, *x, *y;
+ int e, i, j;
+
+ if (!BN_is_odd(p) || BN_abs_is_word(p, 1))
+ {
+ if (BN_abs_is_word(p, 2))
+ {
+ if (ret == NULL)
+ ret = BN_new();
+ if (ret == NULL)
+ goto end;
+ if (!BN_set_word(ret, BN_is_bit_set(a, 0)))
+ {
+ if (ret != in)
+ BN_free(ret);
+ return NULL;
+ }
+ bn_check_top(ret);
+ return ret;
+ }
+
+ BNerr(BN_F_BN_MOD_SQRT, BN_R_P_IS_NOT_PRIME);
+ return(NULL);
+ }
+
+ if (BN_is_zero(a) || BN_is_one(a))
+ {
+ if (ret == NULL)
+ ret = BN_new();
+ if (ret == NULL)
+ goto end;
+ if (!BN_set_word(ret, BN_is_one(a)))
+ {
+ if (ret != in)
+ BN_free(ret);
+ return NULL;
+ }
+ bn_check_top(ret);
+ return ret;
+ }
+
+ BN_CTX_start(ctx);
+ A = BN_CTX_get(ctx);
+ b = BN_CTX_get(ctx);
+ q = BN_CTX_get(ctx);
+ t = BN_CTX_get(ctx);
+ x = BN_CTX_get(ctx);
+ y = BN_CTX_get(ctx);
+ if (y == NULL) goto end;
+
+ if (ret == NULL)
+ ret = BN_new();
+ if (ret == NULL) goto end;
+
+ /* A = a mod p */
+ if (!BN_nnmod(A, a, p, ctx)) goto end;
+
+ /* now write |p| - 1 as 2^e*q where q is odd */
+ e = 1;
+ while (!BN_is_bit_set(p, e))
+ e++;
+ /* we'll set q later (if needed) */
+
+ if (e == 1)
+ {
+ /* The easy case: (|p|-1)/2 is odd, so 2 has an inverse
+ * modulo (|p|-1)/2, and square roots can be computed
+ * directly by modular exponentiation.
+ * We have
+ * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
+ * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
+ */
+ if (!BN_rshift(q, p, 2)) goto end;
+ q->neg = 0;
+ if (!BN_add_word(q, 1)) goto end;
+ if (!BN_mod_exp(ret, A, q, p, ctx)) goto end;
+ err = 0;
+ goto vrfy;
+ }
+
+ if (e == 2)
+ {
+ /* |p| == 5 (mod 8)
+ *
+ * In this case 2 is always a non-square since
+ * Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime.
+ * So if a really is a square, then 2*a is a non-square.
+ * Thus for
+ * b := (2*a)^((|p|-5)/8),
+ * i := (2*a)*b^2
+ * we have
+ * i^2 = (2*a)^((1 + (|p|-5)/4)*2)
+ * = (2*a)^((p-1)/2)
+ * = -1;
+ * so if we set
+ * x := a*b*(i-1),
+ * then
+ * x^2 = a^2 * b^2 * (i^2 - 2*i + 1)
+ * = a^2 * b^2 * (-2*i)
+ * = a*(-i)*(2*a*b^2)
+ * = a*(-i)*i
+ * = a.
+ *
+ * (This is due to A.O.L. Atkin,
+ * <URL: http://listserv.nodak.edu/scripts/wa.exe?A2=ind9211&L=nmbrthry&O=T&P=562>,
+ * November 1992.)
+ */
+
+ /* t := 2*a */
+ if (!BN_mod_lshift1_quick(t, A, p)) goto end;
+
+ /* b := (2*a)^((|p|-5)/8) */
+ if (!BN_rshift(q, p, 3)) goto end;
+ q->neg = 0;
+ if (!BN_mod_exp(b, t, q, p, ctx)) goto end;
+
+ /* y := b^2 */
+ if (!BN_mod_sqr(y, b, p, ctx)) goto end;
+
+ /* t := (2*a)*b^2 - 1*/
+ if (!BN_mod_mul(t, t, y, p, ctx)) goto end;
+ if (!BN_sub_word(t, 1)) goto end;
+
+ /* x = a*b*t */
+ if (!BN_mod_mul(x, A, b, p, ctx)) goto end;
+ if (!BN_mod_mul(x, x, t, p, ctx)) goto end;
+
+ if (!BN_copy(ret, x)) goto end;
+ err = 0;
+ goto vrfy;
+ }
+
+ /* e > 2, so we really have to use the Tonelli/Shanks algorithm.
+ * First, find some y that is not a square. */
+ if (!BN_copy(q, p)) goto end; /* use 'q' as temp */
+ q->neg = 0;
+ i = 2;
+ do
+ {
+ /* For efficiency, try small numbers first;
+ * if this fails, try random numbers.
+ */
+ if (i < 22)
+ {
+ if (!BN_set_word(y, i)) goto end;
+ }
+ else
+ {
+ if (!BN_pseudo_rand(y, BN_num_bits(p), 0, 0)) goto end;
+ if (BN_ucmp(y, p) >= 0)
+ {
+ if (!(p->neg ? BN_add : BN_sub)(y, y, p)) goto end;
+ }
+ /* now 0 <= y < |p| */
+ if (BN_is_zero(y))
+ if (!BN_set_word(y, i)) goto end;
+ }
+
+ r = BN_kronecker(y, q, ctx); /* here 'q' is |p| */
+ if (r < -1) goto end;
+ if (r == 0)
+ {
+ /* m divides p */
+ BNerr(BN_F_BN_MOD_SQRT, BN_R_P_IS_NOT_PRIME);
+ goto end;
+ }
+ }
+ while (r == 1 && ++i < 82);
+
+ if (r != -1)
+ {
+ /* Many rounds and still no non-square -- this is more likely
+ * a bug than just bad luck.
+ * Even if p is not prime, we should have found some y
+ * such that r == -1.
+ */
+ BNerr(BN_F_BN_MOD_SQRT, BN_R_TOO_MANY_ITERATIONS);
+ goto end;
+ }
+
+ /* Here's our actual 'q': */
+ if (!BN_rshift(q, q, e)) goto end;
+
+ /* Now that we have some non-square, we can find an element
+ * of order 2^e by computing its q'th power. */
+ if (!BN_mod_exp(y, y, q, p, ctx)) goto end;
+ if (BN_is_one(y))
+ {
+ BNerr(BN_F_BN_MOD_SQRT, BN_R_P_IS_NOT_PRIME);
+ goto end;
+ }
+
+ /* Now we know that (if p is indeed prime) there is an integer
+ * k, 0 <= k < 2^e, such that
+ *
+ * a^q * y^k == 1 (mod p).
+ *
+ * As a^q is a square and y is not, k must be even.
+ * q+1 is even, too, so there is an element
+ *
+ * X := a^((q+1)/2) * y^(k/2),
+ *
+ * and it satisfies
+ *
+ * X^2 = a^q * a * y^k
+ * = a,
+ *
+ * so it is the square root that we are looking for.
+ */
+
+ /* t := (q-1)/2 (note that q is odd) */
+ if (!BN_rshift1(t, q)) goto end;
+
+ /* x := a^((q-1)/2) */
+ if (BN_is_zero(t)) /* special case: p = 2^e + 1 */
+ {
+ if (!BN_nnmod(t, A, p, ctx)) goto end;
+ if (BN_is_zero(t))
+ {
+ /* special case: a == 0 (mod p) */
+ BN_zero(ret);
+ err = 0;
+ goto end;
+ }
+ else
+ if (!BN_one(x)) goto end;
+ }
+ else
+ {
+ if (!BN_mod_exp(x, A, t, p, ctx)) goto end;
+ if (BN_is_zero(x))
+ {
+ /* special case: a == 0 (mod p) */
+ BN_zero(ret);
+ err = 0;
+ goto end;
+ }
+ }
+
+ /* b := a*x^2 (= a^q) */
+ if (!BN_mod_sqr(b, x, p, ctx)) goto end;
+ if (!BN_mod_mul(b, b, A, p, ctx)) goto end;
+
+ /* x := a*x (= a^((q+1)/2)) */
+ if (!BN_mod_mul(x, x, A, p, ctx)) goto end;
+
+ while (1)
+ {
+ /* Now b is a^q * y^k for some even k (0 <= k < 2^E
+ * where E refers to the original value of e, which we
+ * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
+ *
+ * We have a*b = x^2,
+ * y^2^(e-1) = -1,
+ * b^2^(e-1) = 1.
+ */
+
+ if (BN_is_one(b))
+ {
+ if (!BN_copy(ret, x)) goto end;
+ err = 0;
+ goto vrfy;
+ }
+
+
+ /* find smallest i such that b^(2^i) = 1 */
+ i = 1;
+ if (!BN_mod_sqr(t, b, p, ctx)) goto end;
+ while (!BN_is_one(t))
+ {
+ i++;
+ if (i == e)
+ {
+ BNerr(BN_F_BN_MOD_SQRT, BN_R_NOT_A_SQUARE);
+ goto end;
+ }
+ if (!BN_mod_mul(t, t, t, p, ctx)) goto end;
+ }
+
+
+ /* t := y^2^(e - i - 1) */
+ if (!BN_copy(t, y)) goto end;
+ for (j = e - i - 1; j > 0; j--)
+ {
+ if (!BN_mod_sqr(t, t, p, ctx)) goto end;
+ }
+ if (!BN_mod_mul(y, t, t, p, ctx)) goto end;
+ if (!BN_mod_mul(x, x, t, p, ctx)) goto end;
+ if (!BN_mod_mul(b, b, y, p, ctx)) goto end;
+ e = i;
+ }
+
+ vrfy:
+ if (!err)
+ {
+ /* verify the result -- the input might have been not a square
+ * (test added in 0.9.8) */
+
+ if (!BN_mod_sqr(x, ret, p, ctx))
+ err = 1;
+
+ if (!err && 0 != BN_cmp(x, A))
+ {
+ BNerr(BN_F_BN_MOD_SQRT, BN_R_NOT_A_SQUARE);
+ err = 1;
+ }
+ }
+
+ end:
+ if (err)
+ {
+ if (ret != NULL && ret != in)
+ {
+ BN_clear_free(ret);
+ }
+ ret = NULL;
+ }
+ BN_CTX_end(ctx);
+ bn_check_top(ret);
+ return ret;
+ }
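[Editor's note: a small usage sketch for BN_mod_sqrt above, not part of the patch. With a = 2 and p = 7 the result is 3 or 4, and the BN_mod_sqr check below mirrors the verification the function itself performs.]

#include <stdio.h>
#include <openssl/bn.h>

int main(void)
{
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *a = BN_new(), *p = BN_new(), *r = NULL, *chk = BN_new();
    int ok = 0;

    if (ctx && a && p && chk &&
        BN_set_word(a, 2) && BN_set_word(p, 7) &&
        (r = BN_mod_sqrt(NULL, a, p, ctx)) != NULL &&
        BN_mod_sqr(chk, r, p, ctx) &&
        BN_cmp(chk, a) == 0) {
        printf("sqrt(2) mod 7 = ");
        BN_print_fp(stdout, r);       /* prints 3 or 4 (in hex) */
        printf("\n");
        ok = 1;
    }

    BN_free(a); BN_free(p); BN_free(r); BN_free(chk);
    BN_CTX_free(ctx);
    return ok ? 0 : 1;
}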
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bn_word.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_word.c
new file mode 100644
index 00000000..de83a15b
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bn_word.c
@@ -0,0 +1,238 @@
+/* crypto/bn/bn_word.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include "cryptlib.h"
+#include "bn_lcl.h"
+
+BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w)
+ {
+#ifndef BN_LLONG
+ BN_ULONG ret=0;
+#else
+ BN_ULLONG ret=0;
+#endif
+ int i;
+
+ if (w == 0)
+ return (BN_ULONG)-1;
+
+ bn_check_top(a);
+ w&=BN_MASK2;
+ for (i=a->top-1; i>=0; i--)
+ {
+#ifndef BN_LLONG
+ ret=((ret<<BN_BITS4)|((a->d[i]>>BN_BITS4)&BN_MASK2l))%w;
+ ret=((ret<<BN_BITS4)|(a->d[i]&BN_MASK2l))%w;
+#else
+ ret=(BN_ULLONG)(((ret<<(BN_ULLONG)BN_BITS2)|a->d[i])%
+ (BN_ULLONG)w);
+#endif
+ }
+ return((BN_ULONG)ret);
+ }
+
+BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w)
+ {
+ BN_ULONG ret = 0;
+ int i, j;
+
+ bn_check_top(a);
+ w &= BN_MASK2;
+
+ if (!w)
+		/* actually this is an error (division by zero) */
+ return (BN_ULONG)-1;
+ if (a->top == 0)
+ return 0;
+
+ /* normalize input (so bn_div_words doesn't complain) */
+ j = BN_BITS2 - BN_num_bits_word(w);
+ w <<= j;
+ if (!BN_lshift(a, a, j))
+ return (BN_ULONG)-1;
+
+ for (i=a->top-1; i>=0; i--)
+ {
+ BN_ULONG l,d;
+
+ l=a->d[i];
+ d=bn_div_words(ret,l,w);
+ ret=(l-((d*w)&BN_MASK2))&BN_MASK2;
+ a->d[i]=d;
+ }
+ if ((a->top > 0) && (a->d[a->top-1] == 0))
+ a->top--;
+ ret >>= j;
+ bn_check_top(a);
+ return(ret);
+ }
+
+int BN_add_word(BIGNUM *a, BN_ULONG w)
+ {
+ BN_ULONG l;
+ int i;
+
+ bn_check_top(a);
+ w &= BN_MASK2;
+
+ /* degenerate case: w is zero */
+ if (!w) return 1;
+ /* degenerate case: a is zero */
+ if(BN_is_zero(a)) return BN_set_word(a, w);
+ /* handle 'a' when negative */
+ if (a->neg)
+ {
+ a->neg=0;
+ i=BN_sub_word(a,w);
+ if (!BN_is_zero(a))
+ a->neg=!(a->neg);
+ return(i);
+ }
+ for (i=0;w!=0 && i<a->top;i++)
+ {
+ a->d[i] = l = (a->d[i]+w)&BN_MASK2;
+ w = (w>l)?1:0;
+ }
+ if (w && i==a->top)
+ {
+ if (bn_wexpand(a,a->top+1) == NULL) return 0;
+ a->top++;
+ a->d[i]=w;
+ }
+ bn_check_top(a);
+ return(1);
+ }
+
+int BN_sub_word(BIGNUM *a, BN_ULONG w)
+ {
+ int i;
+
+ bn_check_top(a);
+ w &= BN_MASK2;
+
+ /* degenerate case: w is zero */
+ if (!w) return 1;
+ /* degenerate case: a is zero */
+ if(BN_is_zero(a))
+ {
+ i = BN_set_word(a,w);
+ if (i != 0)
+ BN_set_negative(a, 1);
+ return i;
+ }
+ /* handle 'a' when negative */
+ if (a->neg)
+ {
+ a->neg=0;
+ i=BN_add_word(a,w);
+ a->neg=1;
+ return(i);
+ }
+
+ if ((a->top == 1) && (a->d[0] < w))
+ {
+ a->d[0]=w-a->d[0];
+ a->neg=1;
+ return(1);
+ }
+ i=0;
+ for (;;)
+ {
+ if (a->d[i] >= w)
+ {
+ a->d[i]-=w;
+ break;
+ }
+ else
+ {
+ a->d[i]=(a->d[i]-w)&BN_MASK2;
+ i++;
+ w=1;
+ }
+ }
+ if ((a->d[i] == 0) && (i == (a->top-1)))
+ a->top--;
+ bn_check_top(a);
+ return(1);
+ }
+
+int BN_mul_word(BIGNUM *a, BN_ULONG w)
+ {
+ BN_ULONG ll;
+
+ bn_check_top(a);
+ w&=BN_MASK2;
+ if (a->top)
+ {
+ if (w == 0)
+ BN_zero(a);
+ else
+ {
+ ll=bn_mul_words(a->d,a->d,a->top,w);
+ if (ll)
+ {
+ if (bn_wexpand(a,a->top+1) == NULL) return(0);
+ a->d[a->top++]=ll;
+ }
+ }
+ }
+ bn_check_top(a);
+ return(1);
+ }
+
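[Editor's note: the word-sized helpers above are convenient for small constant arithmetic; a quick sketch, not part of the patch, that builds 10! and reduces it modulo a small word.]

#include <stdio.h>
#include <openssl/bn.h>

int main(void)
{
    BIGNUM *f = BN_new();
    BN_ULONG i, rem;

    if (f == NULL || !BN_one(f))
        return 1;
    for (i = 2; i <= 10; i++)
        if (!BN_mul_word(f, i))       /* f *= i */
            return 1;
    /* 10! = 3628800; 3628800 mod 13 = 6 */
    rem = BN_mod_word(f, 13);
    printf("10! mod 13 = %lu\n", (unsigned long)rem);
    BN_free(f);
    return 0;
}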
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bnspeed.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bnspeed.c
new file mode 100644
index 00000000..b554ac8c
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bnspeed.c
@@ -0,0 +1,233 @@
+/* unused */
+
+/* crypto/bn/bnspeed.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+/* most of this code has been pilfered from my libdes speed.c program */
+
+#define BASENUM 1000000
+#undef PROG
+#define PROG bnspeed_main
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <openssl/crypto.h>
+#include <openssl/err.h>
+
+#if !defined(OPENSSL_SYS_MSDOS) && (!defined(OPENSSL_SYS_VMS) || defined(__DECC)) && !defined(OPENSSL_SYS_MACOSX)
+#define TIMES
+#endif
+
+#ifndef _IRIX
+#include <time.h>
+#endif
+#ifdef TIMES
+#include <sys/types.h>
+#include <sys/times.h>
+#endif
+
+/* Depending on the VMS version, the tms structure is perhaps defined.
+ The __TMS macro will show if it was. If it wasn't defined, we should
+ undefine TIMES, since that tells the rest of the program how things
+ should be handled. -- Richard Levitte */
+#if defined(OPENSSL_SYS_VMS_DECC) && !defined(__TMS)
+#undef TIMES
+#endif
+
+#ifndef TIMES
+#include <sys/timeb.h>
+#endif
+
+#if defined(sun) || defined(__ultrix)
+#define _POSIX_SOURCE
+#include <limits.h>
+#include <sys/param.h>
+#endif
+
+#include <openssl/bn.h>
+#include <openssl/x509.h>
+
+/* The following is from the times(3) man page. It may need to be changed */
+#ifndef HZ
+# ifndef CLK_TCK
+# ifndef _BSD_CLK_TCK_ /* FreeBSD hack */
+# define HZ 100.0
+# else /* _BSD_CLK_TCK_ */
+# define HZ ((double)_BSD_CLK_TCK_)
+# endif
+# else /* CLK_TCK */
+# define HZ ((double)CLK_TCK)
+# endif
+#endif
+
+#undef BUFSIZE
+#define BUFSIZE ((long)1024*8)
+int run=0;
+
+static double Time_F(int s);
+#define START 0
+#define STOP 1
+
+static double Time_F(int s)
+ {
+ double ret;
+#ifdef TIMES
+ static struct tms tstart,tend;
+
+ if (s == START)
+ {
+ times(&tstart);
+ return(0);
+ }
+ else
+ {
+ times(&tend);
+ ret=((double)(tend.tms_utime-tstart.tms_utime))/HZ;
+ return((ret < 1e-3)?1e-3:ret);
+ }
+#else /* !times() */
+ static struct timeb tstart,tend;
+ long i;
+
+ if (s == START)
+ {
+ ftime(&tstart);
+ return(0);
+ }
+ else
+ {
+ ftime(&tend);
+ i=(long)tend.millitm-(long)tstart.millitm;
+ ret=((double)(tend.time-tstart.time))+((double)i)/1000.0;
+ return((ret < 0.001)?0.001:ret);
+ }
+#endif
+ }
+
+#define NUM_SIZES 5
+static int sizes[NUM_SIZES]={128,256,512,1024,2048};
+/*static int sizes[NUM_SIZES]={59,179,299,419,539}; */
+
+void do_mul(BIGNUM *r,BIGNUM *a,BIGNUM *b,BN_CTX *ctx);
+
+int main(int argc, char **argv)
+ {
+ BN_CTX *ctx;
+ BIGNUM a,b,c;
+
+ ctx=BN_CTX_new();
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+
+ do_mul(&a,&b,&c,ctx);
+ }
+
+void do_mul(BIGNUM *r, BIGNUM *a, BIGNUM *b, BN_CTX *ctx)
+ {
+ int i,j,k;
+ double tm;
+ long num;
+
+ for (i=0; i<NUM_SIZES; i++)
+ {
+ num=BASENUM;
+ if (i) num/=(i*3);
+ BN_rand(a,sizes[i],1,0);
+ for (j=i; j<NUM_SIZES; j++)
+ {
+ BN_rand(b,sizes[j],1,0);
+ Time_F(START);
+ for (k=0; k<num; k++)
+ BN_mul(r,b,a,ctx);
+ tm=Time_F(STOP);
+ printf("mul %4d x %4d -> %8.3fms\n",sizes[i],sizes[j],tm*1000.0/num);
+ }
+ }
+
+ for (i=0; i<NUM_SIZES; i++)
+ {
+ num=BASENUM;
+ if (i) num/=(i*3);
+ BN_rand(a,sizes[i],1,0);
+ Time_F(START);
+ for (k=0; k<num; k++)
+ BN_sqr(r,a,ctx);
+ tm=Time_F(STOP);
+ printf("sqr %4d x %4d -> %8.3fms\n",sizes[i],sizes[i],tm*1000.0/num);
+ }
+
+ for (i=0; i<NUM_SIZES; i++)
+ {
+ num=BASENUM/10;
+ if (i) num/=(i*3);
+ BN_rand(a,sizes[i]-1,1,0);
+ for (j=i; j<NUM_SIZES; j++)
+ {
+ BN_rand(b,sizes[j],1,0);
+ Time_F(START);
+			for (k=0; k<num; k++)
+ BN_div(r, NULL, b, a,ctx);
+ tm=Time_F(STOP);
+ printf("div %4d / %4d -> %8.3fms\n",sizes[j],sizes[i]-1,tm*1000.0/num);
+ }
+ }
+ }
+
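Time_F() above is built on the legacy times()/ftime() pair; purely as an illustration (not part of the patch), the same START/STOP idiom can be expressed with ISO C clock(), which only approximates the user-CPU measurement used here.

#include <time.h>

/* Same usage pattern as Time_F() above (START == 0, STOP == 1). */
static double Time_C(int s)
	{
	static clock_t tstart;
	double ret;

	if (s == 0)				/* START */
		{
		tstart = clock();
		return 0.0;
		}
	ret = (double)(clock() - tstart) / CLOCKS_PER_SEC;	/* STOP */
	return (ret < 1e-3) ? 1e-3 : ret;
	}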
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/bntest.c b/ics-openvpn-stripped/main/openssl/crypto/bn/bntest.c
new file mode 100644
index 00000000..06f5954a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/bntest.c
@@ -0,0 +1,2013 @@
+/* crypto/bn/bntest.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* ====================================================================
+ * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
+ *
+ * Portions of the attached software ("Contribution") are developed by
+ * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
+ *
+ * The Contribution is licensed pursuant to the Eric Young open source
+ * license provided above.
+ *
+ * The binary polynomial arithmetic software is originally written by
+ * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems Laboratories.
+ *
+ */
+
+/* Until the key-gen callbacks are modified to use newer prototypes, we allow
+ * deprecated functions for openssl-internal code */
+#ifdef OPENSSL_NO_DEPRECATED
+#undef OPENSSL_NO_DEPRECATED
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "e_os.h"
+
+#include <openssl/bio.h>
+#include <openssl/bn.h>
+#include <openssl/rand.h>
+#include <openssl/x509.h>
+#include <openssl/err.h>
+
+const int num0 = 100; /* number of tests */
+const int num1 = 50; /* additional tests for some functions */
+const int num2 = 5; /* number of tests for slow functions */
+
+int test_add(BIO *bp);
+int test_sub(BIO *bp);
+int test_lshift1(BIO *bp);
+int test_lshift(BIO *bp,BN_CTX *ctx,BIGNUM *a_);
+int test_rshift1(BIO *bp);
+int test_rshift(BIO *bp,BN_CTX *ctx);
+int test_div(BIO *bp,BN_CTX *ctx);
+int test_div_word(BIO *bp);
+int test_div_recp(BIO *bp,BN_CTX *ctx);
+int test_mul(BIO *bp);
+int test_sqr(BIO *bp,BN_CTX *ctx);
+int test_mont(BIO *bp,BN_CTX *ctx);
+int test_mod(BIO *bp,BN_CTX *ctx);
+int test_mod_mul(BIO *bp,BN_CTX *ctx);
+int test_mod_exp(BIO *bp,BN_CTX *ctx);
+int test_mod_exp_mont_consttime(BIO *bp,BN_CTX *ctx);
+int test_exp(BIO *bp,BN_CTX *ctx);
+int test_gf2m_add(BIO *bp);
+int test_gf2m_mod(BIO *bp);
+int test_gf2m_mod_mul(BIO *bp,BN_CTX *ctx);
+int test_gf2m_mod_sqr(BIO *bp,BN_CTX *ctx);
+int test_gf2m_mod_inv(BIO *bp,BN_CTX *ctx);
+int test_gf2m_mod_div(BIO *bp,BN_CTX *ctx);
+int test_gf2m_mod_exp(BIO *bp,BN_CTX *ctx);
+int test_gf2m_mod_sqrt(BIO *bp,BN_CTX *ctx);
+int test_gf2m_mod_solve_quad(BIO *bp,BN_CTX *ctx);
+int test_kron(BIO *bp,BN_CTX *ctx);
+int test_sqrt(BIO *bp,BN_CTX *ctx);
+int rand_neg(void);
+static int results=0;
+
+static unsigned char lst[]="\xC6\x4F\x43\x04\x2A\xEA\xCA\x6E\x58\x36\x80\x5B\xE8\xC9"
+"\x9B\x04\x5D\x48\x36\xC2\xFD\x16\xC9\x64\xF0";
+
+static const char rnd_seed[] = "string to make the random number generator think it has entropy";
+
+static void message(BIO *out, char *m)
+ {
+ fprintf(stderr, "test %s\n", m);
+ BIO_puts(out, "print \"test ");
+ BIO_puts(out, m);
+ BIO_puts(out, "\\n\"\n");
+ }
+
+int main(int argc, char *argv[])
+ {
+ BN_CTX *ctx;
+ BIO *out;
+ char *outfile=NULL;
+
+ results = 0;
+
+ RAND_seed(rnd_seed, sizeof rnd_seed); /* or BN_generate_prime may fail */
+
+ argc--;
+ argv++;
+ while (argc >= 1)
+ {
+ if (strcmp(*argv,"-results") == 0)
+ results=1;
+ else if (strcmp(*argv,"-out") == 0)
+ {
+ if (--argc < 1) break;
+ outfile= *(++argv);
+ }
+ argc--;
+ argv++;
+ }
+
+
+ ctx=BN_CTX_new();
+ if (ctx == NULL) EXIT(1);
+
+ out=BIO_new(BIO_s_file());
+ if (out == NULL) EXIT(1);
+ if (outfile == NULL)
+ {
+ BIO_set_fp(out,stdout,BIO_NOCLOSE);
+ }
+ else
+ {
+ if (!BIO_write_filename(out,outfile))
+ {
+ perror(outfile);
+ EXIT(1);
+ }
+ }
+
+ if (!results)
+ BIO_puts(out,"obase=16\nibase=16\n");
+
+ message(out,"BN_add");
+ if (!test_add(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_sub");
+ if (!test_sub(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_lshift1");
+ if (!test_lshift1(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_lshift (fixed)");
+ if (!test_lshift(out,ctx,BN_bin2bn(lst,sizeof(lst)-1,NULL)))
+ goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_lshift");
+ if (!test_lshift(out,ctx,NULL)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_rshift1");
+ if (!test_rshift1(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_rshift");
+ if (!test_rshift(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_sqr");
+ if (!test_sqr(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_mul");
+ if (!test_mul(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_div");
+ if (!test_div(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_div_word");
+ if (!test_div_word(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_div_recp");
+ if (!test_div_recp(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_mod");
+ if (!test_mod(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_mod_mul");
+ if (!test_mod_mul(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_mont");
+ if (!test_mont(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_mod_exp");
+ if (!test_mod_exp(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_mod_exp_mont_consttime");
+ if (!test_mod_exp_mont_consttime(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_exp");
+ if (!test_exp(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_kronecker");
+ if (!test_kron(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_mod_sqrt");
+ if (!test_sqrt(out,ctx)) goto err;
+ (void)BIO_flush(out);
+#ifndef OPENSSL_NO_EC2M
+ message(out,"BN_GF2m_add");
+ if (!test_gf2m_add(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod");
+ if (!test_gf2m_mod(out)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod_mul");
+ if (!test_gf2m_mod_mul(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod_sqr");
+ if (!test_gf2m_mod_sqr(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod_inv");
+ if (!test_gf2m_mod_inv(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod_div");
+ if (!test_gf2m_mod_div(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod_exp");
+ if (!test_gf2m_mod_exp(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod_sqrt");
+ if (!test_gf2m_mod_sqrt(out,ctx)) goto err;
+ (void)BIO_flush(out);
+
+ message(out,"BN_GF2m_mod_solve_quad");
+ if (!test_gf2m_mod_solve_quad(out,ctx)) goto err;
+ (void)BIO_flush(out);
+#endif
+ BN_CTX_free(ctx);
+ BIO_free(out);
+
+/**/
+ EXIT(0);
+err:
+ BIO_puts(out,"1\n"); /* make sure the Perl script fed by bc notices
+ * the failure, see test_bn in test/Makefile.ssl*/
+ (void)BIO_flush(out);
+ ERR_load_crypto_strings();
+ ERR_print_errors_fp(stderr);
+ EXIT(1);
+ return(1);
+ }
+
+int test_add(BIO *bp)
+ {
+ BIGNUM a,b,c;
+ int i;
+
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+
+ BN_bntest_rand(&a,512,0,0);
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(&b,450+i,0,0);
+ a.neg=rand_neg();
+ b.neg=rand_neg();
+ BN_add(&c,&a,&b);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," + ");
+ BN_print(bp,&b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&c);
+ BIO_puts(bp,"\n");
+ }
+ a.neg=!a.neg;
+ b.neg=!b.neg;
+ BN_add(&c,&c,&b);
+ BN_add(&c,&c,&a);
+ if(!BN_is_zero(&c))
+ {
+ fprintf(stderr,"Add test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(&a);
+ BN_free(&b);
+ BN_free(&c);
+ return(1);
+ }
+
+int test_sub(BIO *bp)
+ {
+ BIGNUM a,b,c;
+ int i;
+
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+
+ for (i=0; i<num0+num1; i++)
+ {
+ if (i < num1)
+ {
+ BN_bntest_rand(&a,512,0,0);
+ BN_copy(&b,&a);
+ if (BN_set_bit(&a,i)==0) return(0);
+ BN_add_word(&b,i);
+ }
+ else
+ {
+ BN_bntest_rand(&b,400+i-num1,0,0);
+ a.neg=rand_neg();
+ b.neg=rand_neg();
+ }
+ BN_sub(&c,&a,&b);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," - ");
+ BN_print(bp,&b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&c);
+ BIO_puts(bp,"\n");
+ }
+ BN_add(&c,&c,&b);
+ BN_sub(&c,&c,&a);
+ if(!BN_is_zero(&c))
+ {
+ fprintf(stderr,"Subtract test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(&a);
+ BN_free(&b);
+ BN_free(&c);
+ return(1);
+ }
+
+int test_div(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM a,b,c,d,e;
+ int i;
+
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+ BN_init(&d);
+ BN_init(&e);
+
+ for (i=0; i<num0+num1; i++)
+ {
+ if (i < num1)
+ {
+ BN_bntest_rand(&a,400,0,0);
+ BN_copy(&b,&a);
+ BN_lshift(&a,&a,i);
+ BN_add_word(&a,i);
+ }
+ else
+ BN_bntest_rand(&b,50+3*(i-num1),0,0);
+ a.neg=rand_neg();
+ b.neg=rand_neg();
+ BN_div(&d,&c,&a,&b,ctx);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," / ");
+ BN_print(bp,&b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&d);
+ BIO_puts(bp,"\n");
+
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," % ");
+ BN_print(bp,&b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&c);
+ BIO_puts(bp,"\n");
+ }
+ BN_mul(&e,&d,&b,ctx);
+ BN_add(&d,&e,&c);
+ BN_sub(&d,&d,&a);
+ if(!BN_is_zero(&d))
+ {
+ fprintf(stderr,"Division test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(&a);
+ BN_free(&b);
+ BN_free(&c);
+ BN_free(&d);
+ BN_free(&e);
+ return(1);
+ }
+
+static void print_word(BIO *bp,BN_ULONG w)
+ {
+#ifdef SIXTY_FOUR_BIT
+ if (sizeof(w) > sizeof(unsigned long))
+ {
+ unsigned long h=(unsigned long)(w>>32),
+ l=(unsigned long)(w);
+
+ if (h) BIO_printf(bp,"%lX%08lX",h,l);
+ else BIO_printf(bp,"%lX",l);
+ return;
+ }
+#endif
+ BIO_printf(bp,BN_HEX_FMT1,w);
+ }
+
+int test_div_word(BIO *bp)
+ {
+ BIGNUM a,b;
+ BN_ULONG r,s;
+ int i;
+
+ BN_init(&a);
+ BN_init(&b);
+
+ for (i=0; i<num0; i++)
+ {
+ do {
+ BN_bntest_rand(&a,512,-1,0);
+ BN_bntest_rand(&b,BN_BITS2,-1,0);
+ s = b.d[0];
+ } while (!s);
+
+ BN_copy(&b, &a);
+ r = BN_div_word(&b, s);
+
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," / ");
+ print_word(bp,s);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&b);
+ BIO_puts(bp,"\n");
+
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," % ");
+ print_word(bp,s);
+ BIO_puts(bp," - ");
+ }
+ print_word(bp,r);
+ BIO_puts(bp,"\n");
+ }
+ BN_mul_word(&b,s);
+ BN_add_word(&b,r);
+ BN_sub(&b,&a,&b);
+ if(!BN_is_zero(&b))
+ {
+ fprintf(stderr,"Division (word) test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(&a);
+ BN_free(&b);
+ return(1);
+ }
+
+int test_div_recp(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM a,b,c,d,e;
+ BN_RECP_CTX recp;
+ int i;
+
+ BN_RECP_CTX_init(&recp);
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+ BN_init(&d);
+ BN_init(&e);
+
+ for (i=0; i<num0+num1; i++)
+ {
+ if (i < num1)
+ {
+ BN_bntest_rand(&a,400,0,0);
+ BN_copy(&b,&a);
+ BN_lshift(&a,&a,i);
+ BN_add_word(&a,i);
+ }
+ else
+ BN_bntest_rand(&b,50+3*(i-num1),0,0);
+ a.neg=rand_neg();
+ b.neg=rand_neg();
+ BN_RECP_CTX_set(&recp,&b,ctx);
+ BN_div_recp(&d,&c,&a,&recp,ctx);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," / ");
+ BN_print(bp,&b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&d);
+ BIO_puts(bp,"\n");
+
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," % ");
+ BN_print(bp,&b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&c);
+ BIO_puts(bp,"\n");
+ }
+ BN_mul(&e,&d,&b,ctx);
+ BN_add(&d,&e,&c);
+ BN_sub(&d,&d,&a);
+ if(!BN_is_zero(&d))
+ {
+ fprintf(stderr,"Reciprocal division test failed!\n");
+ fprintf(stderr,"a=");
+ BN_print_fp(stderr,&a);
+ fprintf(stderr,"\nb=");
+ BN_print_fp(stderr,&b);
+ fprintf(stderr,"\n");
+ return 0;
+ }
+ }
+ BN_free(&a);
+ BN_free(&b);
+ BN_free(&c);
+ BN_free(&d);
+ BN_free(&e);
+ BN_RECP_CTX_free(&recp);
+ return(1);
+ }
+
+int test_mul(BIO *bp)
+ {
+ BIGNUM a,b,c,d,e;
+ int i;
+ BN_CTX *ctx;
+
+ ctx = BN_CTX_new();
+ if (ctx == NULL) EXIT(1);
+
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+ BN_init(&d);
+ BN_init(&e);
+
+ for (i=0; i<num0+num1; i++)
+ {
+ if (i <= num1)
+ {
+ BN_bntest_rand(&a,100,0,0);
+ BN_bntest_rand(&b,100,0,0);
+ }
+ else
+ BN_bntest_rand(&b,i-num1,0,0);
+ a.neg=rand_neg();
+ b.neg=rand_neg();
+ BN_mul(&c,&a,&b,ctx);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," * ");
+ BN_print(bp,&b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&c);
+ BIO_puts(bp,"\n");
+ }
+ BN_div(&d,&e,&c,&a,ctx);
+ BN_sub(&d,&d,&b);
+ if(!BN_is_zero(&d) || !BN_is_zero(&e))
+ {
+ fprintf(stderr,"Multiplication test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(&a);
+ BN_free(&b);
+ BN_free(&c);
+ BN_free(&d);
+ BN_free(&e);
+ BN_CTX_free(ctx);
+ return(1);
+ }
+
+int test_sqr(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM a,c,d,e;
+ int i;
+
+ BN_init(&a);
+ BN_init(&c);
+ BN_init(&d);
+ BN_init(&e);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(&a,40+i*10,0,0);
+ a.neg=rand_neg();
+ BN_sqr(&c,&a,ctx);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," * ");
+ BN_print(bp,&a);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&c);
+ BIO_puts(bp,"\n");
+ }
+ BN_div(&d,&e,&c,&a,ctx);
+ BN_sub(&d,&d,&a);
+ if(!BN_is_zero(&d) || !BN_is_zero(&e))
+ {
+ fprintf(stderr,"Square test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(&a);
+ BN_free(&c);
+ BN_free(&d);
+ BN_free(&e);
+ return(1);
+ }
+
+int test_mont(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM a,b,c,d,A,B;
+ BIGNUM n;
+ int i;
+ BN_MONT_CTX *mont;
+
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+ BN_init(&d);
+ BN_init(&A);
+ BN_init(&B);
+ BN_init(&n);
+
+ mont=BN_MONT_CTX_new();
+ if (mont == NULL)
+ return 0;
+
+ BN_bntest_rand(&a,100,0,0); /**/
+ BN_bntest_rand(&b,100,0,0); /**/
+ for (i=0; i<num2; i++)
+ {
+ int bits = (200*(i+1))/num2;
+
+ if (bits == 0)
+ continue;
+ BN_bntest_rand(&n,bits,0,1);
+ BN_MONT_CTX_set(mont,&n,ctx);
+
+ BN_nnmod(&a,&a,&n,ctx);
+ BN_nnmod(&b,&b,&n,ctx);
+
+ BN_to_montgomery(&A,&a,mont,ctx);
+ BN_to_montgomery(&B,&b,mont,ctx);
+
+ BN_mod_mul_montgomery(&c,&A,&B,mont,ctx);/**/
+ BN_from_montgomery(&A,&c,mont,ctx);/**/
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+#ifdef undef
+fprintf(stderr,"%d * %d %% %d\n",
+BN_num_bits(&a),
+BN_num_bits(&b),
+BN_num_bits(mont->N));
+#endif
+ BN_print(bp,&a);
+ BIO_puts(bp," * ");
+ BN_print(bp,&b);
+ BIO_puts(bp," % ");
+ BN_print(bp,&(mont->N));
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,&A);
+ BIO_puts(bp,"\n");
+ }
+ BN_mod_mul(&d,&a,&b,&n,ctx);
+ BN_sub(&d,&d,&A);
+ if(!BN_is_zero(&d))
+ {
+ fprintf(stderr,"Montgomery multiplication test failed!\n");
+ return 0;
+ }
+ }
+ BN_MONT_CTX_free(mont);
+ BN_free(&a);
+ BN_free(&b);
+ BN_free(&c);
+ BN_free(&d);
+ BN_free(&A);
+ BN_free(&B);
+ BN_free(&n);
+ return(1);
+ }
+
+int test_mod(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM *a,*b,*c,*d,*e;
+ int i;
+
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+
+ BN_bntest_rand(a,1024,0,0); /**/
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(b,450+i*10,0,0); /**/
+ a->neg=rand_neg();
+ b->neg=rand_neg();
+ BN_mod(c,a,b,ctx);/**/
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," % ");
+ BN_print(bp,b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,c);
+ BIO_puts(bp,"\n");
+ }
+ BN_div(d,e,a,b,ctx);
+ BN_sub(e,e,c);
+ if(!BN_is_zero(e))
+ {
+ fprintf(stderr,"Modulo test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ return(1);
+ }
+
+int test_mod_mul(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM *a,*b,*c,*d,*e;
+ int i,j;
+
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+
+ for (j=0; j<3; j++) {
+ BN_bntest_rand(c,1024,0,0); /**/
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a,475+i*10,0,0); /**/
+ BN_bntest_rand(b,425+i*11,0,0); /**/
+ a->neg=rand_neg();
+ b->neg=rand_neg();
+ if (!BN_mod_mul(e,a,b,c,ctx))
+ {
+ unsigned long l;
+
+ while ((l=ERR_get_error()))
+ fprintf(stderr,"ERROR:%s\n",
+ ERR_error_string(l,NULL));
+ EXIT(1);
+ }
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," * ");
+ BN_print(bp,b);
+ BIO_puts(bp," % ");
+ BN_print(bp,c);
+ if ((a->neg ^ b->neg) && !BN_is_zero(e))
+ {
+ /* If (a*b) % c is negative, c must be added
+ * in order to obtain the normalized remainder
+ * (new with OpenSSL 0.9.7, previous versions of
+ * BN_mod_mul could generate negative results)
+ */
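+				/* e.g. a = -1, b = 3, c = 7: a*b = -3, and the normalized remainder is -3 + 7 = 4 (illustrative values) */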
+ BIO_puts(bp," + ");
+ BN_print(bp,c);
+ }
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,e);
+ BIO_puts(bp,"\n");
+ }
+ BN_mul(d,a,b,ctx);
+ BN_sub(d,d,e);
+ BN_div(a,b,d,c,ctx);
+ if(!BN_is_zero(b))
+ {
+ fprintf(stderr,"Modulo multiply test failed!\n");
+ ERR_print_errors_fp(stderr);
+ return 0;
+ }
+ }
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ return(1);
+ }
+
+int test_mod_exp(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM *a,*b,*c,*d,*e;
+ int i;
+
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+
+ BN_bntest_rand(c,30,0,1); /* must be odd for montgomery */
+ for (i=0; i<num2; i++)
+ {
+ BN_bntest_rand(a,20+i*5,0,0); /**/
+ BN_bntest_rand(b,2+i,0,0); /**/
+
+ if (!BN_mod_exp(d,a,b,c,ctx))
+ return(0);
+
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," ^ ");
+ BN_print(bp,b);
+ BIO_puts(bp," % ");
+ BN_print(bp,c);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,d);
+ BIO_puts(bp,"\n");
+ }
+ BN_exp(e,a,b,ctx);
+ BN_sub(e,e,d);
+ BN_div(a,b,e,c,ctx);
+ if(!BN_is_zero(b))
+ {
+ fprintf(stderr,"Modulo exponentiation test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ return(1);
+ }
+
+int test_mod_exp_mont_consttime(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM *a,*b,*c,*d,*e;
+ int i;
+
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+
+ BN_bntest_rand(c,30,0,1); /* must be odd for montgomery */
+ for (i=0; i<num2; i++)
+ {
+ BN_bntest_rand(a,20+i*5,0,0); /**/
+ BN_bntest_rand(b,2+i,0,0); /**/
+
+ if (!BN_mod_exp_mont_consttime(d,a,b,c,ctx,NULL))
+			return(0);
+
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," ^ ");
+ BN_print(bp,b);
+ BIO_puts(bp," % ");
+ BN_print(bp,c);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,d);
+ BIO_puts(bp,"\n");
+ }
+ BN_exp(e,a,b,ctx);
+ BN_sub(e,e,d);
+ BN_div(a,b,e,c,ctx);
+ if(!BN_is_zero(b))
+ {
+ fprintf(stderr,"Modulo exponentiation test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ return(1);
+ }
+
+int test_exp(BIO *bp, BN_CTX *ctx)
+ {
+ BIGNUM *a,*b,*d,*e,*one;
+ int i;
+
+ a=BN_new();
+ b=BN_new();
+ d=BN_new();
+ e=BN_new();
+ one=BN_new();
+ BN_one(one);
+
+ for (i=0; i<num2; i++)
+ {
+ BN_bntest_rand(a,20+i*5,0,0); /**/
+ BN_bntest_rand(b,2+i,0,0); /**/
+
+ if (BN_exp(d,a,b,ctx) <= 0)
+ return(0);
+
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," ^ ");
+ BN_print(bp,b);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,d);
+ BIO_puts(bp,"\n");
+ }
+ BN_one(e);
+ for( ; !BN_is_zero(b) ; BN_sub(b,b,one))
+ BN_mul(e,e,a,ctx);
+ BN_sub(e,e,d);
+ if(!BN_is_zero(e))
+ {
+ fprintf(stderr,"Exponentiation test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(d);
+ BN_free(e);
+ BN_free(one);
+ return(1);
+ }
+#ifndef OPENSSL_NO_EC2M
+int test_gf2m_add(BIO *bp)
+ {
+ BIGNUM a,b,c;
+ int i, ret = 0;
+
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_rand(&a,512,0,0);
+ BN_copy(&b, BN_value_one());
+ a.neg=rand_neg();
+ b.neg=rand_neg();
+ BN_GF2m_add(&c,&a,&b);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,&a);
+ BIO_puts(bp," ^ ");
+ BN_print(bp,&b);
+ BIO_puts(bp," = ");
+ }
+ BN_print(bp,&c);
+ BIO_puts(bp,"\n");
+ }
+#endif
+ /* Test that two added values have the correct parity. */
+ if((BN_is_odd(&a) && BN_is_odd(&c)) || (!BN_is_odd(&a) && !BN_is_odd(&c)))
+ {
+ fprintf(stderr,"GF(2^m) addition test (a) failed!\n");
+ goto err;
+ }
+ BN_GF2m_add(&c,&c,&c);
+ /* Test that c + c = 0. */
+ if(!BN_is_zero(&c))
+ {
+ fprintf(stderr,"GF(2^m) addition test (b) failed!\n");
+ goto err;
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(&a);
+ BN_free(&b);
+ BN_free(&c);
+ return ret;
+ }
+
+int test_gf2m_mod(BIO *bp)
+ {
+ BIGNUM *a,*b[2],*c,*d,*e;
+ int i, j, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 1024, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ BN_GF2m_mod(c, a, b[j]);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp," - ");
+ BN_print(bp,c);
+ BIO_puts(bp,"\n");
+ }
+ }
+#endif
+ BN_GF2m_add(d, a, c);
+ BN_GF2m_mod(e, d, b[j]);
+ /* Test that a + (a mod p) mod p == 0. */
+ if(!BN_is_zero(e))
+ {
+ fprintf(stderr,"GF(2^m) modulo test failed!\n");
+ goto err;
+ }
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ return ret;
+ }
+
+int test_gf2m_mod_mul(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b[2],*c,*d,*e,*f,*g,*h;
+ int i, j, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+ f=BN_new();
+ g=BN_new();
+ h=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 1024, 0, 0);
+ BN_bntest_rand(c, 1024, 0, 0);
+ BN_bntest_rand(d, 1024, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ BN_GF2m_mod_mul(e, a, c, b[j], ctx);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," * ");
+ BN_print(bp,c);
+ BIO_puts(bp," % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp," - ");
+ BN_print(bp,e);
+ BIO_puts(bp,"\n");
+ }
+ }
+#endif
+ BN_GF2m_add(f, a, d);
+ BN_GF2m_mod_mul(g, f, c, b[j], ctx);
+ BN_GF2m_mod_mul(h, d, c, b[j], ctx);
+ BN_GF2m_add(f, e, g);
+ BN_GF2m_add(f, f, h);
+ /* Test that (a+d)*c = a*c + d*c. */
+ if(!BN_is_zero(f))
+ {
+ fprintf(stderr,"GF(2^m) modular multiplication test failed!\n");
+ goto err;
+ }
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ BN_free(f);
+ BN_free(g);
+ BN_free(h);
+ return ret;
+ }
+
+int test_gf2m_mod_sqr(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b[2],*c,*d;
+ int i, j, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 1024, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ BN_GF2m_mod_sqr(c, a, b[j], ctx);
+ BN_copy(d, a);
+ BN_GF2m_mod_mul(d, a, d, b[j], ctx);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," ^ 2 % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp, " = ");
+ BN_print(bp,c);
+ BIO_puts(bp,"; a * a = ");
+ BN_print(bp,d);
+ BIO_puts(bp,"\n");
+ }
+ }
+#endif
+ BN_GF2m_add(d, c, d);
+ /* Test that a*a = a^2. */
+ if(!BN_is_zero(d))
+ {
+ fprintf(stderr,"GF(2^m) modular squaring test failed!\n");
+ goto err;
+ }
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ return ret;
+ }
+
+int test_gf2m_mod_inv(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b[2],*c,*d;
+ int i, j, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 512, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ BN_GF2m_mod_inv(c, a, b[j], ctx);
+ BN_GF2m_mod_mul(d, a, c, b[j], ctx);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp, " * ");
+ BN_print(bp,c);
+ BIO_puts(bp," - 1 % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp,"\n");
+ }
+ }
+#endif
+ /* Test that ((1/a)*a) = 1. */
+ if(!BN_is_one(d))
+ {
+ fprintf(stderr,"GF(2^m) modular inversion test failed!\n");
+ goto err;
+ }
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ return ret;
+ }
+
+int test_gf2m_mod_div(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b[2],*c,*d,*e,*f;
+ int i, j, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+ f=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 512, 0, 0);
+ BN_bntest_rand(c, 512, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ BN_GF2m_mod_div(d, a, c, b[j], ctx);
+ BN_GF2m_mod_mul(e, d, c, b[j], ctx);
+ BN_GF2m_mod_div(f, a, e, b[j], ctx);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp, " = ");
+ BN_print(bp,c);
+ BIO_puts(bp," * ");
+ BN_print(bp,d);
+ BIO_puts(bp, " % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp,"\n");
+ }
+ }
+#endif
+ /* Test that ((a/c)*c)/a = 1. */
+ if(!BN_is_one(f))
+ {
+ fprintf(stderr,"GF(2^m) modular division test failed!\n");
+ goto err;
+ }
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ BN_free(f);
+ return ret;
+ }
+
+int test_gf2m_mod_exp(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b[2],*c,*d,*e,*f;
+ int i, j, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+ f=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 512, 0, 0);
+ BN_bntest_rand(c, 512, 0, 0);
+ BN_bntest_rand(d, 512, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ BN_GF2m_mod_exp(e, a, c, b[j], ctx);
+ BN_GF2m_mod_exp(f, a, d, b[j], ctx);
+ BN_GF2m_mod_mul(e, e, f, b[j], ctx);
+ BN_add(f, c, d);
+ BN_GF2m_mod_exp(f, a, f, b[j], ctx);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp, " ^ (");
+ BN_print(bp,c);
+ BIO_puts(bp," + ");
+ BN_print(bp,d);
+ BIO_puts(bp, ") = ");
+ BN_print(bp,e);
+ BIO_puts(bp, "; - ");
+ BN_print(bp,f);
+ BIO_puts(bp, " % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp,"\n");
+ }
+ }
+#endif
+ BN_GF2m_add(f, e, f);
+ /* Test that a^(c+d)=a^c*a^d. */
+ if(!BN_is_zero(f))
+ {
+ fprintf(stderr,"GF(2^m) modular exponentiation test failed!\n");
+ goto err;
+ }
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ BN_free(f);
+ return ret;
+ }
+
+int test_gf2m_mod_sqrt(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b[2],*c,*d,*e,*f;
+ int i, j, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+ f=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 512, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ BN_GF2m_mod(c, a, b[j]);
+ BN_GF2m_mod_sqrt(d, a, b[j], ctx);
+ BN_GF2m_mod_sqr(e, d, b[j], ctx);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,d);
+ BIO_puts(bp, " ^ 2 - ");
+ BN_print(bp,a);
+ BIO_puts(bp,"\n");
+ }
+ }
+#endif
+ BN_GF2m_add(f, c, e);
+ /* Test that d^2 = a, where d = sqrt(a). */
+ if(!BN_is_zero(f))
+ {
+ fprintf(stderr,"GF(2^m) modular square root test failed!\n");
+ goto err;
+ }
+ }
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ BN_free(f);
+ return ret;
+ }
+
+int test_gf2m_mod_solve_quad(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b[2],*c,*d,*e;
+ int i, j, s = 0, t, ret = 0;
+ int p0[] = {163,7,6,3,0,-1};
+ int p1[] = {193,15,0,-1};
+
+ a=BN_new();
+ b[0]=BN_new();
+ b[1]=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+
+ BN_GF2m_arr2poly(p0, b[0]);
+ BN_GF2m_arr2poly(p1, b[1]);
+
+ for (i=0; i<num0; i++)
+ {
+ BN_bntest_rand(a, 512, 0, 0);
+ for (j=0; j < 2; j++)
+ {
+ t = BN_GF2m_mod_solve_quad(c, a, b[j], ctx);
+ if (t)
+ {
+ s++;
+ BN_GF2m_mod_sqr(d, c, b[j], ctx);
+ BN_GF2m_add(d, c, d);
+ BN_GF2m_mod(e, a, b[j]);
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,c);
+ BIO_puts(bp, " is root of z^2 + z = ");
+ BN_print(bp,a);
+ BIO_puts(bp, " % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp, "\n");
+ }
+ }
+#endif
+ BN_GF2m_add(e, e, d);
+ /* Test that solution of quadratic c satisfies c^2 + c = a. */
+ if(!BN_is_zero(e))
+ {
+ fprintf(stderr,"GF(2^m) modular solve quadratic test failed!\n");
+ goto err;
+ }
+
+ }
+ else
+ {
+#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BIO_puts(bp, "There are no roots of z^2 + z = ");
+ BN_print(bp,a);
+ BIO_puts(bp, " % ");
+ BN_print(bp,b[j]);
+ BIO_puts(bp, "\n");
+ }
+ }
+#endif
+ }
+ }
+ }
+ if (s == 0)
+ {
+ fprintf(stderr,"All %i tests of GF(2^m) modular solve quadratic resulted in no roots;\n", num0);
+ fprintf(stderr,"this is very unlikely and probably indicates an error.\n");
+ goto err;
+ }
+ ret = 1;
+ err:
+ BN_free(a);
+ BN_free(b[0]);
+ BN_free(b[1]);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ return ret;
+ }
+#endif
+static int genprime_cb(int p, int n, BN_GENCB *arg)
+ {
+ char c='*';
+
+ if (p == 0) c='.';
+ if (p == 1) c='+';
+ if (p == 2) c='*';
+ if (p == 3) c='\n';
+ putc(c, stderr);
+ fflush(stderr);
+ return 1;
+ }
+
+int test_kron(BIO *bp, BN_CTX *ctx)
+ {
+ BN_GENCB cb;
+ BIGNUM *a,*b,*r,*t;
+ int i;
+ int legendre, kronecker;
+ int ret = 0;
+
+ a = BN_new();
+ b = BN_new();
+ r = BN_new();
+ t = BN_new();
+ if (a == NULL || b == NULL || r == NULL || t == NULL) goto err;
+
+ BN_GENCB_set(&cb, genprime_cb, NULL);
+
+ /* We test BN_kronecker(a, b, ctx) just for b odd (Jacobi symbol).
+ * In this case we know that if b is prime, then BN_kronecker(a, b, ctx)
+ * is congruent to $a^{(b-1)/2}$, modulo $b$ (Legendre symbol).
+ * So we generate a random prime b and compare these values
+ * for a number of random a's. (That is, we run the Solovay-Strassen
+ * primality test to confirm that b is prime, except that we
+ * don't want to test whether b is prime but whether BN_kronecker
+ * works.) */
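+	/* Illustrative check (not from the test data): for b = 7, a = 3,
+	 * a^((b-1)/2) = 3^3 = 27 = 6 = -1 (mod 7), and 3 is indeed a quadratic
+	 * non-residue mod 7, so the Legendre symbol (3/7) is -1. */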
+
+ if (!BN_generate_prime_ex(b, 512, 0, NULL, NULL, &cb)) goto err;
+ b->neg = rand_neg();
+ putc('\n', stderr);
+
+ for (i = 0; i < num0; i++)
+ {
+ if (!BN_bntest_rand(a, 512, 0, 0)) goto err;
+ a->neg = rand_neg();
+
+ /* t := (|b|-1)/2 (note that b is odd) */
+ if (!BN_copy(t, b)) goto err;
+ t->neg = 0;
+ if (!BN_sub_word(t, 1)) goto err;
+ if (!BN_rshift1(t, t)) goto err;
+ /* r := a^t mod b */
+ b->neg=0;
+
+ if (!BN_mod_exp_recp(r, a, t, b, ctx)) goto err;
+ b->neg=1;
+
+ if (BN_is_word(r, 1))
+ legendre = 1;
+ else if (BN_is_zero(r))
+ legendre = 0;
+ else
+ {
+ if (!BN_add_word(r, 1)) goto err;
+ if (0 != BN_ucmp(r, b))
+ {
+ fprintf(stderr, "Legendre symbol computation failed\n");
+ goto err;
+ }
+ legendre = -1;
+ }
+
+ kronecker = BN_kronecker(a, b, ctx);
+ if (kronecker < -1) goto err;
+ /* we actually need BN_kronecker(a, |b|) */
+ if (a->neg && b->neg)
+ kronecker = -kronecker;
+
+ if (legendre != kronecker)
+ {
+ fprintf(stderr, "legendre != kronecker; a = ");
+ BN_print_fp(stderr, a);
+ fprintf(stderr, ", b = ");
+ BN_print_fp(stderr, b);
+ fprintf(stderr, "\n");
+ goto err;
+ }
+
+ putc('.', stderr);
+ fflush(stderr);
+ }
+
+ putc('\n', stderr);
+ fflush(stderr);
+ ret = 1;
+ err:
+ if (a != NULL) BN_free(a);
+ if (b != NULL) BN_free(b);
+ if (r != NULL) BN_free(r);
+ if (t != NULL) BN_free(t);
+ return ret;
+ }
+
+int test_sqrt(BIO *bp, BN_CTX *ctx)
+ {
+ BN_GENCB cb;
+ BIGNUM *a,*p,*r;
+ int i, j;
+ int ret = 0;
+
+ a = BN_new();
+ p = BN_new();
+ r = BN_new();
+ if (a == NULL || p == NULL || r == NULL) goto err;
+
+ BN_GENCB_set(&cb, genprime_cb, NULL);
+
+ for (i = 0; i < 16; i++)
+ {
+ if (i < 8)
+ {
+ unsigned primes[8] = { 2, 3, 5, 7, 11, 13, 17, 19 };
+
+ if (!BN_set_word(p, primes[i])) goto err;
+ }
+ else
+ {
+ if (!BN_set_word(a, 32)) goto err;
+ if (!BN_set_word(r, 2*i + 1)) goto err;
+
+ if (!BN_generate_prime_ex(p, 256, 0, a, r, &cb)) goto err;
+ putc('\n', stderr);
+ }
+ p->neg = rand_neg();
+
+ for (j = 0; j < num2; j++)
+ {
+ /* construct 'a' such that it is a square modulo p,
+ * but in general not a proper square and not reduced modulo p */
+ if (!BN_bntest_rand(r, 256, 0, 3)) goto err;
+ if (!BN_nnmod(r, r, p, ctx)) goto err;
+ if (!BN_mod_sqr(r, r, p, ctx)) goto err;
+ if (!BN_bntest_rand(a, 256, 0, 3)) goto err;
+ if (!BN_nnmod(a, a, p, ctx)) goto err;
+ if (!BN_mod_sqr(a, a, p, ctx)) goto err;
+ if (!BN_mul(a, a, r, ctx)) goto err;
+ if (rand_neg())
+ if (!BN_sub(a, a, p)) goto err;
+
+ if (!BN_mod_sqrt(r, a, p, ctx)) goto err;
+ if (!BN_mod_sqr(r, r, p, ctx)) goto err;
+
+ if (!BN_nnmod(a, a, p, ctx)) goto err;
+
+ if (BN_cmp(a, r) != 0)
+ {
+ fprintf(stderr, "BN_mod_sqrt failed: a = ");
+ BN_print_fp(stderr, a);
+ fprintf(stderr, ", r = ");
+ BN_print_fp(stderr, r);
+ fprintf(stderr, ", p = ");
+ BN_print_fp(stderr, p);
+ fprintf(stderr, "\n");
+ goto err;
+ }
+
+ putc('.', stderr);
+ fflush(stderr);
+ }
+
+ putc('\n', stderr);
+ fflush(stderr);
+ }
+ ret = 1;
+ err:
+ if (a != NULL) BN_free(a);
+ if (p != NULL) BN_free(p);
+ if (r != NULL) BN_free(r);
+ return ret;
+ }
+
+int test_lshift(BIO *bp,BN_CTX *ctx,BIGNUM *a_)
+ {
+ BIGNUM *a,*b,*c,*d;
+ int i;
+
+ b=BN_new();
+ c=BN_new();
+ d=BN_new();
+ BN_one(c);
+
+ if(a_)
+ a=a_;
+ else
+ {
+ a=BN_new();
+ BN_bntest_rand(a,200,0,0); /**/
+ a->neg=rand_neg();
+ }
+ for (i=0; i<num0; i++)
+ {
+ BN_lshift(b,a,i+1);
+ BN_add(c,c,c);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," * ");
+ BN_print(bp,c);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,b);
+ BIO_puts(bp,"\n");
+ }
+ BN_mul(d,a,c,ctx);
+ BN_sub(d,d,b);
+ if(!BN_is_zero(d))
+ {
+ fprintf(stderr,"Left shift test failed!\n");
+ fprintf(stderr,"a=");
+ BN_print_fp(stderr,a);
+ fprintf(stderr,"\nb=");
+ BN_print_fp(stderr,b);
+ fprintf(stderr,"\nc=");
+ BN_print_fp(stderr,c);
+ fprintf(stderr,"\nd=");
+ BN_print_fp(stderr,d);
+ fprintf(stderr,"\n");
+ return 0;
+ }
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ BN_free(d);
+ return(1);
+ }
+
+int test_lshift1(BIO *bp)
+ {
+ BIGNUM *a,*b,*c;
+ int i;
+
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+
+ BN_bntest_rand(a,200,0,0); /**/
+ a->neg=rand_neg();
+ for (i=0; i<num0; i++)
+ {
+ BN_lshift1(b,a);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," * 2");
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,b);
+ BIO_puts(bp,"\n");
+ }
+ BN_add(c,a,a);
+ BN_sub(a,b,c);
+ if(!BN_is_zero(a))
+ {
+ fprintf(stderr,"Left shift one test failed!\n");
+ return 0;
+ }
+
+ BN_copy(a,b);
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ return(1);
+ }
+
+int test_rshift(BIO *bp,BN_CTX *ctx)
+ {
+ BIGNUM *a,*b,*c,*d,*e;
+ int i;
+
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+ d=BN_new();
+ e=BN_new();
+ BN_one(c);
+
+ BN_bntest_rand(a,200,0,0); /**/
+ a->neg=rand_neg();
+ for (i=0; i<num0; i++)
+ {
+ BN_rshift(b,a,i+1);
+ BN_add(c,c,c);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," / ");
+ BN_print(bp,c);
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,b);
+ BIO_puts(bp,"\n");
+ }
+ BN_div(d,e,a,c,ctx);
+ BN_sub(d,d,b);
+ if(!BN_is_zero(d))
+ {
+ fprintf(stderr,"Right shift test failed!\n");
+ return 0;
+ }
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ BN_free(d);
+ BN_free(e);
+ return(1);
+ }
+
+int test_rshift1(BIO *bp)
+ {
+ BIGNUM *a,*b,*c;
+ int i;
+
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+
+ BN_bntest_rand(a,200,0,0); /**/
+ a->neg=rand_neg();
+ for (i=0; i<num0; i++)
+ {
+ BN_rshift1(b,a);
+ if (bp != NULL)
+ {
+ if (!results)
+ {
+ BN_print(bp,a);
+ BIO_puts(bp," / 2");
+ BIO_puts(bp," - ");
+ }
+ BN_print(bp,b);
+ BIO_puts(bp,"\n");
+ }
+ BN_sub(c,a,b);
+ BN_sub(c,c,b);
+ if(!BN_is_zero(c) && !BN_abs_is_word(c, 1))
+ {
+ fprintf(stderr,"Right shift one test failed!\n");
+ return 0;
+ }
+ BN_copy(a,b);
+ }
+ BN_free(a);
+ BN_free(b);
+ BN_free(c);
+ return(1);
+ }
+
+int rand_neg(void)
+ {
+ static unsigned int neg=0;
+ static int sign[8]={0,0,0,1,1,0,1,1};
+
+ return(sign[(neg++)%8]);
+ }
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/divtest.c b/ics-openvpn-stripped/main/openssl/crypto/bn/divtest.c
new file mode 100644
index 00000000..d3fc688f
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/divtest.c
@@ -0,0 +1,41 @@
+#include <stdio.h>
+#include <openssl/bn.h>
+#include <openssl/rand.h>
+
+static int Rand(void)
+{
+ unsigned char x[2];
+ RAND_pseudo_bytes(x,2);
+ return (x[0] + 2*x[1]);
+}
+
+static void bug(char *m, BIGNUM *a, BIGNUM *b)
+{
+ printf("%s!\na=",m);
+ BN_print_fp(stdout, a);
+ printf("\nb=");
+ BN_print_fp(stdout, b);
+ printf("\n");
+ fflush(stdout);
+}
+
+int main(void)
+{
+ BIGNUM *a=BN_new(), *b=BN_new(), *c=BN_new(), *d=BN_new(),
+ *C=BN_new(), *D=BN_new();
+ BN_RECP_CTX *recp=BN_RECP_CTX_new();
+ BN_CTX *ctx=BN_CTX_new();
+
+ for(;;) {
+ BN_pseudo_rand(a,Rand(),0,0);
+ BN_pseudo_rand(b,Rand(),0,0);
+ if (BN_is_zero(b)) continue;
+
+ BN_RECP_CTX_set(recp,b,ctx);
+ if (BN_div(C,D,a,b,ctx) != 1)
+ bug("BN_div failed",a,b);
+ if (BN_div_recp(c,d,a,recp,ctx) != 1)
+ bug("BN_div_recp failed",a,b);
+    else if (BN_cmp(c,C) != 0 || BN_cmp(d,D) != 0)
+ bug("mismatch",a,b);
+ }
+}
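For reference, the reciprocal-division API exercised by divtest.c caches a reciprocal of the divisor so that repeated divisions by the same value avoid a full long division each time; below is a minimal heap-allocated sketch, with an invented helper name and most error reporting omitted.

#include <openssl/bn.h>

/* dv = num / divisor, rem = num % divisor, via a cached reciprocal of divisor. */
static int recp_div_demo(BIGNUM *dv, BIGNUM *rem, const BIGNUM *num,
	const BIGNUM *divisor, BN_CTX *ctx)
	{
	int ok = 0;
	BN_RECP_CTX *recp = BN_RECP_CTX_new();

	if (recp == NULL)
		return 0;
	if (BN_RECP_CTX_set(recp, divisor, ctx))
		ok = BN_div_recp(dv, rem, num, recp, ctx);
	BN_RECP_CTX_free(recp);
	return ok;
	}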
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/exp.c b/ics-openvpn-stripped/main/openssl/crypto/bn/exp.c
new file mode 100644
index 00000000..4865b0ef
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/exp.c
@@ -0,0 +1,62 @@
+/* unused */
+
+#include <stdio.h>
+#include <openssl/tmdiff.h>
+#include "bn_lcl.h"
+
+#define SIZE 256
+#define NUM (8*8*8)
+#define MOD (8*8*8*8*8)
+
+int main(int argc, char *argv[])
+ {
+ BN_CTX ctx;
+ BIGNUM a,b,c,r,rr,t,l;
+ int j,i,size=SIZE,num=NUM,mod=MOD;
+ char *start,*end;
+ BN_MONT_CTX mont;
+ double d,md;
+
+ BN_MONT_CTX_init(&mont);
+ BN_CTX_init(&ctx);
+ BN_init(&a);
+ BN_init(&b);
+ BN_init(&c);
+ BN_init(&r);
+
+ start=ms_time_new();
+ end=ms_time_new();
+ while (size <= 1024*8)
+ {
+ BN_rand(&a,size,0,0);
+ BN_rand(&b,size,1,0);
+ BN_rand(&c,size,0,1);
+
+ BN_mod(&a,&a,&c,&ctx);
+
+ ms_time_get(start);
+ for (i=0; i<10; i++)
+ BN_MONT_CTX_set(&mont,&c,&ctx);
+ ms_time_get(end);
+ md=ms_time_diff(start,end);
+
+ ms_time_get(start);
+ for (i=0; i<num; i++)
+ {
+ /* bn_mull(&r,&a,&b,&ctx); */
+ /* BN_sqr(&r,&a,&ctx); */
+ BN_mod_exp_mont(&r,&a,&b,&c,&ctx,&mont);
+ }
+ ms_time_get(end);
+ d=ms_time_diff(start,end)/* *50/33 */;
+ printf("%5d bit:%6.2f %6d %6.4f %4d m_set(%5.4f)\n",size,
+ d,num,d/num,(int)((d/num)*mod),md/10.0);
+ num/=8;
+ mod/=8;
+ if (num <= 0) num=1;
+ size*=2;
+ }
+
+ }
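exp.c above times BN_mod_exp_mont() using the older stack-allocated BIGNUM/BN_CTX style; with heap-allocated objects the same call sequence looks roughly like the sketch below (illustrative only, helper name invented, error reporting reduced to a boolean).

#include <openssl/bn.h>

/* Computes r = a^b mod m with an explicitly managed Montgomery context. */
static int modexp_mont_demo(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m)
	{
	int ok = 0;
	BN_CTX *ctx = BN_CTX_new();
	BN_MONT_CTX *mont = BN_MONT_CTX_new();

	if (ctx == NULL || mont == NULL)
		goto end;
	if (!BN_MONT_CTX_set(mont, m, ctx))	/* m must be odd */
		goto end;
	ok = BN_mod_exp_mont(r, a, b, m, ctx, mont);
end:
	BN_MONT_CTX_free(mont);
	BN_CTX_free(ctx);
	return ok;
	}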
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/expspeed.c b/ics-openvpn-stripped/main/openssl/crypto/bn/expspeed.c
new file mode 100644
index 00000000..4d5f221f
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/expspeed.c
@@ -0,0 +1,353 @@
+/* unused */
+
+/* crypto/bn/expspeed.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+/* most of this code has been pilfered from my libdes speed.c program */
+
+#define BASENUM 5000
+#define NUM_START 0
+
+
+/* determine timings for modexp, modmul, modsqr, gcd, Kronecker symbol,
+ * modular inverse, or modular square roots */
+#define TEST_EXP
+#undef TEST_MUL
+#undef TEST_SQR
+#undef TEST_GCD
+#undef TEST_KRON
+#undef TEST_INV
+#undef TEST_SQRT
+#define P_MOD_64 9 /* least significant 6 bits for prime to be used for BN_sqrt timings */
+
+#if defined(TEST_EXP) + defined(TEST_MUL) + defined(TEST_SQR) + defined(TEST_GCD) + defined(TEST_KRON) + defined(TEST_INV) + defined(TEST_SQRT) != 1
+# error "choose one test"
+#endif
+
+#if defined(TEST_INV) || defined(TEST_SQRT)
+# define C_PRIME
+static void genprime_cb(int p, int n, void *arg);
+#endif
+
+
+
+#undef PROG
+#define PROG bnspeed_main
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <openssl/crypto.h>
+#include <openssl/err.h>
+#include <openssl/rand.h>
+
+#if !defined(OPENSSL_SYS_MSDOS) && (!defined(OPENSSL_SYS_VMS) || defined(__DECC)) && !defined(OPENSSL_SYS_MACOSX)
+#define TIMES
+#endif
+
+#ifndef _IRIX
+#include <time.h>
+#endif
+#ifdef TIMES
+#include <sys/types.h>
+#include <sys/times.h>
+#endif
+
+/* Depending on the VMS version, the tms structure is perhaps defined.
+ The __TMS macro will show if it was. If it wasn't defined, we should
+ undefine TIMES, since that tells the rest of the program how things
+ should be handled. -- Richard Levitte */
+#if defined(OPENSSL_SYS_VMS_DECC) && !defined(__TMS)
+#undef TIMES
+#endif
+
+#ifndef TIMES
+#include <sys/timeb.h>
+#endif
+
+#if defined(sun) || defined(__ultrix)
+#define _POSIX_SOURCE
+#include <limits.h>
+#include <sys/param.h>
+#endif
+
+#include <openssl/bn.h>
+#include <openssl/x509.h>
+
+/* The following is from the times(3) man page. It may need to be changed */
+#ifndef HZ
+# ifndef CLK_TCK
+# ifndef _BSD_CLK_TCK_ /* FreeBSD hack */
+# define HZ 100.0
+# else /* _BSD_CLK_TCK_ */
+# define HZ ((double)_BSD_CLK_TCK_)
+# endif
+# else /* CLK_TCK */
+# define HZ ((double)CLK_TCK)
+# endif
+#endif
+
+#undef BUFSIZE
+#define BUFSIZE ((long)1024*8)
+int run=0;
+
+static double Time_F(int s);
+#define START 0
+#define STOP 1
+
+static double Time_F(int s)
+ {
+ double ret;
+#ifdef TIMES
+ static struct tms tstart,tend;
+
+ if (s == START)
+ {
+ times(&tstart);
+ return(0);
+ }
+ else
+ {
+ times(&tend);
+ ret=((double)(tend.tms_utime-tstart.tms_utime))/HZ;
+ return((ret < 1e-3)?1e-3:ret);
+ }
+#else /* !times() */
+ static struct timeb tstart,tend;
+ long i;
+
+ if (s == START)
+ {
+ ftime(&tstart);
+ return(0);
+ }
+ else
+ {
+ ftime(&tend);
+ i=(long)tend.millitm-(long)tstart.millitm;
+ ret=((double)(tend.time-tstart.time))+((double)i)/1000.0;
+ return((ret < 0.001)?0.001:ret);
+ }
+#endif
+ }
+
+#define NUM_SIZES 7
+#if NUM_START > NUM_SIZES
+# error "NUM_START > NUM_SIZES"
+#endif
+static int sizes[NUM_SIZES]={128,256,512,1024,2048,4096,8192};
+static int mul_c[NUM_SIZES]={8*8*8*8*8*8,8*8*8*8*8,8*8*8*8,8*8*8,8*8,8,1};
+/*static int sizes[NUM_SIZES]={59,179,299,419,539}; */
+
+#define RAND_SEED(string) { const char str[] = string; RAND_seed(string, sizeof str); }
+
+void do_mul_exp(BIGNUM *r,BIGNUM *a,BIGNUM *b,BIGNUM *c,BN_CTX *ctx);
+
+int main(int argc, char **argv)
+ {
+ BN_CTX *ctx;
+ BIGNUM *a,*b,*c,*r;
+
+#if 1
+ if (!CRYPTO_set_mem_debug_functions(0,0,0,0,0))
+ abort();
+#endif
+
+ ctx=BN_CTX_new();
+ a=BN_new();
+ b=BN_new();
+ c=BN_new();
+ r=BN_new();
+
+ while (!RAND_status())
+ /* not enough bits */
+ RAND_SEED("I demand a manual recount!");
+
+ do_mul_exp(r,a,b,c,ctx);
+ return 0;
+ }
+
+void do_mul_exp(BIGNUM *r, BIGNUM *a, BIGNUM *b, BIGNUM *c, BN_CTX *ctx)
+ {
+ int i,k;
+ double tm;
+ long num;
+
+ num=BASENUM;
+ for (i=NUM_START; i<NUM_SIZES; i++)
+ {
+#ifdef C_PRIME
+# ifdef TEST_SQRT
+ if (!BN_set_word(a, 64)) goto err;
+ if (!BN_set_word(b, P_MOD_64)) goto err;
+# define ADD a
+# define REM b
+# else
+# define ADD NULL
+# define REM NULL
+# endif
+ if (!BN_generate_prime(c,sizes[i],0,ADD,REM,genprime_cb,NULL)) goto err;
+ putc('\n', stderr);
+ fflush(stderr);
+#endif
+
+ for (k=0; k<num; k++)
+ {
+ if (k%50 == 0) /* Average over num/50 different choices of random numbers. */
+ {
+ if (!BN_pseudo_rand(a,sizes[i],1,0)) goto err;
+
+ if (!BN_pseudo_rand(b,sizes[i],1,0)) goto err;
+
+#ifndef C_PRIME
+ if (!BN_pseudo_rand(c,sizes[i],1,1)) goto err;
+#endif
+
+#ifdef TEST_SQRT
+ if (!BN_mod_sqr(a,a,c,ctx)) goto err;
+ if (!BN_mod_sqr(b,b,c,ctx)) goto err;
+#else
+ if (!BN_nnmod(a,a,c,ctx)) goto err;
+ if (!BN_nnmod(b,b,c,ctx)) goto err;
+#endif
+
+ if (k == 0)
+ Time_F(START);
+ }
+
+#if defined(TEST_EXP)
+ if (!BN_mod_exp(r,a,b,c,ctx)) goto err;
+#elif defined(TEST_MUL)
+ {
+ int i = 0;
+ for (i = 0; i < 50; i++)
+ if (!BN_mod_mul(r,a,b,c,ctx)) goto err;
+ }
+#elif defined(TEST_SQR)
+ {
+ int i = 0;
+ for (i = 0; i < 50; i++)
+ {
+ if (!BN_mod_sqr(r,a,c,ctx)) goto err;
+ if (!BN_mod_sqr(r,b,c,ctx)) goto err;
+ }
+ }
+#elif defined(TEST_GCD)
+ if (!BN_gcd(r,a,b,ctx)) goto err;
+ if (!BN_gcd(r,b,c,ctx)) goto err;
+ if (!BN_gcd(r,c,a,ctx)) goto err;
+#elif defined(TEST_KRON)
+ if (-2 == BN_kronecker(a,b,ctx)) goto err;
+ if (-2 == BN_kronecker(b,c,ctx)) goto err;
+ if (-2 == BN_kronecker(c,a,ctx)) goto err;
+#elif defined(TEST_INV)
+ if (!BN_mod_inverse(r,a,c,ctx)) goto err;
+ if (!BN_mod_inverse(r,b,c,ctx)) goto err;
+#else /* TEST_SQRT */
+ if (!BN_mod_sqrt(r,a,c,ctx)) goto err;
+ if (!BN_mod_sqrt(r,b,c,ctx)) goto err;
+#endif
+ }
+ tm=Time_F(STOP);
+ printf(
+#if defined(TEST_EXP)
+ "modexp %4d ^ %4d %% %4d"
+#elif defined(TEST_MUL)
+ "50*modmul %4d %4d %4d"
+#elif defined(TEST_SQR)
+ "100*modsqr %4d %4d %4d"
+#elif defined(TEST_GCD)
+ "3*gcd %4d %4d %4d"
+#elif defined(TEST_KRON)
+ "3*kronecker %4d %4d %4d"
+#elif defined(TEST_INV)
+ "2*inv %4d %4d mod %4d"
+#else /* TEST_SQRT */
+ "2*sqrt [prime == %d (mod 64)] %4d %4d mod %4d"
+#endif
+ " -> %8.6fms %5.1f (%ld)\n",
+#ifdef TEST_SQRT
+ P_MOD_64,
+#endif
+ sizes[i],sizes[i],sizes[i],tm*1000.0/num,tm*mul_c[i]/num, num);
+ num/=7;
+ if (num <= 0) num=1;
+ }
+ return;
+
+ err:
+ ERR_print_errors_fp(stderr);
+ }
+
+
+#ifdef C_PRIME
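+/* Progress callback for BN_generate_prime(): prints one character per
+ * callback event to stderr so the prime search can be watched. */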
+static void genprime_cb(int p, int n, void *arg)
+ {
+ char c='*';
+
+ if (p == 0) c='.';
+ if (p == 1) c='+';
+ if (p == 2) c='*';
+ if (p == 3) c='\n';
+ putc(c, stderr);
+ fflush(stderr);
+ (void)n;
+ (void)arg;
+ }
+#endif
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/exptest.c b/ics-openvpn-stripped/main/openssl/crypto/bn/exptest.c
new file mode 100644
index 00000000..074a8e88
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/exptest.c
@@ -0,0 +1,204 @@
+/* crypto/bn/exptest.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../e_os.h"
+
+#include <openssl/bio.h>
+#include <openssl/bn.h>
+#include <openssl/rand.h>
+#include <openssl/err.h>
+
+#define NUM_BITS (BN_BITS*2)
+
+static const char rnd_seed[] = "string to make the random number generator think it has entropy";
+
+int main(int argc, char *argv[])
+ {
+ BN_CTX *ctx;
+ BIO *out=NULL;
+ int i,ret;
+ unsigned char c;
+ BIGNUM *r_mont,*r_mont_const,*r_recp,*r_simple,*a,*b,*m;
+
+ RAND_seed(rnd_seed, sizeof rnd_seed); /* or BN_rand may fail, and we don't
+ * even check its return value
+ * (which we should) */
+
+ ERR_load_BN_strings();
+
+ ctx=BN_CTX_new();
+ if (ctx == NULL) EXIT(1);
+ r_mont=BN_new();
+ r_mont_const=BN_new();
+ r_recp=BN_new();
+ r_simple=BN_new();
+ a=BN_new();
+ b=BN_new();
+ m=BN_new();
+	if (	(r_mont == NULL) || (r_mont_const == NULL) ||
+		(r_recp == NULL) || (r_simple == NULL) ||
+		(a == NULL) || (b == NULL) || (m == NULL))
+		goto err;
+
+ out=BIO_new(BIO_s_file());
+
+ if (out == NULL) EXIT(1);
+ BIO_set_fp(out,stdout,BIO_NOCLOSE);
+
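+	/* Each of the 200 iterations picks random a, b and an odd modulus m of
+	 * about NUM_BITS +/- BN_BITS2 bits, reduces a and b modulo m, computes
+	 * a^b mod m with the Montgomery, constant-time Montgomery, reciprocal
+	 * and simple implementations, and fails if any two results disagree. */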
+ for (i=0; i<200; i++)
+ {
+ RAND_bytes(&c,1);
+ c=(c%BN_BITS)-BN_BITS2;
+ BN_rand(a,NUM_BITS+c,0,0);
+
+ RAND_bytes(&c,1);
+ c=(c%BN_BITS)-BN_BITS2;
+ BN_rand(b,NUM_BITS+c,0,0);
+
+ RAND_bytes(&c,1);
+ c=(c%BN_BITS)-BN_BITS2;
+ BN_rand(m,NUM_BITS+c,0,1);
+
+ BN_mod(a,a,m,ctx);
+ BN_mod(b,b,m,ctx);
+
+ ret=BN_mod_exp_mont(r_mont,a,b,m,ctx,NULL);
+ if (ret <= 0)
+ {
+ printf("BN_mod_exp_mont() problems\n");
+ ERR_print_errors(out);
+ EXIT(1);
+ }
+
+ ret=BN_mod_exp_recp(r_recp,a,b,m,ctx);
+ if (ret <= 0)
+ {
+ printf("BN_mod_exp_recp() problems\n");
+ ERR_print_errors(out);
+ EXIT(1);
+ }
+
+ ret=BN_mod_exp_simple(r_simple,a,b,m,ctx);
+ if (ret <= 0)
+ {
+ printf("BN_mod_exp_simple() problems\n");
+ ERR_print_errors(out);
+ EXIT(1);
+ }
+
+ ret=BN_mod_exp_mont_consttime(r_mont_const,a,b,m,ctx,NULL);
+ if (ret <= 0)
+ {
+ printf("BN_mod_exp_mont_consttime() problems\n");
+ ERR_print_errors(out);
+ EXIT(1);
+ }
+
+ if (BN_cmp(r_simple, r_mont) == 0
+ && BN_cmp(r_simple,r_recp) == 0
+ && BN_cmp(r_simple,r_mont_const) == 0)
+ {
+ printf(".");
+ fflush(stdout);
+ }
+ else
+ {
+ if (BN_cmp(r_simple,r_mont) != 0)
+ printf("\nsimple and mont results differ\n");
+ if (BN_cmp(r_simple,r_mont_const) != 0)
+ printf("\nsimple and mont const time results differ\n");
+ if (BN_cmp(r_simple,r_recp) != 0)
+ printf("\nsimple and recp results differ\n");
+
+ printf("a (%3d) = ",BN_num_bits(a)); BN_print(out,a);
+ printf("\nb (%3d) = ",BN_num_bits(b)); BN_print(out,b);
+ printf("\nm (%3d) = ",BN_num_bits(m)); BN_print(out,m);
+ printf("\nsimple ="); BN_print(out,r_simple);
+ printf("\nrecp ="); BN_print(out,r_recp);
+ printf("\nmont ="); BN_print(out,r_mont);
+ printf("\nmont_ct ="); BN_print(out,r_mont_const);
+ printf("\n");
+ EXIT(1);
+ }
+ }
+ BN_free(r_mont);
+ BN_free(r_mont_const);
+ BN_free(r_recp);
+ BN_free(r_simple);
+ BN_free(a);
+ BN_free(b);
+ BN_free(m);
+ BN_CTX_free(ctx);
+ ERR_remove_thread_state(NULL);
+ CRYPTO_mem_leaks(out);
+ BIO_free(out);
+ printf(" done\n");
+ EXIT(0);
+err:
+ ERR_load_crypto_strings();
+ ERR_print_errors(out);
+#ifdef OPENSSL_SYS_NETWARE
+ printf("ERROR\n");
+#endif
+ EXIT(1);
+ return(1);
+ }
+
diff --git a/ics-openvpn-stripped/main/openssl/crypto/bn/todo b/ics-openvpn-stripped/main/openssl/crypto/bn/todo
new file mode 100644
index 00000000..e47e381a
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/bn/todo
@@ -0,0 +1,3 @@
+Cache RECP_CTX values
+make the result argument independent of the inputs.
+split up the _exp_ functions