Diffstat (limited to 'ics-openvpn-stripped/main/openssl/crypto/md5/asm')
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.S      679
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.pl     307
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-ia64.S     992
-rw-r--r--  ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.S   668
-rwxr-xr-x  ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.pl  370
5 files changed, 3016 insertions, 0 deletions
diff --git a/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.S b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.S
new file mode 100644
index 00000000..23e4de7d
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.S
@@ -0,0 +1,679 @@
+.file "crypto/md5/asm/md5-586.s"
+.text
+.globl md5_block_asm_data_order
+.type md5_block_asm_data_order,@function
+.align 16
+md5_block_asm_data_order:
+.L_md5_block_asm_data_order_begin:
+ pushl %esi
+ pushl %edi
+ movl 12(%esp),%edi
+ movl 16(%esp),%esi
+ movl 20(%esp),%ecx
+ pushl %ebp
+ shll $6,%ecx
+ pushl %ebx
+ addl %esi,%ecx
+ subl $64,%ecx
+ movl (%edi),%eax
+ pushl %ecx
+ movl 4(%edi),%ebx
+ movl 8(%edi),%ecx
+ movl 12(%edi),%edx
+.L000start:
+
+
+ movl %ecx,%edi
+ movl (%esi),%ebp
+
+ xorl %edx,%edi
+ andl %ebx,%edi
+ leal 3614090360(%eax,%ebp,1),%eax
+ xorl %edx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $7,%eax
+ movl 4(%esi),%ebp
+ addl %ebx,%eax
+
+ xorl %ecx,%edi
+ andl %eax,%edi
+ leal 3905402710(%edx,%ebp,1),%edx
+ xorl %ecx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $12,%edx
+ movl 8(%esi),%ebp
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ andl %edx,%edi
+ leal 606105819(%ecx,%ebp,1),%ecx
+ xorl %ebx,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $17,%ecx
+ movl 12(%esi),%ebp
+ addl %edx,%ecx
+
+ xorl %eax,%edi
+ andl %ecx,%edi
+ leal 3250441966(%ebx,%ebp,1),%ebx
+ xorl %eax,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $22,%ebx
+ movl 16(%esi),%ebp
+ addl %ecx,%ebx
+
+ xorl %edx,%edi
+ andl %ebx,%edi
+ leal 4118548399(%eax,%ebp,1),%eax
+ xorl %edx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $7,%eax
+ movl 20(%esi),%ebp
+ addl %ebx,%eax
+
+ xorl %ecx,%edi
+ andl %eax,%edi
+ leal 1200080426(%edx,%ebp,1),%edx
+ xorl %ecx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $12,%edx
+ movl 24(%esi),%ebp
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ andl %edx,%edi
+ leal 2821735955(%ecx,%ebp,1),%ecx
+ xorl %ebx,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $17,%ecx
+ movl 28(%esi),%ebp
+ addl %edx,%ecx
+
+ xorl %eax,%edi
+ andl %ecx,%edi
+ leal 4249261313(%ebx,%ebp,1),%ebx
+ xorl %eax,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $22,%ebx
+ movl 32(%esi),%ebp
+ addl %ecx,%ebx
+
+ xorl %edx,%edi
+ andl %ebx,%edi
+ leal 1770035416(%eax,%ebp,1),%eax
+ xorl %edx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $7,%eax
+ movl 36(%esi),%ebp
+ addl %ebx,%eax
+
+ xorl %ecx,%edi
+ andl %eax,%edi
+ leal 2336552879(%edx,%ebp,1),%edx
+ xorl %ecx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $12,%edx
+ movl 40(%esi),%ebp
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ andl %edx,%edi
+ leal 4294925233(%ecx,%ebp,1),%ecx
+ xorl %ebx,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $17,%ecx
+ movl 44(%esi),%ebp
+ addl %edx,%ecx
+
+ xorl %eax,%edi
+ andl %ecx,%edi
+ leal 2304563134(%ebx,%ebp,1),%ebx
+ xorl %eax,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $22,%ebx
+ movl 48(%esi),%ebp
+ addl %ecx,%ebx
+
+ xorl %edx,%edi
+ andl %ebx,%edi
+ leal 1804603682(%eax,%ebp,1),%eax
+ xorl %edx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $7,%eax
+ movl 52(%esi),%ebp
+ addl %ebx,%eax
+
+ xorl %ecx,%edi
+ andl %eax,%edi
+ leal 4254626195(%edx,%ebp,1),%edx
+ xorl %ecx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $12,%edx
+ movl 56(%esi),%ebp
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ andl %edx,%edi
+ leal 2792965006(%ecx,%ebp,1),%ecx
+ xorl %ebx,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $17,%ecx
+ movl 60(%esi),%ebp
+ addl %edx,%ecx
+
+ xorl %eax,%edi
+ andl %ecx,%edi
+ leal 1236535329(%ebx,%ebp,1),%ebx
+ xorl %eax,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $22,%ebx
+ movl 4(%esi),%ebp
+ addl %ecx,%ebx
+
+
+
+ leal 4129170786(%eax,%ebp,1),%eax
+ xorl %ebx,%edi
+ andl %edx,%edi
+ movl 24(%esi),%ebp
+ xorl %ecx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $5,%eax
+ addl %ebx,%eax
+
+ leal 3225465664(%edx,%ebp,1),%edx
+ xorl %eax,%edi
+ andl %ecx,%edi
+ movl 44(%esi),%ebp
+ xorl %ebx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $9,%edx
+ addl %eax,%edx
+
+ leal 643717713(%ecx,%ebp,1),%ecx
+ xorl %edx,%edi
+ andl %ebx,%edi
+ movl (%esi),%ebp
+ xorl %eax,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $14,%ecx
+ addl %edx,%ecx
+
+ leal 3921069994(%ebx,%ebp,1),%ebx
+ xorl %ecx,%edi
+ andl %eax,%edi
+ movl 20(%esi),%ebp
+ xorl %edx,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $20,%ebx
+ addl %ecx,%ebx
+
+ leal 3593408605(%eax,%ebp,1),%eax
+ xorl %ebx,%edi
+ andl %edx,%edi
+ movl 40(%esi),%ebp
+ xorl %ecx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $5,%eax
+ addl %ebx,%eax
+
+ leal 38016083(%edx,%ebp,1),%edx
+ xorl %eax,%edi
+ andl %ecx,%edi
+ movl 60(%esi),%ebp
+ xorl %ebx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $9,%edx
+ addl %eax,%edx
+
+ leal 3634488961(%ecx,%ebp,1),%ecx
+ xorl %edx,%edi
+ andl %ebx,%edi
+ movl 16(%esi),%ebp
+ xorl %eax,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $14,%ecx
+ addl %edx,%ecx
+
+ leal 3889429448(%ebx,%ebp,1),%ebx
+ xorl %ecx,%edi
+ andl %eax,%edi
+ movl 36(%esi),%ebp
+ xorl %edx,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $20,%ebx
+ addl %ecx,%ebx
+
+ leal 568446438(%eax,%ebp,1),%eax
+ xorl %ebx,%edi
+ andl %edx,%edi
+ movl 56(%esi),%ebp
+ xorl %ecx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $5,%eax
+ addl %ebx,%eax
+
+ leal 3275163606(%edx,%ebp,1),%edx
+ xorl %eax,%edi
+ andl %ecx,%edi
+ movl 12(%esi),%ebp
+ xorl %ebx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $9,%edx
+ addl %eax,%edx
+
+ leal 4107603335(%ecx,%ebp,1),%ecx
+ xorl %edx,%edi
+ andl %ebx,%edi
+ movl 32(%esi),%ebp
+ xorl %eax,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $14,%ecx
+ addl %edx,%ecx
+
+ leal 1163531501(%ebx,%ebp,1),%ebx
+ xorl %ecx,%edi
+ andl %eax,%edi
+ movl 52(%esi),%ebp
+ xorl %edx,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $20,%ebx
+ addl %ecx,%ebx
+
+ leal 2850285829(%eax,%ebp,1),%eax
+ xorl %ebx,%edi
+ andl %edx,%edi
+ movl 8(%esi),%ebp
+ xorl %ecx,%edi
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $5,%eax
+ addl %ebx,%eax
+
+ leal 4243563512(%edx,%ebp,1),%edx
+ xorl %eax,%edi
+ andl %ecx,%edi
+ movl 28(%esi),%ebp
+ xorl %ebx,%edi
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $9,%edx
+ addl %eax,%edx
+
+ leal 1735328473(%ecx,%ebp,1),%ecx
+ xorl %edx,%edi
+ andl %ebx,%edi
+ movl 48(%esi),%ebp
+ xorl %eax,%edi
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $14,%ecx
+ addl %edx,%ecx
+
+ leal 2368359562(%ebx,%ebp,1),%ebx
+ xorl %ecx,%edi
+ andl %eax,%edi
+ movl 20(%esi),%ebp
+ xorl %edx,%edi
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $20,%ebx
+ addl %ecx,%ebx
+
+
+
+ xorl %edx,%edi
+ xorl %ebx,%edi
+ leal 4294588738(%eax,%ebp,1),%eax
+ addl %edi,%eax
+ roll $4,%eax
+ movl 32(%esi),%ebp
+ movl %ebx,%edi
+
+ leal 2272392833(%edx,%ebp,1),%edx
+ addl %ebx,%eax
+ xorl %ecx,%edi
+ xorl %eax,%edi
+ movl 44(%esi),%ebp
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $11,%edx
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ xorl %edx,%edi
+ leal 1839030562(%ecx,%ebp,1),%ecx
+ addl %edi,%ecx
+ roll $16,%ecx
+ movl 56(%esi),%ebp
+ movl %edx,%edi
+
+ leal 4259657740(%ebx,%ebp,1),%ebx
+ addl %edx,%ecx
+ xorl %eax,%edi
+ xorl %ecx,%edi
+ movl 4(%esi),%ebp
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $23,%ebx
+ addl %ecx,%ebx
+
+ xorl %edx,%edi
+ xorl %ebx,%edi
+ leal 2763975236(%eax,%ebp,1),%eax
+ addl %edi,%eax
+ roll $4,%eax
+ movl 16(%esi),%ebp
+ movl %ebx,%edi
+
+ leal 1272893353(%edx,%ebp,1),%edx
+ addl %ebx,%eax
+ xorl %ecx,%edi
+ xorl %eax,%edi
+ movl 28(%esi),%ebp
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $11,%edx
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ xorl %edx,%edi
+ leal 4139469664(%ecx,%ebp,1),%ecx
+ addl %edi,%ecx
+ roll $16,%ecx
+ movl 40(%esi),%ebp
+ movl %edx,%edi
+
+ leal 3200236656(%ebx,%ebp,1),%ebx
+ addl %edx,%ecx
+ xorl %eax,%edi
+ xorl %ecx,%edi
+ movl 52(%esi),%ebp
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $23,%ebx
+ addl %ecx,%ebx
+
+ xorl %edx,%edi
+ xorl %ebx,%edi
+ leal 681279174(%eax,%ebp,1),%eax
+ addl %edi,%eax
+ roll $4,%eax
+ movl (%esi),%ebp
+ movl %ebx,%edi
+
+ leal 3936430074(%edx,%ebp,1),%edx
+ addl %ebx,%eax
+ xorl %ecx,%edi
+ xorl %eax,%edi
+ movl 12(%esi),%ebp
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $11,%edx
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ xorl %edx,%edi
+ leal 3572445317(%ecx,%ebp,1),%ecx
+ addl %edi,%ecx
+ roll $16,%ecx
+ movl 24(%esi),%ebp
+ movl %edx,%edi
+
+ leal 76029189(%ebx,%ebp,1),%ebx
+ addl %edx,%ecx
+ xorl %eax,%edi
+ xorl %ecx,%edi
+ movl 36(%esi),%ebp
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $23,%ebx
+ addl %ecx,%ebx
+
+ xorl %edx,%edi
+ xorl %ebx,%edi
+ leal 3654602809(%eax,%ebp,1),%eax
+ addl %edi,%eax
+ roll $4,%eax
+ movl 48(%esi),%ebp
+ movl %ebx,%edi
+
+ leal 3873151461(%edx,%ebp,1),%edx
+ addl %ebx,%eax
+ xorl %ecx,%edi
+ xorl %eax,%edi
+ movl 60(%esi),%ebp
+ addl %edi,%edx
+ movl %eax,%edi
+ roll $11,%edx
+ addl %eax,%edx
+
+ xorl %ebx,%edi
+ xorl %edx,%edi
+ leal 530742520(%ecx,%ebp,1),%ecx
+ addl %edi,%ecx
+ roll $16,%ecx
+ movl 8(%esi),%ebp
+ movl %edx,%edi
+
+ leal 3299628645(%ebx,%ebp,1),%ebx
+ addl %edx,%ecx
+ xorl %eax,%edi
+ xorl %ecx,%edi
+ movl (%esi),%ebp
+ addl %edi,%ebx
+ movl $-1,%edi
+ roll $23,%ebx
+ addl %ecx,%ebx
+
+
+
+ xorl %edx,%edi
+ orl %ebx,%edi
+ leal 4096336452(%eax,%ebp,1),%eax
+ xorl %ecx,%edi
+ movl 28(%esi),%ebp
+ addl %edi,%eax
+ movl $-1,%edi
+ roll $6,%eax
+ xorl %ecx,%edi
+ addl %ebx,%eax
+
+ orl %eax,%edi
+ leal 1126891415(%edx,%ebp,1),%edx
+ xorl %ebx,%edi
+ movl 56(%esi),%ebp
+ addl %edi,%edx
+ movl $-1,%edi
+ roll $10,%edx
+ xorl %ebx,%edi
+ addl %eax,%edx
+
+ orl %edx,%edi
+ leal 2878612391(%ecx,%ebp,1),%ecx
+ xorl %eax,%edi
+ movl 20(%esi),%ebp
+ addl %edi,%ecx
+ movl $-1,%edi
+ roll $15,%ecx
+ xorl %eax,%edi
+ addl %edx,%ecx
+
+ orl %ecx,%edi
+ leal 4237533241(%ebx,%ebp,1),%ebx
+ xorl %edx,%edi
+ movl 48(%esi),%ebp
+ addl %edi,%ebx
+ movl $-1,%edi
+ roll $21,%ebx
+ xorl %edx,%edi
+ addl %ecx,%ebx
+
+ orl %ebx,%edi
+ leal 1700485571(%eax,%ebp,1),%eax
+ xorl %ecx,%edi
+ movl 12(%esi),%ebp
+ addl %edi,%eax
+ movl $-1,%edi
+ roll $6,%eax
+ xorl %ecx,%edi
+ addl %ebx,%eax
+
+ orl %eax,%edi
+ leal 2399980690(%edx,%ebp,1),%edx
+ xorl %ebx,%edi
+ movl 40(%esi),%ebp
+ addl %edi,%edx
+ movl $-1,%edi
+ roll $10,%edx
+ xorl %ebx,%edi
+ addl %eax,%edx
+
+ orl %edx,%edi
+ leal 4293915773(%ecx,%ebp,1),%ecx
+ xorl %eax,%edi
+ movl 4(%esi),%ebp
+ addl %edi,%ecx
+ movl $-1,%edi
+ roll $15,%ecx
+ xorl %eax,%edi
+ addl %edx,%ecx
+
+ orl %ecx,%edi
+ leal 2240044497(%ebx,%ebp,1),%ebx
+ xorl %edx,%edi
+ movl 32(%esi),%ebp
+ addl %edi,%ebx
+ movl $-1,%edi
+ roll $21,%ebx
+ xorl %edx,%edi
+ addl %ecx,%ebx
+
+ orl %ebx,%edi
+ leal 1873313359(%eax,%ebp,1),%eax
+ xorl %ecx,%edi
+ movl 60(%esi),%ebp
+ addl %edi,%eax
+ movl $-1,%edi
+ roll $6,%eax
+ xorl %ecx,%edi
+ addl %ebx,%eax
+
+ orl %eax,%edi
+ leal 4264355552(%edx,%ebp,1),%edx
+ xorl %ebx,%edi
+ movl 24(%esi),%ebp
+ addl %edi,%edx
+ movl $-1,%edi
+ roll $10,%edx
+ xorl %ebx,%edi
+ addl %eax,%edx
+
+ orl %edx,%edi
+ leal 2734768916(%ecx,%ebp,1),%ecx
+ xorl %eax,%edi
+ movl 52(%esi),%ebp
+ addl %edi,%ecx
+ movl $-1,%edi
+ roll $15,%ecx
+ xorl %eax,%edi
+ addl %edx,%ecx
+
+ orl %ecx,%edi
+ leal 1309151649(%ebx,%ebp,1),%ebx
+ xorl %edx,%edi
+ movl 16(%esi),%ebp
+ addl %edi,%ebx
+ movl $-1,%edi
+ roll $21,%ebx
+ xorl %edx,%edi
+ addl %ecx,%ebx
+
+ orl %ebx,%edi
+ leal 4149444226(%eax,%ebp,1),%eax
+ xorl %ecx,%edi
+ movl 44(%esi),%ebp
+ addl %edi,%eax
+ movl $-1,%edi
+ roll $6,%eax
+ xorl %ecx,%edi
+ addl %ebx,%eax
+
+ orl %eax,%edi
+ leal 3174756917(%edx,%ebp,1),%edx
+ xorl %ebx,%edi
+ movl 8(%esi),%ebp
+ addl %edi,%edx
+ movl $-1,%edi
+ roll $10,%edx
+ xorl %ebx,%edi
+ addl %eax,%edx
+
+ orl %edx,%edi
+ leal 718787259(%ecx,%ebp,1),%ecx
+ xorl %eax,%edi
+ movl 36(%esi),%ebp
+ addl %edi,%ecx
+ movl $-1,%edi
+ roll $15,%ecx
+ xorl %eax,%edi
+ addl %edx,%ecx
+
+ orl %ecx,%edi
+ leal 3951481745(%ebx,%ebp,1),%ebx
+ xorl %edx,%edi
+ movl 24(%esp),%ebp
+ addl %edi,%ebx
+ addl $64,%esi
+ roll $21,%ebx
+ movl (%ebp),%edi
+ addl %ecx,%ebx
+ addl %edi,%eax
+ movl 4(%ebp),%edi
+ addl %edi,%ebx
+ movl 8(%ebp),%edi
+ addl %edi,%ecx
+ movl 12(%ebp),%edi
+ addl %edi,%edx
+ movl %eax,(%ebp)
+ movl %ebx,4(%ebp)
+ movl (%esp),%edi
+ movl %ecx,8(%ebp)
+ movl %edx,12(%ebp)
+ cmpl %esi,%edi
+ jae .L000start
+ popl %eax
+ popl %ebx
+ popl %ebp
+ popl %edi
+ popl %esi
+ ret
+.size md5_block_asm_data_order,.-.L_md5_block_asm_data_order_begin
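md5-586.S above is perlasm output, so the 64 MD5 steps are fully unrolled. Each group in the R0 section (xorl/andl/leal/xorl/addl/roll/addl, e.g. the first step with constant 3614090360 = 0xd76aa478) is one round-1 step. A minimal C sketch of that step, assuming RFC 1321 semantics; the names md5_f_step and ROTL32 are illustrative, not from this tree:

```c
#include <stdint.h>

/* One MD5 round-1 (F) step, mirroring the unrolled pattern in md5-586.S.
 * F(b,c,d) = (b & c) | (~b & d), computed as d ^ (b & (c ^ d)) exactly as
 * the assembly builds it in %edi. */
#define ROTL32(v, s) (((v) << (s)) | ((v) >> (32 - (s))))

static uint32_t md5_f_step(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                           uint32_t x, uint32_t t, int s)
{
    uint32_t f = d ^ (b & (c ^ d));  /* xorl %edx,%edi; andl %ebx,%edi; xorl %edx,%edi */
    a += x + t;                      /* leal t(%eax,%ebp,1),%eax */
    a += f;                          /* addl %edi,%eax */
    a = ROTL32(a, s);                /* roll $s,%eax */
    return a + b;                    /* addl %ebx,%eax */
}
```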
diff --git a/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.pl b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.pl
new file mode 100644
index 00000000..6cb66bb4
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-586.pl
@@ -0,0 +1,307 @@
+#!/usr/local/bin/perl
+
+# Normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X);
+# version, non-normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X,int blocks);
+
+$normal=0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+$A="eax";
+$B="ebx";
+$C="ecx";
+$D="edx";
+$tmp1="edi";
+$tmp2="ebp";
+$X="esi";
+
+# What we need to load into $tmp for the next round
+%Ltmp1=("R0",&Np($C), "R1",&Np($C), "R2",&Np($C), "R3",&Np($D));
+@xo=(
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, # R0
+ 1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, # R1
+ 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, # R2
+ 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9, # R3
+ );
+
+&md5_block("md5_block_asm_data_order");
+&asm_finish();
+
+sub Np
+ {
+ local($p)=@_;
+ local(%n)=($A,$D,$B,$A,$C,$B,$D,$C);
+ return($n{$p});
+ }
+
+sub R0
+ {
+ local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+ &mov($tmp1,$C) if $pos < 0;
+ &mov($tmp2,&DWP($xo[$ki]*4,$K,"",0)) if $pos < 0; # very first one
+
+ # body proper
+
+ &comment("R0 $ki");
+ &xor($tmp1,$d); # F function - part 2
+
+ &and($tmp1,$b); # F function - part 3
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &xor($tmp1,$d); # F function - part 4
+
+ &add($a,$tmp1);
+ &mov($tmp1,&Np($c)) if $pos < 1; # next tmp1 for R0
+ &mov($tmp1,&Np($c)) if $pos == 1; # next tmp1 for R1
+
+ &rotl($a,$s);
+
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+ &add($a,$b);
+ }
+
+sub R1
+ {
+ local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+ &comment("R1 $ki");
+
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &xor($tmp1,$b); # G function - part 2
+ &and($tmp1,$d); # G function - part 3
+
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+ &xor($tmp1,$c); # G function - part 4
+
+ &add($a,$tmp1);
+ &mov($tmp1,&Np($c)) if $pos < 1; # G function - part 1
+ &mov($tmp1,&Np($c)) if $pos == 1; # G function - part 1
+
+ &rotl($a,$s);
+
+ &add($a,$b);
+ }
+
+sub R2
+ {
+ local($n,$pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+ # This one is different, only 3 logical operations
+
+if (($n & 1) == 0)
+ {
+ &comment("R2 $ki");
+ # make sure to do 'D' first, not 'B', else we clash with
+ # the last add from the previous round.
+
+ &xor($tmp1,$d); # H function - part 2
+
+ &xor($tmp1,$b); # H function - part 3
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &add($a,$tmp1);
+
+ &rotl($a,$s);
+
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0));
+ &mov($tmp1,&Np($c));
+ }
+else
+ {
+ &comment("R2 $ki");
+ # make sure to do 'D' first, not 'B', else we clash with
+ # the last add from the previous round.
+
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &add($b,$c); # MOVED FORWARD
+ &xor($tmp1,$d); # H function - part 2
+
+ &xor($tmp1,$b); # H function - part 3
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+ &add($a,$tmp1);
+ &mov($tmp1,&Np($c)) if $pos < 1; # H function - part 1
+ &mov($tmp1,-1) if $pos == 1; # I function - part 1
+
+ &rotl($a,$s);
+
+ &add($a,$b);
+ }
+ }
+
+sub R3
+ {
+ local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+ &comment("R3 $ki");
+
+ # &not($tmp1)
+ &xor($tmp1,$d) if $pos < 0; # I function - part 2
+
+ &or($tmp1,$b); # I function - part 3
+ &lea($a,&DWP($t,$a,$tmp2,1));
+
+ &xor($tmp1,$c); # I function - part 4
+ &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if $pos != 2; # load X/k value
+ &mov($tmp2,&wparam(0)) if $pos == 2;
+
+ &add($a,$tmp1);
+ &mov($tmp1,-1) if $pos < 1; # H function - part 1
+ &add($K,64) if $pos >=1 && !$normal;
+
+ &rotl($a,$s);
+
+ &xor($tmp1,&Np($d)) if $pos <= 0; # I function - part = first time
+ &mov($tmp1,&DWP( 0,$tmp2,"",0)) if $pos > 0;
+ &add($a,$b);
+ }
+
+
+sub md5_block
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,"",3);
+
+ # parameter 1 is the MD5_CTX structure.
+ # A 0
+ # B 4
+ # C 8
+ # D 12
+
+ &push("esi");
+ &push("edi");
+ &mov($tmp1, &wparam(0)); # edi
+ &mov($X, &wparam(1)); # esi
+ &mov($C, &wparam(2));
+ &push("ebp");
+ &shl($C, 6);
+ &push("ebx");
+ &add($C, $X); # offset we end at
+ &sub($C, 64);
+ &mov($A, &DWP( 0,$tmp1,"",0));
+ &push($C); # Put on the TOS
+ &mov($B, &DWP( 4,$tmp1,"",0));
+ &mov($C, &DWP( 8,$tmp1,"",0));
+ &mov($D, &DWP(12,$tmp1,"",0));
+
+ &set_label("start") unless $normal;
+ &comment("");
+ &comment("R0 section");
+
+ &R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478);
+ &R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756);
+ &R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db);
+ &R0( 0,$B,$C,$D,$A,$X, 3,22,0xc1bdceee);
+ &R0( 0,$A,$B,$C,$D,$X, 4, 7,0xf57c0faf);
+ &R0( 0,$D,$A,$B,$C,$X, 5,12,0x4787c62a);
+ &R0( 0,$C,$D,$A,$B,$X, 6,17,0xa8304613);
+ &R0( 0,$B,$C,$D,$A,$X, 7,22,0xfd469501);
+ &R0( 0,$A,$B,$C,$D,$X, 8, 7,0x698098d8);
+ &R0( 0,$D,$A,$B,$C,$X, 9,12,0x8b44f7af);
+ &R0( 0,$C,$D,$A,$B,$X,10,17,0xffff5bb1);
+ &R0( 0,$B,$C,$D,$A,$X,11,22,0x895cd7be);
+ &R0( 0,$A,$B,$C,$D,$X,12, 7,0x6b901122);
+ &R0( 0,$D,$A,$B,$C,$X,13,12,0xfd987193);
+ &R0( 0,$C,$D,$A,$B,$X,14,17,0xa679438e);
+ &R0( 1,$B,$C,$D,$A,$X,15,22,0x49b40821);
+
+ &comment("");
+ &comment("R1 section");
+ &R1(-1,$A,$B,$C,$D,$X,16, 5,0xf61e2562);
+ &R1( 0,$D,$A,$B,$C,$X,17, 9,0xc040b340);
+ &R1( 0,$C,$D,$A,$B,$X,18,14,0x265e5a51);
+ &R1( 0,$B,$C,$D,$A,$X,19,20,0xe9b6c7aa);
+ &R1( 0,$A,$B,$C,$D,$X,20, 5,0xd62f105d);
+ &R1( 0,$D,$A,$B,$C,$X,21, 9,0x02441453);
+ &R1( 0,$C,$D,$A,$B,$X,22,14,0xd8a1e681);
+ &R1( 0,$B,$C,$D,$A,$X,23,20,0xe7d3fbc8);
+ &R1( 0,$A,$B,$C,$D,$X,24, 5,0x21e1cde6);
+ &R1( 0,$D,$A,$B,$C,$X,25, 9,0xc33707d6);
+ &R1( 0,$C,$D,$A,$B,$X,26,14,0xf4d50d87);
+ &R1( 0,$B,$C,$D,$A,$X,27,20,0x455a14ed);
+ &R1( 0,$A,$B,$C,$D,$X,28, 5,0xa9e3e905);
+ &R1( 0,$D,$A,$B,$C,$X,29, 9,0xfcefa3f8);
+ &R1( 0,$C,$D,$A,$B,$X,30,14,0x676f02d9);
+ &R1( 1,$B,$C,$D,$A,$X,31,20,0x8d2a4c8a);
+
+ &comment("");
+ &comment("R2 section");
+ &R2( 0,-1,$A,$B,$C,$D,$X,32, 4,0xfffa3942);
+ &R2( 1, 0,$D,$A,$B,$C,$X,33,11,0x8771f681);
+ &R2( 2, 0,$C,$D,$A,$B,$X,34,16,0x6d9d6122);
+ &R2( 3, 0,$B,$C,$D,$A,$X,35,23,0xfde5380c);
+ &R2( 4, 0,$A,$B,$C,$D,$X,36, 4,0xa4beea44);
+ &R2( 5, 0,$D,$A,$B,$C,$X,37,11,0x4bdecfa9);
+ &R2( 6, 0,$C,$D,$A,$B,$X,38,16,0xf6bb4b60);
+ &R2( 7, 0,$B,$C,$D,$A,$X,39,23,0xbebfbc70);
+ &R2( 8, 0,$A,$B,$C,$D,$X,40, 4,0x289b7ec6);
+ &R2( 9, 0,$D,$A,$B,$C,$X,41,11,0xeaa127fa);
+ &R2(10, 0,$C,$D,$A,$B,$X,42,16,0xd4ef3085);
+ &R2(11, 0,$B,$C,$D,$A,$X,43,23,0x04881d05);
+ &R2(12, 0,$A,$B,$C,$D,$X,44, 4,0xd9d4d039);
+ &R2(13, 0,$D,$A,$B,$C,$X,45,11,0xe6db99e5);
+ &R2(14, 0,$C,$D,$A,$B,$X,46,16,0x1fa27cf8);
+ &R2(15, 1,$B,$C,$D,$A,$X,47,23,0xc4ac5665);
+
+ &comment("");
+ &comment("R3 section");
+ &R3(-1,$A,$B,$C,$D,$X,48, 6,0xf4292244);
+ &R3( 0,$D,$A,$B,$C,$X,49,10,0x432aff97);
+ &R3( 0,$C,$D,$A,$B,$X,50,15,0xab9423a7);
+ &R3( 0,$B,$C,$D,$A,$X,51,21,0xfc93a039);
+ &R3( 0,$A,$B,$C,$D,$X,52, 6,0x655b59c3);
+ &R3( 0,$D,$A,$B,$C,$X,53,10,0x8f0ccc92);
+ &R3( 0,$C,$D,$A,$B,$X,54,15,0xffeff47d);
+ &R3( 0,$B,$C,$D,$A,$X,55,21,0x85845dd1);
+ &R3( 0,$A,$B,$C,$D,$X,56, 6,0x6fa87e4f);
+ &R3( 0,$D,$A,$B,$C,$X,57,10,0xfe2ce6e0);
+ &R3( 0,$C,$D,$A,$B,$X,58,15,0xa3014314);
+ &R3( 0,$B,$C,$D,$A,$X,59,21,0x4e0811a1);
+ &R3( 0,$A,$B,$C,$D,$X,60, 6,0xf7537e82);
+ &R3( 0,$D,$A,$B,$C,$X,61,10,0xbd3af235);
+ &R3( 0,$C,$D,$A,$B,$X,62,15,0x2ad7d2bb);
+ &R3( 2,$B,$C,$D,$A,$X,63,21,0xeb86d391);
+
+ # &mov($tmp2,&wparam(0)); # done in the last R3
+	# &mov($tmp1, &DWP( 0,$tmp2,"",0)); # done in the last R3
+
+ &add($A,$tmp1);
+ &mov($tmp1, &DWP( 4,$tmp2,"",0));
+
+ &add($B,$tmp1);
+ &mov($tmp1, &DWP( 8,$tmp2,"",0));
+
+ &add($C,$tmp1);
+ &mov($tmp1, &DWP(12,$tmp2,"",0));
+
+ &add($D,$tmp1);
+ &mov(&DWP( 0,$tmp2,"",0),$A);
+
+ &mov(&DWP( 4,$tmp2,"",0),$B);
+ &mov($tmp1,&swtmp(0)) unless $normal;
+
+ &mov(&DWP( 8,$tmp2,"",0),$C);
+ &mov(&DWP(12,$tmp2,"",0),$D);
+
+ &cmp($tmp1,$X) unless $normal; # check count
+ &jae(&label("start")) unless $normal;
+
+ &pop("eax"); # pop the temp variable off the stack
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
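The @xo table in md5-586.pl above pins down which input word each of the 64 steps reads (rows R0..R3). The same rows follow from the standard MD5 index schedule — k, (5k+1) mod 16, (3k+5) mod 16 and 7k mod 16 — so they can be cross-checked with a small stand-alone C program (illustrative only, not part of the build):

```c
#include <stdio.h>

/* Print the four message-index rows of @xo from md5-586.pl for comparison. */
int main(void)
{
    for (int k = 0; k < 16; k++) printf("%2d ", k);                /* R0 */
    putchar('\n');
    for (int k = 0; k < 16; k++) printf("%2d ", (5 * k + 1) % 16); /* R1 */
    putchar('\n');
    for (int k = 0; k < 16; k++) printf("%2d ", (3 * k + 5) % 16); /* R2 */
    putchar('\n');
    for (int k = 0; k < 16; k++) printf("%2d ", (7 * k) % 16);     /* R3 */
    putchar('\n');
    return 0;
}
```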
diff --git a/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-ia64.S b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-ia64.S
new file mode 100644
index 00000000..e7de08d4
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-ia64.S
@@ -0,0 +1,992 @@
+/* Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+// Common registers are assigned as follows:
+//
+// COMMON
+//
+// t0 Const Tbl Ptr TPtr
+// t1 Round Constant TRound
+// t4 Block residual LenResid
+// t5 Residual Data DTmp
+//
+// {in,out}0 Block 0 Cycle RotateM0
+// {in,out}1 Block Value 12 M12
+// {in,out}2 Block Value 8 M8
+// {in,out}3 Block Value 4 M4
+// {in,out}4 Block Value 0 M0
+// {in,out}5 Block 1 Cycle RotateM1
+// {in,out}6 Block Value 13 M13
+// {in,out}7 Block Value 9 M9
+// {in,out}8 Block Value 5 M5
+// {in,out}9 Block Value 1 M1
+// {in,out}10 Block 2 Cycle RotateM2
+// {in,out}11 Block Value 14 M14
+// {in,out}12 Block Value 10 M10
+// {in,out}13 Block Value 6 M6
+// {in,out}14 Block Value 2 M2
+// {in,out}15 Block 3 Cycle RotateM3
+// {in,out}16 Block Value 15 M15
+// {in,out}17 Block Value 11 M11
+// {in,out}18 Block Value 7 M7
+// {in,out}19 Block Value 3 M3
+// {in,out}20 Scratch Z
+// {in,out}21 Scratch Y
+// {in,out}22 Scratch X
+// {in,out}23 Scratch W
+// {in,out}24 Digest A A
+// {in,out}25 Digest B B
+// {in,out}26 Digest C C
+// {in,out}27 Digest D D
+// {in,out}28 Active Data Ptr DPtr
+// in28 Dummy Value -
+// out28 Dummy Value -
+// bt0 Coroutine Link QUICK_RTN
+//
+/// These predicates are used for computing the padding block(s) and
+/// are shared between the driver and digest co-routines
+//
+// pt0 Extra Pad Block pExtra
+// pt1 Load next word pLoad
+// pt2 Skip next word pSkip
+// pt3 Search for Pad pNoPad
+// pt4 Pad Word 0 pPad0
+// pt5 Pad Word 1 pPad1
+// pt6 Pad Word 2 pPad2
+// pt7 Pad Word 3 pPad3
+
+#define DTmp r19
+#define LenResid r18
+#define QUICK_RTN b6
+#define TPtr r14
+#define TRound r15
+#define pExtra p6
+#define pLoad p7
+#define pNoPad p9
+#define pPad0 p10
+#define pPad1 p11
+#define pPad2 p12
+#define pPad3 p13
+#define pSkip p8
+
+#define A_ out24
+#define B_ out25
+#define C_ out26
+#define D_ out27
+#define DPtr_ out28
+#define M0_ out4
+#define M1_ out9
+#define M10_ out12
+#define M11_ out17
+#define M12_ out1
+#define M13_ out6
+#define M14_ out11
+#define M15_ out16
+#define M2_ out14
+#define M3_ out19
+#define M4_ out3
+#define M5_ out8
+#define M6_ out13
+#define M7_ out18
+#define M8_ out2
+#define M9_ out7
+#define RotateM0_ out0
+#define RotateM1_ out5
+#define RotateM2_ out10
+#define RotateM3_ out15
+#define W_ out23
+#define X_ out22
+#define Y_ out21
+#define Z_ out20
+
+#define A in24
+#define B in25
+#define C in26
+#define D in27
+#define DPtr in28
+#define M0 in4
+#define M1 in9
+#define M10 in12
+#define M11 in17
+#define M12 in1
+#define M13 in6
+#define M14 in11
+#define M15 in16
+#define M2 in14
+#define M3 in19
+#define M4 in3
+#define M5 in8
+#define M6 in13
+#define M7 in18
+#define M8 in2
+#define M9 in7
+#define RotateM0 in0
+#define RotateM1 in5
+#define RotateM2 in10
+#define RotateM3 in15
+#define W in23
+#define X in22
+#define Y in21
+#define Z in20
+
+/* register stack configuration for md5_block_asm_data_order(): */
+#define MD5_NINP 3
+#define MD5_NLOC 0
+#define MD5_NOUT 29
+#define MD5_NROT 0
+
+/* register stack configuration for helpers: */
+#define _NINPUTS MD5_NOUT
+#define _NLOCALS 0
+#define _NOUTPUT 0
+#define _NROTATE 24 /* this must be <= _NINPUTS */
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+#define ADDP addp4
+#else
+#define ADDP add
+#endif
+
+#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
+#define HOST_IS_BIG_ENDIAN
+#endif
+
+// Macros for getting the left and right portions of little-endian words
+
+#define GETLW(dst, src, align) dep.z dst = src, 32 - 8 * align, 8 * align
+#define GETRW(dst, src, align) extr.u dst = src, 8 * align, 32 - 8 * align
+
+// MD5 driver
+//
+// Reads an input block, then calls the digest block
+// subroutine and adds the results to the accumulated
+// digest. It allocates 32 outs which the subroutine
+// uses as its inputs and rotating
+// registers. Initializes the round constant pointer and
+// takes care of saving/restoring ar.lc
+//
+/// INPUT
+//
+// in0 Context Ptr CtxPtr0
+// in1 Input Data Ptr DPtrIn
+// in2 Integral Blocks BlockCount
+// rp Return Address -
+//
+/// CODE
+//
+// v2 Input Align InAlign
+// t0 Shared w/digest -
+// t1 Shared w/digest -
+// t2 Shared w/digest -
+// t3 Shared w/digest -
+// t4 Shared w/digest -
+// t5 Shared w/digest -
+// t6 PFS Save PFSSave
+// t7 ar.lc Save LCSave
+// t8 Saved PR PRSave
+// t9 2nd CtxPtr CtxPtr1
+// t10 Table Base CTable
+// t11 Table[0] CTable0
+// t13 Accumulator A AccumA
+// t14 Accumulator B AccumB
+// t15 Accumulator C AccumC
+// t16 Accumulator D AccumD
+// pt0 Shared w/digest -
+// pt1 Shared w/digest -
+// pt2 Shared w/digest -
+// pt3 Shared w/digest -
+// pt4 Shared w/digest -
+// pt5 Shared w/digest -
+// pt6 Shared w/digest -
+// pt7 Shared w/digest -
+// pt8 Not Aligned pOff
+// pt8 Blocks Left pAgain
+
+#define AccumA r27
+#define AccumB r28
+#define AccumC r29
+#define AccumD r30
+#define CTable r24
+#define CTable0 r25
+#define CtxPtr0 in0
+#define CtxPtr1 r23
+#define DPtrIn in1
+#define BlockCount in2
+#define InAlign r10
+#define LCSave r21
+#define PFSSave r20
+#define PRSave r22
+#define pAgain p63
+#define pOff p63
+
+ .text
+
+/* md5_block_asm_data_order(MD5_CTX *c, const void *data, size_t num)
+
+ where:
+ c: a pointer to a structure of this type:
+
+ typedef struct MD5state_st
+ {
+ MD5_LONG A,B,C,D;
+ MD5_LONG Nl,Nh;
+ MD5_LONG data[MD5_LBLOCK];
+ unsigned int num;
+ }
+ MD5_CTX;
+
+ data: a pointer to the input data (may be misaligned)
+	num: the number of 64-byte blocks to hash (i.e., the length
+	     of DATA is 64*NUM).
+
+ */
+
+ .type md5_block_asm_data_order, @function
+ .global md5_block_asm_data_order
+ .align 32
+ .proc md5_block_asm_data_order
+md5_block_asm_data_order:
+.md5_block:
+ .prologue
+{ .mmi
+ .save ar.pfs, PFSSave
+ alloc PFSSave = ar.pfs, MD5_NINP, MD5_NLOC, MD5_NOUT, MD5_NROT
+ ADDP CtxPtr1 = 8, CtxPtr0
+ mov CTable = ip
+}
+{ .mmi
+ ADDP DPtrIn = 0, DPtrIn
+ ADDP CtxPtr0 = 0, CtxPtr0
+ .save ar.lc, LCSave
+ mov LCSave = ar.lc
+}
+;;
+{ .mmi
+ add CTable = .md5_tbl_data_order#-.md5_block#, CTable
+ and InAlign = 0x3, DPtrIn
+}
+
+{ .mmi
+ ld4 AccumA = [CtxPtr0], 4
+ ld4 AccumC = [CtxPtr1], 4
+ .save pr, PRSave
+ mov PRSave = pr
+ .body
+}
+;;
+{ .mmi
+ ld4 AccumB = [CtxPtr0]
+ ld4 AccumD = [CtxPtr1]
+ dep DPtr_ = 0, DPtrIn, 0, 2
+} ;;
+#ifdef HOST_IS_BIG_ENDIAN
+ rum psr.be;; // switch to little-endian
+#endif
+{ .mmb
+ ld4 CTable0 = [CTable], 4
+ cmp.ne pOff, p0 = 0, InAlign
+(pOff) br.cond.spnt.many .md5_unaligned
+} ;;
+
+// The FF load/compute loop rotates values three times, so that
+// loading into M12 here produces the M0 value, M13 -> M1, etc.
+
+.md5_block_loop0:
+{ .mmi
+ ld4 M12_ = [DPtr_], 4
+ mov TPtr = CTable
+ mov TRound = CTable0
+} ;;
+{ .mmi
+ ld4 M13_ = [DPtr_], 4
+ mov A_ = AccumA
+ mov B_ = AccumB
+} ;;
+{ .mmi
+ ld4 M14_ = [DPtr_], 4
+ mov C_ = AccumC
+ mov D_ = AccumD
+} ;;
+{ .mmb
+ ld4 M15_ = [DPtr_], 4
+ add BlockCount = -1, BlockCount
+ br.call.sptk.many QUICK_RTN = md5_digest_block0
+} ;;
+
+// Now, we add the new digest values and do some clean-up
+// before checking if there's another full block to process
+
+{ .mmi
+ add AccumA = AccumA, A_
+ add AccumB = AccumB, B_
+ cmp.ne pAgain, p0 = 0, BlockCount
+}
+{ .mib
+ add AccumC = AccumC, C_
+ add AccumD = AccumD, D_
+(pAgain) br.cond.dptk.many .md5_block_loop0
+} ;;
+
+.md5_exit:
+#ifdef HOST_IS_BIG_ENDIAN
+ sum psr.be;; // switch back to big-endian mode
+#endif
+{ .mmi
+ st4 [CtxPtr0] = AccumB, -4
+ st4 [CtxPtr1] = AccumD, -4
+ mov pr = PRSave, 0x1ffff ;;
+}
+{ .mmi
+ st4 [CtxPtr0] = AccumA
+ st4 [CtxPtr1] = AccumC
+ mov ar.lc = LCSave
+} ;;
+{ .mib
+ mov ar.pfs = PFSSave
+ br.ret.sptk.few rp
+} ;;
+
+#define MD5UNALIGNED(offset) \
+.md5_process##offset: \
+{ .mib ; \
+ nop 0x0 ; \
+ GETRW(DTmp, DTmp, offset) ; \
+} ;; \
+.md5_block_loop##offset: \
+{ .mmi ; \
+ ld4 Y_ = [DPtr_], 4 ; \
+ mov TPtr = CTable ; \
+ mov TRound = CTable0 ; \
+} ;; \
+{ .mmi ; \
+ ld4 M13_ = [DPtr_], 4 ; \
+ mov A_ = AccumA ; \
+ mov B_ = AccumB ; \
+} ;; \
+{ .mii ; \
+ ld4 M14_ = [DPtr_], 4 ; \
+ GETLW(W_, Y_, offset) ; \
+ mov C_ = AccumC ; \
+} \
+{ .mmi ; \
+ mov D_ = AccumD ;; \
+ or M12_ = W_, DTmp ; \
+ GETRW(DTmp, Y_, offset) ; \
+} \
+{ .mib ; \
+ ld4 M15_ = [DPtr_], 4 ; \
+ add BlockCount = -1, BlockCount ; \
+ br.call.sptk.many QUICK_RTN = md5_digest_block##offset; \
+} ;; \
+{ .mmi ; \
+ add AccumA = AccumA, A_ ; \
+ add AccumB = AccumB, B_ ; \
+ cmp.ne pAgain, p0 = 0, BlockCount ; \
+} \
+{ .mib ; \
+ add AccumC = AccumC, C_ ; \
+ add AccumD = AccumD, D_ ; \
+(pAgain) br.cond.dptk.many .md5_block_loop##offset ; \
+} ;; \
+{ .mib ; \
+ nop 0x0 ; \
+ nop 0x0 ; \
+ br.cond.sptk.many .md5_exit ; \
+} ;;
+
+ .align 32
+.md5_unaligned:
+//
+// Because variable shifts are expensive, we special case each of
+// the four alignments. In practice, this won't hurt too much
+// since only one working set of code will be loaded.
+//
+{ .mib
+ ld4 DTmp = [DPtr_], 4
+ cmp.eq pOff, p0 = 1, InAlign
+(pOff) br.cond.dpnt.many .md5_process1
+} ;;
+{ .mib
+ cmp.eq pOff, p0 = 2, InAlign
+ nop 0x0
+(pOff) br.cond.dpnt.many .md5_process2
+} ;;
+ MD5UNALIGNED(3)
+ MD5UNALIGNED(1)
+ MD5UNALIGNED(2)
+
+ .endp md5_block_asm_data_order
+
+
+// MD5 Perform the F function and load
+//
+// Passed the first 4 words (M0 - M3) and initial (A, B, C, D) values,
+// computes the FF() round of functions, then branches to the common
+// digest code to finish up with GG(), HH, and II().
+//
+// INPUT
+//
+// rp Return Address -
+//
+// CODE
+//
+// v0 PFS bit bucket PFS
+// v1 Loop Trip Count LTrip
+// pt0 Load next word pMore
+
+/* For F round: */
+#define LTrip r9
+#define PFS r8
+#define pMore p6
+
+/* For GHI rounds: */
+#define T r9
+#define U r10
+#define V r11
+
+#define COMPUTE(a, b, s, M, R) \
+{ \
+ .mii ; \
+ ld4 TRound = [TPtr], 4 ; \
+ dep.z Y = Z, 32, 32 ;; \
+ shrp Z = Z, Y, 64 - s ; \
+} ;; \
+{ \
+ .mmi ; \
+ add a = Z, b ; \
+ mov R = M ; \
+ nop 0x0 ; \
+} ;;
+
+#define LOOP(a, b, s, M, R, label) \
+{ .mii ; \
+ ld4 TRound = [TPtr], 4 ; \
+ dep.z Y = Z, 32, 32 ;; \
+ shrp Z = Z, Y, 64 - s ; \
+} ;; \
+{ .mib ; \
+ add a = Z, b ; \
+ mov R = M ; \
+ br.ctop.sptk.many label ; \
+} ;;
+
+// G(B, C, D) = (B & D) | (C & ~D)
+
+#define G(a, b, c, d, M) \
+{ .mmi ; \
+ add Z = M, TRound ; \
+ and Y = b, d ; \
+ andcm X = c, d ; \
+} ;; \
+{ .mii ; \
+ add Z = Z, a ; \
+ or Y = Y, X ;; \
+ add Z = Z, Y ; \
+} ;;
+
+// H(B, C, D) = B ^ C ^ D
+
+#define H(a, b, c, d, M) \
+{ .mmi ; \
+ add Z = M, TRound ; \
+ xor Y = b, c ; \
+ nop 0x0 ; \
+} ;; \
+{ .mii ; \
+ add Z = Z, a ; \
+ xor Y = Y, d ;; \
+ add Z = Z, Y ; \
+} ;;
+
+// I(B, C, D) = C ^ (B | ~D)
+//
+// However, since we have an andcm operator, we use the fact that
+//
+// Y ^ Z == ~Y ^ ~Z
+//
+// to rewrite the expression as
+//
+// I(B, C, D) = ~C ^ (~B & D)
+
+#define I(a, b, c, d, M) \
+{ .mmi ; \
+ add Z = M, TRound ; \
+ andcm Y = d, b ; \
+ andcm X = -1, c ; \
+} ;; \
+{ .mii ; \
+ add Z = Z, a ; \
+ xor Y = Y, X ;; \
+ add Z = Z, Y ; \
+} ;;
+
+#define GG4(label) \
+ G(A, B, C, D, M0) \
+ COMPUTE(A, B, 5, M0, RotateM0) \
+ G(D, A, B, C, M1) \
+ COMPUTE(D, A, 9, M1, RotateM1) \
+ G(C, D, A, B, M2) \
+ COMPUTE(C, D, 14, M2, RotateM2) \
+ G(B, C, D, A, M3) \
+ LOOP(B, C, 20, M3, RotateM3, label)
+
+#define HH4(label) \
+ H(A, B, C, D, M0) \
+ COMPUTE(A, B, 4, M0, RotateM0) \
+ H(D, A, B, C, M1) \
+ COMPUTE(D, A, 11, M1, RotateM1) \
+ H(C, D, A, B, M2) \
+ COMPUTE(C, D, 16, M2, RotateM2) \
+ H(B, C, D, A, M3) \
+ LOOP(B, C, 23, M3, RotateM3, label)
+
+#define II4(label) \
+ I(A, B, C, D, M0) \
+ COMPUTE(A, B, 6, M0, RotateM0) \
+ I(D, A, B, C, M1) \
+ COMPUTE(D, A, 10, M1, RotateM1) \
+ I(C, D, A, B, M2) \
+ COMPUTE(C, D, 15, M2, RotateM2) \
+ I(B, C, D, A, M3) \
+ LOOP(B, C, 21, M3, RotateM3, label)
+
+#define FFLOAD(a, b, c, d, M, N, s) \
+{ .mii ; \
+(pMore) ld4 N = [DPtr], 4 ; \
+ add Z = M, TRound ; \
+ and Y = c, b ; \
+} \
+{ .mmi ; \
+ andcm X = d, b ;; \
+ add Z = Z, a ; \
+ or Y = Y, X ; \
+} ;; \
+{ .mii ; \
+ ld4 TRound = [TPtr], 4 ; \
+ add Z = Z, Y ;; \
+ dep.z Y = Z, 32, 32 ; \
+} ;; \
+{ .mii ; \
+ nop 0x0 ; \
+ shrp Z = Z, Y, 64 - s ;; \
+ add a = Z, b ; \
+} ;;
+
+#define FFLOOP(a, b, c, d, M, N, s, dest) \
+{ .mii ; \
+(pMore) ld4 N = [DPtr], 4 ; \
+ add Z = M, TRound ; \
+ and Y = c, b ; \
+} \
+{ .mmi ; \
+ andcm X = d, b ;; \
+ add Z = Z, a ; \
+ or Y = Y, X ; \
+} ;; \
+{ .mii ; \
+ ld4 TRound = [TPtr], 4 ; \
+ add Z = Z, Y ;; \
+ dep.z Y = Z, 32, 32 ; \
+} ;; \
+{ .mii ; \
+ nop 0x0 ; \
+ shrp Z = Z, Y, 64 - s ;; \
+ add a = Z, b ; \
+} \
+{ .mib ; \
+ cmp.ne pMore, p0 = 0, LTrip ; \
+ add LTrip = -1, LTrip ; \
+ br.ctop.dptk.many dest ; \
+} ;;
+
+ .type md5_digest_block0, @function
+ .align 32
+
+ .proc md5_digest_block0
+ .prologue
+md5_digest_block0:
+ .altrp QUICK_RTN
+ .body
+{ .mmi
+ alloc PFS = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE
+ mov LTrip = 2
+ mov ar.lc = 3
+} ;;
+{ .mii
+ cmp.eq pMore, p0 = r0, r0
+ mov ar.ec = 0
+ nop 0x0
+} ;;
+
+.md5_FF_round0:
+ FFLOAD(A, B, C, D, M12, RotateM0, 7)
+ FFLOAD(D, A, B, C, M13, RotateM1, 12)
+ FFLOAD(C, D, A, B, M14, RotateM2, 17)
+ FFLOOP(B, C, D, A, M15, RotateM3, 22, .md5_FF_round0)
+ //
+ // !!! Fall through to md5_digest_GHI
+ //
+ .endp md5_digest_block0
+
+ .type md5_digest_GHI, @function
+ .align 32
+
+ .proc md5_digest_GHI
+ .prologue
+ .regstk _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE
+md5_digest_GHI:
+ .altrp QUICK_RTN
+ .body
+//
+// The following sequence shuffles the block constants round for the
+// next round:
+//
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+// 1 6 11 0 5 10 15 4 9 14 3 8 13 2 7 12
+//
+{ .mmi
+ mov Z = M0
+ mov Y = M15
+ mov ar.lc = 3
+}
+{ .mmi
+ mov X = M2
+ mov W = M9
+ mov V = M4
+} ;;
+
+{ .mmi
+ mov M0 = M1
+ mov M15 = M12
+ mov ar.ec = 1
+}
+{ .mmi
+ mov M2 = M11
+ mov M9 = M14
+ mov M4 = M5
+} ;;
+
+{ .mmi
+ mov M1 = M6
+ mov M12 = M13
+ mov U = M3
+}
+{ .mmi
+ mov M11 = M8
+ mov M14 = M7
+ mov M5 = M10
+} ;;
+
+{ .mmi
+ mov M6 = Y
+ mov M13 = X
+ mov M3 = Z
+}
+{ .mmi
+ mov M8 = W
+ mov M7 = V
+ mov M10 = U
+} ;;
+
+.md5_GG_round:
+ GG4(.md5_GG_round)
+
+// The following sequence shuffles the block constants round for the
+// next round:
+//
+// 1 6 11 0 5 10 15 4 9 14 3 8 13 2 7 12
+// 5 8 11 14 1 4 7 10 13 0 3 6 9 12 15 2
+
+{ .mmi
+ mov Z = M0
+ mov Y = M1
+ mov ar.lc = 3
+}
+{ .mmi
+ mov X = M3
+ mov W = M5
+ mov V = M6
+} ;;
+
+{ .mmi
+ mov M0 = M4
+ mov M1 = M11
+ mov ar.ec = 1
+}
+{ .mmi
+ mov M3 = M9
+ mov U = M8
+ mov T = M13
+} ;;
+
+{ .mmi
+ mov M4 = Z
+ mov M11 = Y
+ mov M5 = M7
+}
+{ .mmi
+ mov M6 = M14
+ mov M8 = M12
+ mov M13 = M15
+} ;;
+
+{ .mmi
+ mov M7 = W
+ mov M14 = V
+ nop 0x0
+}
+{ .mmi
+ mov M9 = X
+ mov M12 = U
+ mov M15 = T
+} ;;
+
+.md5_HH_round:
+ HH4(.md5_HH_round)
+
+// The following sequence shuffles the block constants round for the
+// next round:
+//
+// 5 8 11 14 1 4 7 10 13 0 3 6 9 12 15 2
+// 0 7 14 5 12 3 10 1 8 15 6 13 4 11 2 9
+
+{ .mmi
+ mov Z = M0
+ mov Y = M15
+ mov ar.lc = 3
+}
+{ .mmi
+ mov X = M10
+ mov W = M1
+ mov V = M4
+} ;;
+
+{ .mmi
+ mov M0 = M9
+ mov M15 = M12
+ mov ar.ec = 1
+}
+{ .mmi
+ mov M10 = M11
+ mov M1 = M6
+ mov M4 = M13
+} ;;
+
+{ .mmi
+ mov M9 = M14
+ mov M12 = M5
+ mov U = M3
+}
+{ .mmi
+ mov M11 = M8
+ mov M6 = M7
+ mov M13 = M2
+} ;;
+
+{ .mmi
+ mov M14 = Y
+ mov M5 = X
+ mov M3 = Z
+}
+{ .mmi
+ mov M8 = W
+ mov M7 = V
+ mov M2 = U
+} ;;
+
+.md5_II_round:
+ II4(.md5_II_round)
+
+{ .mib
+ nop 0x0
+ nop 0x0
+ br.ret.sptk.many QUICK_RTN
+} ;;
+
+ .endp md5_digest_GHI
+
+#define FFLOADU(a, b, c, d, M, P, N, s, offset) \
+{ .mii ; \
+(pMore) ld4 N = [DPtr], 4 ; \
+ add Z = M, TRound ; \
+ and Y = c, b ; \
+} \
+{ .mmi ; \
+ andcm X = d, b ;; \
+ add Z = Z, a ; \
+ or Y = Y, X ; \
+} ;; \
+{ .mii ; \
+ ld4 TRound = [TPtr], 4 ; \
+ GETLW(W, P, offset) ; \
+ add Z = Z, Y ; \
+} ;; \
+{ .mii ; \
+ or W = W, DTmp ; \
+ dep.z Y = Z, 32, 32 ;; \
+ shrp Z = Z, Y, 64 - s ; \
+} ;; \
+{ .mii ; \
+ add a = Z, b ; \
+ GETRW(DTmp, P, offset) ; \
+ mov P = W ; \
+} ;;
+
+#define FFLOOPU(a, b, c, d, M, P, N, s, offset) \
+{ .mii ; \
+(pMore) ld4 N = [DPtr], 4 ; \
+ add Z = M, TRound ; \
+ and Y = c, b ; \
+} \
+{ .mmi ; \
+ andcm X = d, b ;; \
+ add Z = Z, a ; \
+ or Y = Y, X ; \
+} ;; \
+{ .mii ; \
+ ld4 TRound = [TPtr], 4 ; \
+(pMore) GETLW(W, P, offset) ; \
+ add Z = Z, Y ; \
+} ;; \
+{ .mii ; \
+(pMore) or W = W, DTmp ; \
+ dep.z Y = Z, 32, 32 ;; \
+ shrp Z = Z, Y, 64 - s ; \
+} ;; \
+{ .mii ; \
+ add a = Z, b ; \
+(pMore) GETRW(DTmp, P, offset) ; \
+(pMore) mov P = W ; \
+} \
+{ .mib ; \
+ cmp.ne pMore, p0 = 0, LTrip ; \
+ add LTrip = -1, LTrip ; \
+ br.ctop.sptk.many .md5_FF_round##offset ; \
+} ;;
+
+#define MD5FBLOCK(offset) \
+ .type md5_digest_block##offset, @function ; \
+ \
+ .align 32 ; \
+ .proc md5_digest_block##offset ; \
+ .prologue ; \
+ .altrp QUICK_RTN ; \
+ .body ; \
+md5_digest_block##offset: \
+{ .mmi ; \
+ alloc PFS = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE ; \
+ mov LTrip = 2 ; \
+ mov ar.lc = 3 ; \
+} ;; \
+{ .mii ; \
+ cmp.eq pMore, p0 = r0, r0 ; \
+ mov ar.ec = 0 ; \
+ nop 0x0 ; \
+} ;; \
+ \
+ .pred.rel "mutex", pLoad, pSkip ; \
+.md5_FF_round##offset: \
+ FFLOADU(A, B, C, D, M12, M13, RotateM0, 7, offset) \
+ FFLOADU(D, A, B, C, M13, M14, RotateM1, 12, offset) \
+ FFLOADU(C, D, A, B, M14, M15, RotateM2, 17, offset) \
+ FFLOOPU(B, C, D, A, M15, RotateM0, RotateM3, 22, offset) \
+ \
+{ .mib ; \
+ nop 0x0 ; \
+ nop 0x0 ; \
+ br.cond.sptk.many md5_digest_GHI ; \
+} ;; \
+ .endp md5_digest_block##offset
+
+MD5FBLOCK(1)
+MD5FBLOCK(2)
+MD5FBLOCK(3)
+
+ .align 64
+ .type md5_constants, @object
+md5_constants:
+.md5_tbl_data_order: // To ensure little-endian data
+ // order, code as bytes.
+ data1 0x78, 0xa4, 0x6a, 0xd7 // 0
+ data1 0x56, 0xb7, 0xc7, 0xe8 // 1
+ data1 0xdb, 0x70, 0x20, 0x24 // 2
+ data1 0xee, 0xce, 0xbd, 0xc1 // 3
+ data1 0xaf, 0x0f, 0x7c, 0xf5 // 4
+ data1 0x2a, 0xc6, 0x87, 0x47 // 5
+ data1 0x13, 0x46, 0x30, 0xa8 // 6
+ data1 0x01, 0x95, 0x46, 0xfd // 7
+ data1 0xd8, 0x98, 0x80, 0x69 // 8
+ data1 0xaf, 0xf7, 0x44, 0x8b // 9
+ data1 0xb1, 0x5b, 0xff, 0xff // 10
+ data1 0xbe, 0xd7, 0x5c, 0x89 // 11
+ data1 0x22, 0x11, 0x90, 0x6b // 12
+ data1 0x93, 0x71, 0x98, 0xfd // 13
+ data1 0x8e, 0x43, 0x79, 0xa6 // 14
+ data1 0x21, 0x08, 0xb4, 0x49 // 15
+ data1 0x62, 0x25, 0x1e, 0xf6 // 16
+ data1 0x40, 0xb3, 0x40, 0xc0 // 17
+ data1 0x51, 0x5a, 0x5e, 0x26 // 18
+ data1 0xaa, 0xc7, 0xb6, 0xe9 // 19
+ data1 0x5d, 0x10, 0x2f, 0xd6 // 20
+ data1 0x53, 0x14, 0x44, 0x02 // 21
+ data1 0x81, 0xe6, 0xa1, 0xd8 // 22
+ data1 0xc8, 0xfb, 0xd3, 0xe7 // 23
+ data1 0xe6, 0xcd, 0xe1, 0x21 // 24
+ data1 0xd6, 0x07, 0x37, 0xc3 // 25
+ data1 0x87, 0x0d, 0xd5, 0xf4 // 26
+ data1 0xed, 0x14, 0x5a, 0x45 // 27
+ data1 0x05, 0xe9, 0xe3, 0xa9 // 28
+ data1 0xf8, 0xa3, 0xef, 0xfc // 29
+ data1 0xd9, 0x02, 0x6f, 0x67 // 30
+ data1 0x8a, 0x4c, 0x2a, 0x8d // 31
+ data1 0x42, 0x39, 0xfa, 0xff // 32
+ data1 0x81, 0xf6, 0x71, 0x87 // 33
+ data1 0x22, 0x61, 0x9d, 0x6d // 34
+ data1 0x0c, 0x38, 0xe5, 0xfd // 35
+ data1 0x44, 0xea, 0xbe, 0xa4 // 36
+ data1 0xa9, 0xcf, 0xde, 0x4b // 37
+ data1 0x60, 0x4b, 0xbb, 0xf6 // 38
+ data1 0x70, 0xbc, 0xbf, 0xbe // 39
+ data1 0xc6, 0x7e, 0x9b, 0x28 // 40
+ data1 0xfa, 0x27, 0xa1, 0xea // 41
+ data1 0x85, 0x30, 0xef, 0xd4 // 42
+ data1 0x05, 0x1d, 0x88, 0x04 // 43
+ data1 0x39, 0xd0, 0xd4, 0xd9 // 44
+ data1 0xe5, 0x99, 0xdb, 0xe6 // 45
+ data1 0xf8, 0x7c, 0xa2, 0x1f // 46
+ data1 0x65, 0x56, 0xac, 0xc4 // 47
+ data1 0x44, 0x22, 0x29, 0xf4 // 48
+ data1 0x97, 0xff, 0x2a, 0x43 // 49
+ data1 0xa7, 0x23, 0x94, 0xab // 50
+ data1 0x39, 0xa0, 0x93, 0xfc // 51
+ data1 0xc3, 0x59, 0x5b, 0x65 // 52
+ data1 0x92, 0xcc, 0x0c, 0x8f // 53
+ data1 0x7d, 0xf4, 0xef, 0xff // 54
+ data1 0xd1, 0x5d, 0x84, 0x85 // 55
+ data1 0x4f, 0x7e, 0xa8, 0x6f // 56
+ data1 0xe0, 0xe6, 0x2c, 0xfe // 57
+ data1 0x14, 0x43, 0x01, 0xa3 // 58
+ data1 0xa1, 0x11, 0x08, 0x4e // 59
+ data1 0x82, 0x7e, 0x53, 0xf7 // 60
+ data1 0x35, 0xf2, 0x3a, 0xbd // 61
+ data1 0xbb, 0xd2, 0xd7, 0x2a // 62
+ data1 0x91, 0xd3, 0x86, 0xeb // 63
+.size md5_constants#,64*4
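md5-ia64.S above leans on the andcm instruction: the comment ahead of the I() macro rewrites I(B,C,D) = C ^ (B | ~D) as ~C ^ (~B & D), using Y ^ Z == ~Y ^ ~Z together with De Morgan's law ~(B | ~D) = ~B & D. A throwaway C check of that identity (illustrative, not part of the tree):

```c
#include <assert.h>
#include <stdint.h>

/* Exhaustive single-bit check that C ^ (B | ~D) == ~C ^ (~B & D),
 * the rewrite used by the I() macro in md5-ia64.S. */
int main(void)
{
    for (uint32_t b = 0; b <= 1; b++)
        for (uint32_t c = 0; c <= 1; c++)
            for (uint32_t d = 0; d <= 1; d++) {
                uint32_t lhs = (c ^ (b | ~d)) & 1u;
                uint32_t rhs = (~c ^ (~b & d)) & 1u;
                assert(lhs == rhs);
            }
    return 0;
}
```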
diff --git a/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.S b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.S
new file mode 100644
index 00000000..235d5e4e
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.S
@@ -0,0 +1,668 @@
+.text
+.align 16
+
+.globl md5_block_asm_data_order
+.type md5_block_asm_data_order,@function
+md5_block_asm_data_order:
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r14
+ pushq %r15
+.Lprologue:
+
+
+
+
+ movq %rdi,%rbp
+ shlq $6,%rdx
+ leaq (%rsi,%rdx,1),%rdi
+ movl 0(%rbp),%eax
+ movl 4(%rbp),%ebx
+ movl 8(%rbp),%ecx
+ movl 12(%rbp),%edx
+
+
+
+
+
+
+
+ cmpq %rdi,%rsi
+ je .Lend
+
+
+.Lloop:
+ movl %eax,%r8d
+ movl %ebx,%r9d
+ movl %ecx,%r14d
+ movl %edx,%r15d
+ movl 0(%rsi),%r10d
+ movl %edx,%r11d
+ xorl %ecx,%r11d
+ leal -680876936(%rax,%r10,1),%eax
+ andl %ebx,%r11d
+ xorl %edx,%r11d
+ movl 4(%rsi),%r10d
+ addl %r11d,%eax
+ roll $7,%eax
+ movl %ecx,%r11d
+ addl %ebx,%eax
+ xorl %ebx,%r11d
+ leal -389564586(%rdx,%r10,1),%edx
+ andl %eax,%r11d
+ xorl %ecx,%r11d
+ movl 8(%rsi),%r10d
+ addl %r11d,%edx
+ roll $12,%edx
+ movl %ebx,%r11d
+ addl %eax,%edx
+ xorl %eax,%r11d
+ leal 606105819(%rcx,%r10,1),%ecx
+ andl %edx,%r11d
+ xorl %ebx,%r11d
+ movl 12(%rsi),%r10d
+ addl %r11d,%ecx
+ roll $17,%ecx
+ movl %eax,%r11d
+ addl %edx,%ecx
+ xorl %edx,%r11d
+ leal -1044525330(%rbx,%r10,1),%ebx
+ andl %ecx,%r11d
+ xorl %eax,%r11d
+ movl 16(%rsi),%r10d
+ addl %r11d,%ebx
+ roll $22,%ebx
+ movl %edx,%r11d
+ addl %ecx,%ebx
+ xorl %ecx,%r11d
+ leal -176418897(%rax,%r10,1),%eax
+ andl %ebx,%r11d
+ xorl %edx,%r11d
+ movl 20(%rsi),%r10d
+ addl %r11d,%eax
+ roll $7,%eax
+ movl %ecx,%r11d
+ addl %ebx,%eax
+ xorl %ebx,%r11d
+ leal 1200080426(%rdx,%r10,1),%edx
+ andl %eax,%r11d
+ xorl %ecx,%r11d
+ movl 24(%rsi),%r10d
+ addl %r11d,%edx
+ roll $12,%edx
+ movl %ebx,%r11d
+ addl %eax,%edx
+ xorl %eax,%r11d
+ leal -1473231341(%rcx,%r10,1),%ecx
+ andl %edx,%r11d
+ xorl %ebx,%r11d
+ movl 28(%rsi),%r10d
+ addl %r11d,%ecx
+ roll $17,%ecx
+ movl %eax,%r11d
+ addl %edx,%ecx
+ xorl %edx,%r11d
+ leal -45705983(%rbx,%r10,1),%ebx
+ andl %ecx,%r11d
+ xorl %eax,%r11d
+ movl 32(%rsi),%r10d
+ addl %r11d,%ebx
+ roll $22,%ebx
+ movl %edx,%r11d
+ addl %ecx,%ebx
+ xorl %ecx,%r11d
+ leal 1770035416(%rax,%r10,1),%eax
+ andl %ebx,%r11d
+ xorl %edx,%r11d
+ movl 36(%rsi),%r10d
+ addl %r11d,%eax
+ roll $7,%eax
+ movl %ecx,%r11d
+ addl %ebx,%eax
+ xorl %ebx,%r11d
+ leal -1958414417(%rdx,%r10,1),%edx
+ andl %eax,%r11d
+ xorl %ecx,%r11d
+ movl 40(%rsi),%r10d
+ addl %r11d,%edx
+ roll $12,%edx
+ movl %ebx,%r11d
+ addl %eax,%edx
+ xorl %eax,%r11d
+ leal -42063(%rcx,%r10,1),%ecx
+ andl %edx,%r11d
+ xorl %ebx,%r11d
+ movl 44(%rsi),%r10d
+ addl %r11d,%ecx
+ roll $17,%ecx
+ movl %eax,%r11d
+ addl %edx,%ecx
+ xorl %edx,%r11d
+ leal -1990404162(%rbx,%r10,1),%ebx
+ andl %ecx,%r11d
+ xorl %eax,%r11d
+ movl 48(%rsi),%r10d
+ addl %r11d,%ebx
+ roll $22,%ebx
+ movl %edx,%r11d
+ addl %ecx,%ebx
+ xorl %ecx,%r11d
+ leal 1804603682(%rax,%r10,1),%eax
+ andl %ebx,%r11d
+ xorl %edx,%r11d
+ movl 52(%rsi),%r10d
+ addl %r11d,%eax
+ roll $7,%eax
+ movl %ecx,%r11d
+ addl %ebx,%eax
+ xorl %ebx,%r11d
+ leal -40341101(%rdx,%r10,1),%edx
+ andl %eax,%r11d
+ xorl %ecx,%r11d
+ movl 56(%rsi),%r10d
+ addl %r11d,%edx
+ roll $12,%edx
+ movl %ebx,%r11d
+ addl %eax,%edx
+ xorl %eax,%r11d
+ leal -1502002290(%rcx,%r10,1),%ecx
+ andl %edx,%r11d
+ xorl %ebx,%r11d
+ movl 60(%rsi),%r10d
+ addl %r11d,%ecx
+ roll $17,%ecx
+ movl %eax,%r11d
+ addl %edx,%ecx
+ xorl %edx,%r11d
+ leal 1236535329(%rbx,%r10,1),%ebx
+ andl %ecx,%r11d
+ xorl %eax,%r11d
+ movl 0(%rsi),%r10d
+ addl %r11d,%ebx
+ roll $22,%ebx
+ movl %edx,%r11d
+ addl %ecx,%ebx
+ movl 4(%rsi),%r10d
+ movl %edx,%r11d
+ movl %edx,%r12d
+ notl %r11d
+ leal -165796510(%rax,%r10,1),%eax
+ andl %ebx,%r12d
+ andl %ecx,%r11d
+ movl 24(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ecx,%r11d
+ addl %r12d,%eax
+ movl %ecx,%r12d
+ roll $5,%eax
+ addl %ebx,%eax
+ notl %r11d
+ leal -1069501632(%rdx,%r10,1),%edx
+ andl %eax,%r12d
+ andl %ebx,%r11d
+ movl 44(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ebx,%r11d
+ addl %r12d,%edx
+ movl %ebx,%r12d
+ roll $9,%edx
+ addl %eax,%edx
+ notl %r11d
+ leal 643717713(%rcx,%r10,1),%ecx
+ andl %edx,%r12d
+ andl %eax,%r11d
+ movl 0(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %eax,%r11d
+ addl %r12d,%ecx
+ movl %eax,%r12d
+ roll $14,%ecx
+ addl %edx,%ecx
+ notl %r11d
+ leal -373897302(%rbx,%r10,1),%ebx
+ andl %ecx,%r12d
+ andl %edx,%r11d
+ movl 20(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %edx,%r11d
+ addl %r12d,%ebx
+ movl %edx,%r12d
+ roll $20,%ebx
+ addl %ecx,%ebx
+ notl %r11d
+ leal -701558691(%rax,%r10,1),%eax
+ andl %ebx,%r12d
+ andl %ecx,%r11d
+ movl 40(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ecx,%r11d
+ addl %r12d,%eax
+ movl %ecx,%r12d
+ roll $5,%eax
+ addl %ebx,%eax
+ notl %r11d
+ leal 38016083(%rdx,%r10,1),%edx
+ andl %eax,%r12d
+ andl %ebx,%r11d
+ movl 60(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ebx,%r11d
+ addl %r12d,%edx
+ movl %ebx,%r12d
+ roll $9,%edx
+ addl %eax,%edx
+ notl %r11d
+ leal -660478335(%rcx,%r10,1),%ecx
+ andl %edx,%r12d
+ andl %eax,%r11d
+ movl 16(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %eax,%r11d
+ addl %r12d,%ecx
+ movl %eax,%r12d
+ roll $14,%ecx
+ addl %edx,%ecx
+ notl %r11d
+ leal -405537848(%rbx,%r10,1),%ebx
+ andl %ecx,%r12d
+ andl %edx,%r11d
+ movl 36(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %edx,%r11d
+ addl %r12d,%ebx
+ movl %edx,%r12d
+ roll $20,%ebx
+ addl %ecx,%ebx
+ notl %r11d
+ leal 568446438(%rax,%r10,1),%eax
+ andl %ebx,%r12d
+ andl %ecx,%r11d
+ movl 56(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ecx,%r11d
+ addl %r12d,%eax
+ movl %ecx,%r12d
+ roll $5,%eax
+ addl %ebx,%eax
+ notl %r11d
+ leal -1019803690(%rdx,%r10,1),%edx
+ andl %eax,%r12d
+ andl %ebx,%r11d
+ movl 12(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ebx,%r11d
+ addl %r12d,%edx
+ movl %ebx,%r12d
+ roll $9,%edx
+ addl %eax,%edx
+ notl %r11d
+ leal -187363961(%rcx,%r10,1),%ecx
+ andl %edx,%r12d
+ andl %eax,%r11d
+ movl 32(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %eax,%r11d
+ addl %r12d,%ecx
+ movl %eax,%r12d
+ roll $14,%ecx
+ addl %edx,%ecx
+ notl %r11d
+ leal 1163531501(%rbx,%r10,1),%ebx
+ andl %ecx,%r12d
+ andl %edx,%r11d
+ movl 52(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %edx,%r11d
+ addl %r12d,%ebx
+ movl %edx,%r12d
+ roll $20,%ebx
+ addl %ecx,%ebx
+ notl %r11d
+ leal -1444681467(%rax,%r10,1),%eax
+ andl %ebx,%r12d
+ andl %ecx,%r11d
+ movl 8(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ecx,%r11d
+ addl %r12d,%eax
+ movl %ecx,%r12d
+ roll $5,%eax
+ addl %ebx,%eax
+ notl %r11d
+ leal -51403784(%rdx,%r10,1),%edx
+ andl %eax,%r12d
+ andl %ebx,%r11d
+ movl 28(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %ebx,%r11d
+ addl %r12d,%edx
+ movl %ebx,%r12d
+ roll $9,%edx
+ addl %eax,%edx
+ notl %r11d
+ leal 1735328473(%rcx,%r10,1),%ecx
+ andl %edx,%r12d
+ andl %eax,%r11d
+ movl 48(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %eax,%r11d
+ addl %r12d,%ecx
+ movl %eax,%r12d
+ roll $14,%ecx
+ addl %edx,%ecx
+ notl %r11d
+ leal -1926607734(%rbx,%r10,1),%ebx
+ andl %ecx,%r12d
+ andl %edx,%r11d
+ movl 0(%rsi),%r10d
+ orl %r11d,%r12d
+ movl %edx,%r11d
+ addl %r12d,%ebx
+ movl %edx,%r12d
+ roll $20,%ebx
+ addl %ecx,%ebx
+ movl 20(%rsi),%r10d
+ movl %ecx,%r11d
+ leal -378558(%rax,%r10,1),%eax
+ movl 32(%rsi),%r10d
+ xorl %edx,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%eax
+ roll $4,%eax
+ movl %ebx,%r11d
+ addl %ebx,%eax
+ leal -2022574463(%rdx,%r10,1),%edx
+ movl 44(%rsi),%r10d
+ xorl %ecx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%edx
+ roll $11,%edx
+ movl %eax,%r11d
+ addl %eax,%edx
+ leal 1839030562(%rcx,%r10,1),%ecx
+ movl 56(%rsi),%r10d
+ xorl %ebx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ecx
+ roll $16,%ecx
+ movl %edx,%r11d
+ addl %edx,%ecx
+ leal -35309556(%rbx,%r10,1),%ebx
+ movl 4(%rsi),%r10d
+ xorl %eax,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%ebx
+ roll $23,%ebx
+ movl %ecx,%r11d
+ addl %ecx,%ebx
+ leal -1530992060(%rax,%r10,1),%eax
+ movl 16(%rsi),%r10d
+ xorl %edx,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%eax
+ roll $4,%eax
+ movl %ebx,%r11d
+ addl %ebx,%eax
+ leal 1272893353(%rdx,%r10,1),%edx
+ movl 28(%rsi),%r10d
+ xorl %ecx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%edx
+ roll $11,%edx
+ movl %eax,%r11d
+ addl %eax,%edx
+ leal -155497632(%rcx,%r10,1),%ecx
+ movl 40(%rsi),%r10d
+ xorl %ebx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ecx
+ roll $16,%ecx
+ movl %edx,%r11d
+ addl %edx,%ecx
+ leal -1094730640(%rbx,%r10,1),%ebx
+ movl 52(%rsi),%r10d
+ xorl %eax,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%ebx
+ roll $23,%ebx
+ movl %ecx,%r11d
+ addl %ecx,%ebx
+ leal 681279174(%rax,%r10,1),%eax
+ movl 0(%rsi),%r10d
+ xorl %edx,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%eax
+ roll $4,%eax
+ movl %ebx,%r11d
+ addl %ebx,%eax
+ leal -358537222(%rdx,%r10,1),%edx
+ movl 12(%rsi),%r10d
+ xorl %ecx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%edx
+ roll $11,%edx
+ movl %eax,%r11d
+ addl %eax,%edx
+ leal -722521979(%rcx,%r10,1),%ecx
+ movl 24(%rsi),%r10d
+ xorl %ebx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ecx
+ roll $16,%ecx
+ movl %edx,%r11d
+ addl %edx,%ecx
+ leal 76029189(%rbx,%r10,1),%ebx
+ movl 36(%rsi),%r10d
+ xorl %eax,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%ebx
+ roll $23,%ebx
+ movl %ecx,%r11d
+ addl %ecx,%ebx
+ leal -640364487(%rax,%r10,1),%eax
+ movl 48(%rsi),%r10d
+ xorl %edx,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%eax
+ roll $4,%eax
+ movl %ebx,%r11d
+ addl %ebx,%eax
+ leal -421815835(%rdx,%r10,1),%edx
+ movl 60(%rsi),%r10d
+ xorl %ecx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%edx
+ roll $11,%edx
+ movl %eax,%r11d
+ addl %eax,%edx
+ leal 530742520(%rcx,%r10,1),%ecx
+ movl 8(%rsi),%r10d
+ xorl %ebx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ecx
+ roll $16,%ecx
+ movl %edx,%r11d
+ addl %edx,%ecx
+ leal -995338651(%rbx,%r10,1),%ebx
+ movl 0(%rsi),%r10d
+ xorl %eax,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%ebx
+ roll $23,%ebx
+ movl %ecx,%r11d
+ addl %ecx,%ebx
+ movl 0(%rsi),%r10d
+ movl $4294967295,%r11d
+ xorl %edx,%r11d
+ leal -198630844(%rax,%r10,1),%eax
+ orl %ebx,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%eax
+ movl 28(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $6,%eax
+ xorl %ecx,%r11d
+ addl %ebx,%eax
+ leal 1126891415(%rdx,%r10,1),%edx
+ orl %eax,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%edx
+ movl 56(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $10,%edx
+ xorl %ebx,%r11d
+ addl %eax,%edx
+ leal -1416354905(%rcx,%r10,1),%ecx
+ orl %edx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%ecx
+ movl 20(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $15,%ecx
+ xorl %eax,%r11d
+ addl %edx,%ecx
+ leal -57434055(%rbx,%r10,1),%ebx
+ orl %ecx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ebx
+ movl 48(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $21,%ebx
+ xorl %edx,%r11d
+ addl %ecx,%ebx
+ leal 1700485571(%rax,%r10,1),%eax
+ orl %ebx,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%eax
+ movl 12(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $6,%eax
+ xorl %ecx,%r11d
+ addl %ebx,%eax
+ leal -1894986606(%rdx,%r10,1),%edx
+ orl %eax,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%edx
+ movl 40(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $10,%edx
+ xorl %ebx,%r11d
+ addl %eax,%edx
+ leal -1051523(%rcx,%r10,1),%ecx
+ orl %edx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%ecx
+ movl 4(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $15,%ecx
+ xorl %eax,%r11d
+ addl %edx,%ecx
+ leal -2054922799(%rbx,%r10,1),%ebx
+ orl %ecx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ebx
+ movl 32(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $21,%ebx
+ xorl %edx,%r11d
+ addl %ecx,%ebx
+ leal 1873313359(%rax,%r10,1),%eax
+ orl %ebx,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%eax
+ movl 60(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $6,%eax
+ xorl %ecx,%r11d
+ addl %ebx,%eax
+ leal -30611744(%rdx,%r10,1),%edx
+ orl %eax,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%edx
+ movl 24(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $10,%edx
+ xorl %ebx,%r11d
+ addl %eax,%edx
+ leal -1560198380(%rcx,%r10,1),%ecx
+ orl %edx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%ecx
+ movl 52(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $15,%ecx
+ xorl %eax,%r11d
+ addl %edx,%ecx
+ leal 1309151649(%rbx,%r10,1),%ebx
+ orl %ecx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ebx
+ movl 16(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $21,%ebx
+ xorl %edx,%r11d
+ addl %ecx,%ebx
+ leal -145523070(%rax,%r10,1),%eax
+ orl %ebx,%r11d
+ xorl %ecx,%r11d
+ addl %r11d,%eax
+ movl 44(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $6,%eax
+ xorl %ecx,%r11d
+ addl %ebx,%eax
+ leal -1120210379(%rdx,%r10,1),%edx
+ orl %eax,%r11d
+ xorl %ebx,%r11d
+ addl %r11d,%edx
+ movl 8(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $10,%edx
+ xorl %ebx,%r11d
+ addl %eax,%edx
+ leal 718787259(%rcx,%r10,1),%ecx
+ orl %edx,%r11d
+ xorl %eax,%r11d
+ addl %r11d,%ecx
+ movl 36(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $15,%ecx
+ xorl %eax,%r11d
+ addl %edx,%ecx
+ leal -343485551(%rbx,%r10,1),%ebx
+ orl %ecx,%r11d
+ xorl %edx,%r11d
+ addl %r11d,%ebx
+ movl 0(%rsi),%r10d
+ movl $4294967295,%r11d
+ roll $21,%ebx
+ xorl %edx,%r11d
+ addl %ecx,%ebx
+
+ addl %r8d,%eax
+ addl %r9d,%ebx
+ addl %r14d,%ecx
+ addl %r15d,%edx
+
+
+ addq $64,%rsi
+ cmpq %rdi,%rsi
+ jb .Lloop
+
+
+.Lend:
+ movl %eax,0(%rbp)
+ movl %ebx,4(%rbp)
+ movl %ecx,8(%rbp)
+ movl %edx,12(%rbp)
+
+ movq (%rsp),%r15
+ movq 8(%rsp),%r14
+ movq 16(%rsp),%r12
+ movq 24(%rsp),%rbx
+ movq 32(%rsp),%rbp
+ addq $40,%rsp
+.Lepilogue:
+ .byte 0xf3,0xc3
+.size md5_block_asm_data_order,.-md5_block_asm_data_order
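md5-x86_64.S keeps the whole state in registers: %rbp holds the context, %rdi is set to data + num*64, the entry values of A..D are parked in %r8d/%r9d/%r14d/%r15d at the top of .Lloop, and after the 64 unrolled steps they are added back before %rsi advances by 64. The same control flow as a C sketch with the round steps elided (md5_state and md5_blocks are illustrative names, not OpenSSL API):

```c
#include <stddef.h>
#include <stdint.h>

/* Structural sketch of the .Lloop driver in md5-x86_64.S; the 64 round
 * steps themselves are elided. */
typedef struct { uint32_t A, B, C, D; } md5_state;

static void md5_blocks(md5_state *ctx, const uint8_t *data, size_t num)
{
    const uint8_t *end = data + num * 64;            /* shlq $6,%rdx; leaq (%rsi,%rdx,1),%rdi */
    uint32_t a = ctx->A, b = ctx->B, c = ctx->C, d = ctx->D;

    while (data < end) {                             /* cmpq %rdi,%rsi / jb .Lloop */
        uint32_t sa = a, sb = b, sc = c, sd = d;     /* movl %eax,%r8d ... movl %edx,%r15d */

        /* ... 64 unrolled round steps update a, b, c, d from data[0..63] ... */

        a += sa; b += sb; c += sc; d += sd;          /* addl %r8d,%eax ... addl %r15d,%edx */
        data += 64;                                  /* addq $64,%rsi */
    }
    ctx->A = a; ctx->B = b; ctx->C = c; ctx->D = d;  /* stores at .Lend */
}
```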
diff --git a/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.pl b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.pl
new file mode 100755
index 00000000..f11224d1
--- /dev/null
+++ b/ics-openvpn-stripped/main/openssl/crypto/md5/asm/md5-x86_64.pl
@@ -0,0 +1,370 @@
+#!/usr/bin/perl -w
+#
+# MD5 optimized for AMD64.
+#
+# Author: Marc Bevand <bevand_m (at) epita.fr>
+# Licence: I hereby disclaim the copyright on this code and place it
+# in the public domain.
+#
+
+use strict;
+
+my $code;
+
+# round1_step() does:
+# dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = z' (copy of z for the next step)
+# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
+sub round1_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
+ $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
+ $code .= <<EOF;
+ xor $y, %r11d /* y ^ ... */
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ and $x, %r11d /* x & ... */
+ xor $z, %r11d /* z ^ ... */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ add %r11d, $dst /* dst += ... */
+ rol \$$s, $dst /* dst <<< s */
+ mov $y, %r11d /* (NEXT STEP) z' = $y */
+ add $x, $dst /* dst += x */
+EOF
+}
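As a cross-check against RFC 1321, here is a minimal pure-Perl sketch of the same round-1 step; the names md5_F, md5_F_asm, rol32 and round1_ref are illustrative and are not part of the generator. The emitted instructions compute F through the equivalent z ^ (x & (y ^ z)) form, which replaces the NOT of the textbook definition with a cheaper xor/and/xor sequence.

    use strict;

    # Textbook F from RFC 1321 and the xor/and/xor form emitted above.
    sub md5_F     { my ($x, $y, $z) = @_; return (($x & $y) | (~$x & $z)) & 0xffffffff }
    sub md5_F_asm { my ($x, $y, $z) = @_; return $z ^ ($x & ($y ^ $z)) }

    # 32-bit rotate left, as the roll instruction does.
    sub rol32 { my ($v, $s) = @_; return (($v << $s) | ($v >> (32 - $s))) & 0xffffffff }

    # One round-1 step: dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
    sub round1_ref {
        my ($dst, $x, $y, $z, $xk, $t, $s) = @_;
        $dst = ($dst + md5_F($x, $y, $z) + $xk + $t) & 0xffffffff;
        return (rol32($dst, $s) + $x) & 0xffffffff;
    }

    # Both forms of F agree on arbitrary 32-bit inputs.
    die "F mismatch" if md5_F(0xdeadbeef, 0x12345678, 0x9abcdef0)
                     != md5_F_asm(0xdeadbeef, 0x12345678, 0x9abcdef0);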
+
+# round2_step() does:
+# dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = z' (copy of z for the next step)
+# %r12d = z' (copy of z for the next step)
+# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
+sub round2_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1);
+ $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
+ $code .= " mov %edx, %r12d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
+ $code .= <<EOF;
+ not %r11d /* not z */
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ and $x, %r12d /* x & z */
+ and $y, %r11d /* y & (not z) */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ or %r11d, %r12d /* (y & (not z)) | (x & z) */
+ mov $y, %r11d /* (NEXT STEP) z' = $y */
+ add %r12d, $dst /* dst += ... */
+ mov $y, %r12d /* (NEXT STEP) z' = $y */
+ rol \$$s, $dst /* dst <<< s */
+ add $x, $dst /* dst += x */
+EOF
+}
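Unlike F, G is computed literally here, which is why round2_step() needs the extra %r12d scratch: one register builds x & z while the other builds y & (not z) before the OR. A short reference form for comparison (md5_G is an illustrative name, not part of the generator):

    # G(x,y,z) = (x & z) | (y & ~z); the same add/rotate/add skeleton as the
    # round-1 sketch applies, with shift amounts 5, 9, 14 and 20.
    sub md5_G { my ($x, $y, $z) = @_; return (($x & $z) | ($y & ~$z)) & 0xffffffff }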
+
+# round3_step() does:
+# dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = y' (copy of y for the next step)
+# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
+sub round3_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1);
+ $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
+ $code .= <<EOF;
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ xor $z, %r11d /* z ^ ... */
+ xor $x, %r11d /* x ^ ... */
+ add %r11d, $dst /* dst += ... */
+ rol \$$s, $dst /* dst <<< s */
+ mov $x, %r11d /* (NEXT STEP) y' = $x */
+ add $x, $dst /* dst += x */
+EOF
+}
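H is a plain three-way XOR, so carrying the previous step's y in %r11d lets each round-3 step finish H with only two XORs; that is why this round has the lowest per-step cost in the estimates above. Reference form (md5_H is an illustrative name, not part of the generator):

    # H(x,y,z) = x ^ y ^ z; no AND/OR, hence the shorter dependency chain.
    sub md5_H { my ($x, $y, $z) = @_; return $x ^ $y ^ $z }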
+
+# round4_step() does:
+# dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
+# %r10d = X[k_next]
+# %r11d = not z' (copy of not z for the next step)
+# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
+sub round4_step
+{
+ my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+ $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1);
+ $code .= " mov \$0xffffffff, %r11d\n" if ($pos == -1);
+ $code .= " xor %edx, %r11d /* (NEXT STEP) not z' = not %edx*/\n"
+ if ($pos == -1);
+ $code .= <<EOF;
+ lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
+ or $x, %r11d /* x | ... */
+ xor $y, %r11d /* y ^ ... */
+ add %r11d, $dst /* dst += ... */
+ mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
+ mov \$0xffffffff, %r11d
+ rol \$$s, $dst /* dst <<< s */
+ xor $y, %r11d /* (NEXT STEP) not z' = not $y */
+ add $x, $dst /* dst += x */
+EOF
+}
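For I, the (not z) needed by the following step is built with a mov $0xffffffff / xor pair rather than a NOT, so it can be scheduled while the current step's rotate is still in flight; the same pattern shows up as the movl $4294967295,%r11d / xorl pairs in the generated md5-x86_64.S earlier in this diff. Reference form (md5_I is an illustrative name, not part of the generator):

    # I(x,y,z) = y ^ (x | ~z)
    sub md5_I { my ($x, $y, $z) = @_; return ($y ^ ($x | (~$z & 0xffffffff))) & 0xffffffff }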
+
+my $flavour = shift;
+my $output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+no warnings qw(uninitialized);
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+$code .= <<EOF;
+.text
+.align 16
+
+.globl md5_block_asm_data_order
+.type md5_block_asm_data_order,\@function,3
+md5_block_asm_data_order:
+ push %rbp
+ push %rbx
+ push %r12
+ push %r14
+ push %r15
+.Lprologue:
+
+ # rdi = arg #1 (ctx, MD5_CTX pointer)
+ # rsi = arg #2 (ptr, data pointer)
+ # rdx = arg #3 (nbr, number of 16-word blocks to process)
+ mov %rdi, %rbp # rbp = ctx
+ shl \$6, %rdx # rdx = nbr in bytes
+ lea (%rsi,%rdx), %rdi # rdi = end
+ mov 0*4(%rbp), %eax # eax = ctx->A
+ mov 1*4(%rbp), %ebx # ebx = ctx->B
+ mov 2*4(%rbp), %ecx # ecx = ctx->C
+ mov 3*4(%rbp), %edx # edx = ctx->D
+ # end is 'rdi'
+ # ptr is 'rsi'
+ # A is 'eax'
+ # B is 'ebx'
+ # C is 'ecx'
+ # D is 'edx'
+
+ cmp %rdi, %rsi # cmp end with ptr
+ je .Lend # jmp if ptr == end
+
+ # BEGIN of loop over 16-word blocks
+.Lloop: # save old values of A, B, C, D
+ mov %eax, %r8d
+ mov %ebx, %r9d
+ mov %ecx, %r14d
+ mov %edx, %r15d
+EOF
+round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
+round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');
+
+round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
+round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');
+
+round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
+round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');
+
+round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
+round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
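The literal arguments in the four call tables above are not arbitrary: the X[k] word indices follow the per-round permutations from RFC 1321 (k = i, (1 + 5i) mod 16, (5 + 3i) mod 16 and 7i mod 16 respectively; each call passes the index prefetched for the following step, so the schedule appears rotated by one in the argument lists), and each constant is T_i = floor(2^32 * abs(sin(i))) with i in radians. A small self-contained Perl check that reproduces both tables (written for illustration, not part of the generator):

    use strict;

    # Message-word schedule: which X[k] each of the 64 steps consumes.
    my @schedule = (
        (map { $_               } 0 .. 15),   # round 1: k = i
        (map { (1 + 5 * $_) % 16 } 0 .. 15),  # round 2: k = (1 + 5i) mod 16
        (map { (5 + 3 * $_) % 16 } 0 .. 15),  # round 3: k = (5 + 3i) mod 16
        (map { (7 * $_)     % 16 } 0 .. 15),  # round 4: k = 7i mod 16
    );
    printf "step %2d uses X[%2d]\n", $_ + 1, $schedule[$_] for 0 .. 63;

    # Additive constants: T_i = int(2^32 * abs(sin(i))), i = 1..64 (radians),
    # e.g. T_1 = 0xd76aa478 as passed to the first round1_step() call.
    printf "T[%2d] = 0x%08x\n", $_, int(abs(sin($_)) * 2 ** 32) for 1 .. 64;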
+$code .= <<EOF;
+ # add old values of A, B, C, D
+ add %r8d, %eax
+ add %r9d, %ebx
+ add %r14d, %ecx
+ add %r15d, %edx
+
+ # loop control
+ add \$64, %rsi # ptr += 64
+ cmp %rdi, %rsi # cmp end with ptr
+ jb .Lloop # jmp if ptr < end
+ # END of loop over 16-word blocks
+
+.Lend:
+ mov %eax, 0*4(%rbp) # ctx->A = A
+ mov %ebx, 1*4(%rbp) # ctx->B = B
+ mov %ecx, 2*4(%rbp) # ctx->C = C
+ mov %edx, 3*4(%rbp) # ctx->D = D
+
+ mov (%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r12
+ mov 24(%rsp),%rbx
+ mov 32(%rsp),%rbp
+ add \$40,%rsp
+.Lepilogue:
+ ret
+.size md5_block_asm_data_order,.-md5_block_asm_data_order
+EOF
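The .Lloop/.Lend structure emitted above is the standard per-block driver: save A..D, run the 64 steps over one 64-byte block, add the saved values back, then advance the data pointer. A hedged pure-Perl sketch of that driver follows, with the 64 round steps abstracted behind a caller-supplied coderef; md5_blocks_ref and its arguments are illustrative and do not correspond to anything in the generator.

    use strict;

    # Mirrors md5_block_asm_data_order(ctx, ptr, nbr):
    #   @$state - (A, B, C, D) as 32-bit integers (the ctx words)
    #   $data   - raw input bytes, $nbr 64-byte blocks of it
    #   $rounds - coderef standing in for the 64 generated round steps
    sub md5_blocks_ref {
        my ($state, $data, $nbr, $rounds) = @_;
        for my $i (0 .. $nbr - 1) {
            my @X     = unpack "V16", substr($data, 64 * $i, 64);  # 16 little-endian words
            my @saved = @$state;                                   # save old A, B, C, D
            $rounds->($state, \@X);                                # rounds 1-4
            $state->[$_] = ($state->[$_] + $saved[$_]) & 0xffffffff for 0 .. 3;
        }
    }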
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+my $rec="%rcx";
+my $frame="%rdx";
+my $context="%r8";
+my $disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lprologue(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lprologue
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lepilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lepilogue
+ jae .Lin_prologue
+
+ lea 40(%rax),%rax
+
+ mov -8(%rax),%rbp
+ mov -16(%rax),%rbx
+ mov -24(%rax),%r12
+ mov -32(%rax),%r14
+ mov -40(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_md5_block_asm_data_order
+ .rva .LSEH_end_md5_block_asm_data_order
+ .rva .LSEH_info_md5_block_asm_data_order
+
+.section .xdata
+.align 8
+.LSEH_info_md5_block_asm_data_order:
+ .byte 9,0,0,0
+ .rva se_handler
+___
+}
+
+print $code;
+
+close STDOUT;