From 3e121542d8b7ab5201c47bbd3ba5611a23c54759 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Parm=C3=A9nides=20GV?=
Date: Wed, 11 Jun 2014 11:56:59 +0200
Subject: Correctly connects to millipede.

The location keyword in android.cfg isn't supported; the corresponding EIP
code has been commented out. I think we should support it in ics-openvpn,
so that we can show the location instead of the server name.

I've updated all openssl, openvpn, etc. subprojects from rev 813 of
ics-openvpn, and jni too.
---
 app/openssl/crypto/sha/asm/sha1-586.S          | 2639 ++++++++++++++++++++++++
 app/openssl/crypto/sha/asm/sha1-586.pl         | 1107 +++++++++-
 app/openssl/crypto/sha/asm/sha1-alpha.pl       |  322 +++
 app/openssl/crypto/sha/asm/sha1-armv4-large.S  |    1 +
 app/openssl/crypto/sha/asm/sha1-armv4-large.pl |   41 +-
 app/openssl/crypto/sha/asm/sha1-armv4-large.s  |  209 +-
 app/openssl/crypto/sha/asm/sha1-ia64.pl        |  193 +-
 app/openssl/crypto/sha/asm/sha1-mips.S         | 1664 +++++++++++++++
 app/openssl/crypto/sha/asm/sha1-mips.pl        |  354 ++++
 app/openssl/crypto/sha/asm/sha1-parisc.pl      |  260 +++
 app/openssl/crypto/sha/asm/sha1-ppc.pl         |   83 +-
 app/openssl/crypto/sha/asm/sha1-s390x.pl       |   50 +-
 app/openssl/crypto/sha/asm/sha1-sparcv9a.pl    |    2 +-
 app/openssl/crypto/sha/asm/sha1-x86_64.S       | 2486 ++++++++++++++++++++++
 app/openssl/crypto/sha/asm/sha1-x86_64.pl      | 1188 +++++++++--
 app/openssl/crypto/sha/asm/sha256-586.S        |  258 +++
 app/openssl/crypto/sha/asm/sha256-586.pl       |   54 +-
 app/openssl/crypto/sha/asm/sha256-armv4.S      |    1 +
 app/openssl/crypto/sha/asm/sha256-armv4.pl     |   55 +-
 app/openssl/crypto/sha/asm/sha256-armv4.s      |  858 ++++++--
 app/openssl/crypto/sha/asm/sha256-mips.S       | 1998 ++++++++++++++++++
 app/openssl/crypto/sha/asm/sha256-x86_64.S     | 1778 ++++++++++++++++
 app/openssl/crypto/sha/asm/sha512-586.S        |  836 ++++++++
 app/openssl/crypto/sha/asm/sha512-586.pl       |   18 +-
 app/openssl/crypto/sha/asm/sha512-armv4.S      |    1 +
 app/openssl/crypto/sha/asm/sha512-armv4.pl     |  357 +++-
 app/openssl/crypto/sha/asm/sha512-armv4.s      | 1635 +++++++++++++--
 app/openssl/crypto/sha/asm/sha512-mips.pl      |  455 ++++
 app/openssl/crypto/sha/asm/sha512-parisc.pl    |  793 +++++++
 app/openssl/crypto/sha/asm/sha512-ppc.pl       |  114 +-
 app/openssl/crypto/sha/asm/sha512-s390x.pl     |   63 +-
 app/openssl/crypto/sha/asm/sha512-sparcv9.pl   |    6 +-
 app/openssl/crypto/sha/asm/sha512-x86_64.S     | 1802 ++++++++++++++++
 app/openssl/crypto/sha/asm/sha512-x86_64.pl    |   89 +-
 34 files changed, 20720 insertions(+), 1050 deletions(-)
 create mode 100644 app/openssl/crypto/sha/asm/sha1-586.S
 create mode 100644 app/openssl/crypto/sha/asm/sha1-alpha.pl
 create mode 120000 app/openssl/crypto/sha/asm/sha1-armv4-large.S
 create mode 100644 app/openssl/crypto/sha/asm/sha1-mips.S
 create mode 100644 app/openssl/crypto/sha/asm/sha1-mips.pl
 create mode 100644 app/openssl/crypto/sha/asm/sha1-parisc.pl
 create mode 100644 app/openssl/crypto/sha/asm/sha1-x86_64.S
 create mode 100644 app/openssl/crypto/sha/asm/sha256-586.S
 create mode 120000 app/openssl/crypto/sha/asm/sha256-armv4.S
 create mode 100644 app/openssl/crypto/sha/asm/sha256-mips.S
 create mode 100644 app/openssl/crypto/sha/asm/sha256-x86_64.S
 create mode 100644 app/openssl/crypto/sha/asm/sha512-586.S
 create mode 120000 app/openssl/crypto/sha/asm/sha512-armv4.S
 create mode 100644 app/openssl/crypto/sha/asm/sha512-mips.pl
 create mode 100755 app/openssl/crypto/sha/asm/sha512-parisc.pl
 create mode 100644 app/openssl/crypto/sha/asm/sha512-x86_64.S

(limited to 'app/openssl/crypto/sha/asm')

diff --git a/app/openssl/crypto/sha/asm/sha1-586.S b/app/openssl/crypto/sha/asm/sha1-586.S
new file mode 100644
index
00000000..47bef2a9 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha1-586.S @@ -0,0 +1,2639 @@ +.file "sha1-586.s" +.text +.globl sha1_block_data_order +.type sha1_block_data_order,@function +.align 16 +sha1_block_data_order: +.L_sha1_block_data_order_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call .L000pic_point +.L000pic_point: + popl %ebp + leal _GLOBAL_OFFSET_TABLE_+[.-.L000pic_point](%ebp),%esi + movl OPENSSL_ia32cap_P@GOT(%esi),%esi + leal .LK_XX_XX-.L000pic_point(%ebp),%ebp + movl (%esi),%eax + movl 4(%esi),%edx + testl $512,%edx + jz .L001x86 + testl $16777216,%eax + jz .L001x86 + jmp .Lssse3_shortcut +.align 16 +.L001x86: + movl 20(%esp),%ebp + movl 24(%esp),%esi + movl 28(%esp),%eax + subl $76,%esp + shll $6,%eax + addl %esi,%eax + movl %eax,104(%esp) + movl 16(%ebp),%edi + jmp .L002loop +.align 16 +.L002loop: + movl (%esi),%eax + movl 4(%esi),%ebx + movl 8(%esi),%ecx + movl 12(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,(%esp) + movl %ebx,4(%esp) + movl %ecx,8(%esp) + movl %edx,12(%esp) + movl 16(%esi),%eax + movl 20(%esi),%ebx + movl 24(%esi),%ecx + movl 28(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,16(%esp) + movl %ebx,20(%esp) + movl %ecx,24(%esp) + movl %edx,28(%esp) + movl 32(%esi),%eax + movl 36(%esi),%ebx + movl 40(%esi),%ecx + movl 44(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,32(%esp) + movl %ebx,36(%esp) + movl %ecx,40(%esp) + movl %edx,44(%esp) + movl 48(%esi),%eax + movl 52(%esi),%ebx + movl 56(%esi),%ecx + movl 60(%esi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + movl %eax,48(%esp) + movl %ebx,52(%esp) + movl %ecx,56(%esp) + movl %edx,60(%esp) + movl %esi,100(%esp) + movl (%ebp),%eax + movl 4(%ebp),%ebx + movl 8(%ebp),%ecx + movl 12(%ebp),%edx + + movl %ecx,%esi + movl %eax,%ebp + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl (%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 4(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 8(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 12(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + addl %ecx,%ebp + + movl %edi,%ebx + movl %ebp,%ecx + roll $5,%ebp + xorl %esi,%ebx + addl %eax,%ebp + movl 16(%esp),%eax + andl %edx,%ebx + rorl $2,%edx + xorl %esi,%ebx + leal 1518500249(%ebp,%eax,1),%ebp + addl %ebx,%ebp + + movl %edx,%eax + movl %ebp,%ebx + roll $5,%ebp + xorl %edi,%eax + addl %esi,%ebp + movl 20(%esp),%esi + andl %ecx,%eax + rorl $2,%ecx + xorl %edi,%eax + leal 1518500249(%ebp,%esi,1),%ebp + addl %eax,%ebp + + movl %ecx,%esi + movl %ebp,%eax + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl 24(%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 28(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + 
addl %ecx,%ebp + movl 32(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 36(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + addl %ecx,%ebp + + movl %edi,%ebx + movl %ebp,%ecx + roll $5,%ebp + xorl %esi,%ebx + addl %eax,%ebp + movl 40(%esp),%eax + andl %edx,%ebx + rorl $2,%edx + xorl %esi,%ebx + leal 1518500249(%ebp,%eax,1),%ebp + addl %ebx,%ebp + + movl %edx,%eax + movl %ebp,%ebx + roll $5,%ebp + xorl %edi,%eax + addl %esi,%ebp + movl 44(%esp),%esi + andl %ecx,%eax + rorl $2,%ecx + xorl %edi,%eax + leal 1518500249(%ebp,%esi,1),%ebp + addl %eax,%ebp + + movl %ecx,%esi + movl %ebp,%eax + roll $5,%ebp + xorl %edx,%esi + addl %edi,%ebp + movl 48(%esp),%edi + andl %ebx,%esi + rorl $2,%ebx + xorl %edx,%esi + leal 1518500249(%ebp,%edi,1),%ebp + addl %esi,%ebp + + movl %ebx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ecx,%edi + addl %edx,%ebp + movl 52(%esp),%edx + andl %eax,%edi + rorl $2,%eax + xorl %ecx,%edi + leal 1518500249(%ebp,%edx,1),%ebp + addl %edi,%ebp + + movl %eax,%edx + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%edx + addl %ecx,%ebp + movl 56(%esp),%ecx + andl %esi,%edx + rorl $2,%esi + xorl %ebx,%edx + leal 1518500249(%ebp,%ecx,1),%ebp + addl %edx,%ebp + + movl %esi,%ecx + movl %ebp,%edx + roll $5,%ebp + xorl %eax,%ecx + addl %ebx,%ebp + movl 60(%esp),%ebx + andl %edi,%ecx + rorl $2,%edi + xorl %eax,%ecx + leal 1518500249(%ebp,%ebx,1),%ebp + movl (%esp),%ebx + addl %ebp,%ecx + + movl %edi,%ebp + xorl 8(%esp),%ebx + xorl %esi,%ebp + xorl 32(%esp),%ebx + andl %edx,%ebp + xorl 52(%esp),%ebx + roll $1,%ebx + xorl %esi,%ebp + addl %ebp,%eax + movl %ecx,%ebp + rorl $2,%edx + movl %ebx,(%esp) + roll $5,%ebp + leal 1518500249(%ebx,%eax,1),%ebx + movl 4(%esp),%eax + addl %ebp,%ebx + + movl %edx,%ebp + xorl 12(%esp),%eax + xorl %edi,%ebp + xorl 36(%esp),%eax + andl %ecx,%ebp + xorl 56(%esp),%eax + roll $1,%eax + xorl %edi,%ebp + addl %ebp,%esi + movl %ebx,%ebp + rorl $2,%ecx + movl %eax,4(%esp) + roll $5,%ebp + leal 1518500249(%eax,%esi,1),%eax + movl 8(%esp),%esi + addl %ebp,%eax + + movl %ecx,%ebp + xorl 16(%esp),%esi + xorl %edx,%ebp + xorl 40(%esp),%esi + andl %ebx,%ebp + xorl 60(%esp),%esi + roll $1,%esi + xorl %edx,%ebp + addl %ebp,%edi + movl %eax,%ebp + rorl $2,%ebx + movl %esi,8(%esp) + roll $5,%ebp + leal 1518500249(%esi,%edi,1),%esi + movl 12(%esp),%edi + addl %ebp,%esi + + movl %ebx,%ebp + xorl 20(%esp),%edi + xorl %ecx,%ebp + xorl 44(%esp),%edi + andl %eax,%ebp + xorl (%esp),%edi + roll $1,%edi + xorl %ecx,%ebp + addl %ebp,%edx + movl %esi,%ebp + rorl $2,%eax + movl %edi,12(%esp) + roll $5,%ebp + leal 1518500249(%edi,%edx,1),%edi + movl 16(%esp),%edx + addl %ebp,%edi + + movl %esi,%ebp + xorl 24(%esp),%edx + xorl %eax,%ebp + xorl 48(%esp),%edx + xorl %ebx,%ebp + xorl 4(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,16(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 20(%esp),%ecx + addl %ebp,%edx + + movl %edi,%ebp + xorl 28(%esp),%ecx + xorl %esi,%ebp + xorl 52(%esp),%ecx + xorl %eax,%ebp + xorl 8(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,20(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 24(%esp),%ebx + addl %ebp,%ecx + + movl %edx,%ebp + xorl 32(%esp),%ebx + xorl %edi,%ebp + xorl 56(%esp),%ebx + xorl %esi,%ebp + xorl 12(%esp),%ebx + roll $1,%ebx + addl 
%ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,24(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 28(%esp),%eax + addl %ebp,%ebx + + movl %ecx,%ebp + xorl 36(%esp),%eax + xorl %edx,%ebp + xorl 60(%esp),%eax + xorl %edi,%ebp + xorl 16(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,28(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 32(%esp),%esi + addl %ebp,%eax + + movl %ebx,%ebp + xorl 40(%esp),%esi + xorl %ecx,%ebp + xorl (%esp),%esi + xorl %edx,%ebp + xorl 20(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,32(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 36(%esp),%edi + addl %ebp,%esi + + movl %eax,%ebp + xorl 44(%esp),%edi + xorl %ebx,%ebp + xorl 4(%esp),%edi + xorl %ecx,%ebp + xorl 24(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,36(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl 40(%esp),%edx + addl %ebp,%edi + + movl %esi,%ebp + xorl 48(%esp),%edx + xorl %eax,%ebp + xorl 8(%esp),%edx + xorl %ebx,%ebp + xorl 28(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,40(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 44(%esp),%ecx + addl %ebp,%edx + + movl %edi,%ebp + xorl 52(%esp),%ecx + xorl %esi,%ebp + xorl 12(%esp),%ecx + xorl %eax,%ebp + xorl 32(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,44(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 48(%esp),%ebx + addl %ebp,%ecx + + movl %edx,%ebp + xorl 56(%esp),%ebx + xorl %edi,%ebp + xorl 16(%esp),%ebx + xorl %esi,%ebp + xorl 36(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,48(%esp) + leal 1859775393(%ebx,%eax,1),%ebx + movl 52(%esp),%eax + addl %ebp,%ebx + + movl %ecx,%ebp + xorl 60(%esp),%eax + xorl %edx,%ebp + xorl 20(%esp),%eax + xorl %edi,%ebp + xorl 40(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,52(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 56(%esp),%esi + addl %ebp,%eax + + movl %ebx,%ebp + xorl (%esp),%esi + xorl %ecx,%ebp + xorl 24(%esp),%esi + xorl %edx,%ebp + xorl 44(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,56(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 60(%esp),%edi + addl %ebp,%esi + + movl %eax,%ebp + xorl 4(%esp),%edi + xorl %ebx,%ebp + xorl 28(%esp),%edi + xorl %ecx,%ebp + xorl 48(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,60(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl (%esp),%edx + addl %ebp,%edi + + movl %esi,%ebp + xorl 8(%esp),%edx + xorl %eax,%ebp + xorl 32(%esp),%edx + xorl %ebx,%ebp + xorl 52(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 4(%esp),%ecx + addl %ebp,%edx + + movl %edi,%ebp + xorl 12(%esp),%ecx + xorl %esi,%ebp + xorl 36(%esp),%ecx + xorl %eax,%ebp + xorl 56(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,4(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 8(%esp),%ebx + addl %ebp,%ecx + + movl %edx,%ebp + xorl 16(%esp),%ebx + xorl %edi,%ebp + xorl 40(%esp),%ebx + xorl %esi,%ebp + xorl 60(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,8(%esp) + leal 
1859775393(%ebx,%eax,1),%ebx + movl 12(%esp),%eax + addl %ebp,%ebx + + movl %ecx,%ebp + xorl 20(%esp),%eax + xorl %edx,%ebp + xorl 44(%esp),%eax + xorl %edi,%ebp + xorl (%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,12(%esp) + leal 1859775393(%eax,%esi,1),%eax + movl 16(%esp),%esi + addl %ebp,%eax + + movl %ebx,%ebp + xorl 24(%esp),%esi + xorl %ecx,%ebp + xorl 48(%esp),%esi + xorl %edx,%ebp + xorl 4(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,16(%esp) + leal 1859775393(%esi,%edi,1),%esi + movl 20(%esp),%edi + addl %ebp,%esi + + movl %eax,%ebp + xorl 28(%esp),%edi + xorl %ebx,%ebp + xorl 52(%esp),%edi + xorl %ecx,%ebp + xorl 8(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,20(%esp) + leal 1859775393(%edi,%edx,1),%edi + movl 24(%esp),%edx + addl %ebp,%edi + + movl %esi,%ebp + xorl 32(%esp),%edx + xorl %eax,%ebp + xorl 56(%esp),%edx + xorl %ebx,%ebp + xorl 12(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,24(%esp) + leal 1859775393(%edx,%ecx,1),%edx + movl 28(%esp),%ecx + addl %ebp,%edx + + movl %edi,%ebp + xorl 36(%esp),%ecx + xorl %esi,%ebp + xorl 60(%esp),%ecx + xorl %eax,%ebp + xorl 16(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,28(%esp) + leal 1859775393(%ecx,%ebx,1),%ecx + movl 32(%esp),%ebx + addl %ebp,%ecx + + movl %edi,%ebp + xorl 40(%esp),%ebx + xorl %esi,%ebp + xorl (%esp),%ebx + andl %edx,%ebp + xorl 20(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,32(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 36(%esp),%eax + addl %ebp,%ebx + + movl %edx,%ebp + xorl 44(%esp),%eax + xorl %edi,%ebp + xorl 4(%esp),%eax + andl %ecx,%ebp + xorl 24(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,36(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 40(%esp),%esi + addl %ebp,%eax + + movl %ecx,%ebp + xorl 48(%esp),%esi + xorl %edx,%ebp + xorl 8(%esp),%esi + andl %ebx,%ebp + xorl 28(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,40(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 44(%esp),%edi + addl %ebp,%esi + + movl %ebx,%ebp + xorl 52(%esp),%edi + xorl %ecx,%ebp + xorl 12(%esp),%edi + andl %eax,%ebp + xorl 32(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,44(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 48(%esp),%edx + addl %ebp,%edi + + movl %eax,%ebp + xorl 56(%esp),%edx + xorl %ebx,%ebp + xorl 16(%esp),%edx + andl %esi,%ebp + xorl 36(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,48(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 52(%esp),%ecx + addl %ebp,%edx + + movl %esi,%ebp + xorl 60(%esp),%ecx + xorl %eax,%ebp + xorl 20(%esp),%ecx + andl %edi,%ebp + xorl 40(%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,52(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 56(%esp),%ebx + addl %ebp,%ecx 
+ + movl %edi,%ebp + xorl (%esp),%ebx + xorl %esi,%ebp + xorl 24(%esp),%ebx + andl %edx,%ebp + xorl 44(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,56(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 60(%esp),%eax + addl %ebp,%ebx + + movl %edx,%ebp + xorl 4(%esp),%eax + xorl %edi,%ebp + xorl 28(%esp),%eax + andl %ecx,%ebp + xorl 48(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,60(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl (%esp),%esi + addl %ebp,%eax + + movl %ecx,%ebp + xorl 8(%esp),%esi + xorl %edx,%ebp + xorl 32(%esp),%esi + andl %ebx,%ebp + xorl 52(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 4(%esp),%edi + addl %ebp,%esi + + movl %ebx,%ebp + xorl 12(%esp),%edi + xorl %ecx,%ebp + xorl 36(%esp),%edi + andl %eax,%ebp + xorl 56(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,4(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 8(%esp),%edx + addl %ebp,%edi + + movl %eax,%ebp + xorl 16(%esp),%edx + xorl %ebx,%ebp + xorl 40(%esp),%edx + andl %esi,%ebp + xorl 60(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi + movl %edi,%ecx + roll $5,%ecx + movl %edx,8(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 12(%esp),%ecx + addl %ebp,%edx + + movl %esi,%ebp + xorl 20(%esp),%ecx + xorl %eax,%ebp + xorl 44(%esp),%ecx + andl %edi,%ebp + xorl (%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,12(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 16(%esp),%ebx + addl %ebp,%ecx + + movl %edi,%ebp + xorl 24(%esp),%ebx + xorl %esi,%ebp + xorl 48(%esp),%ebx + andl %edx,%ebp + xorl 4(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,16(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 20(%esp),%eax + addl %ebp,%ebx + + movl %edx,%ebp + xorl 28(%esp),%eax + xorl %edi,%ebp + xorl 52(%esp),%eax + andl %ecx,%ebp + xorl 8(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,20(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 24(%esp),%esi + addl %ebp,%eax + + movl %ecx,%ebp + xorl 32(%esp),%esi + xorl %edx,%ebp + xorl 56(%esp),%esi + andl %ebx,%ebp + xorl 12(%esp),%esi + roll $1,%esi + addl %edi,%ebp + rorl $2,%ebx + movl %eax,%edi + roll $5,%edi + movl %esi,24(%esp) + leal 2400959708(%esi,%ebp,1),%esi + movl %ecx,%ebp + addl %edi,%esi + andl %edx,%ebp + movl 28(%esp),%edi + addl %ebp,%esi + + movl %ebx,%ebp + xorl 36(%esp),%edi + xorl %ecx,%ebp + xorl 60(%esp),%edi + andl %eax,%ebp + xorl 16(%esp),%edi + roll $1,%edi + addl %edx,%ebp + rorl $2,%eax + movl %esi,%edx + roll $5,%edx + movl %edi,28(%esp) + leal 2400959708(%edi,%ebp,1),%edi + movl %ebx,%ebp + addl %edx,%edi + andl %ecx,%ebp + movl 32(%esp),%edx + addl %ebp,%edi + + movl %eax,%ebp + xorl 40(%esp),%edx + xorl %ebx,%ebp + xorl (%esp),%edx + andl %esi,%ebp + xorl 20(%esp),%edx + roll $1,%edx + addl %ecx,%ebp + rorl $2,%esi 
+ movl %edi,%ecx + roll $5,%ecx + movl %edx,32(%esp) + leal 2400959708(%edx,%ebp,1),%edx + movl %eax,%ebp + addl %ecx,%edx + andl %ebx,%ebp + movl 36(%esp),%ecx + addl %ebp,%edx + + movl %esi,%ebp + xorl 44(%esp),%ecx + xorl %eax,%ebp + xorl 4(%esp),%ecx + andl %edi,%ebp + xorl 24(%esp),%ecx + roll $1,%ecx + addl %ebx,%ebp + rorl $2,%edi + movl %edx,%ebx + roll $5,%ebx + movl %ecx,36(%esp) + leal 2400959708(%ecx,%ebp,1),%ecx + movl %esi,%ebp + addl %ebx,%ecx + andl %eax,%ebp + movl 40(%esp),%ebx + addl %ebp,%ecx + + movl %edi,%ebp + xorl 48(%esp),%ebx + xorl %esi,%ebp + xorl 8(%esp),%ebx + andl %edx,%ebp + xorl 28(%esp),%ebx + roll $1,%ebx + addl %eax,%ebp + rorl $2,%edx + movl %ecx,%eax + roll $5,%eax + movl %ebx,40(%esp) + leal 2400959708(%ebx,%ebp,1),%ebx + movl %edi,%ebp + addl %eax,%ebx + andl %esi,%ebp + movl 44(%esp),%eax + addl %ebp,%ebx + + movl %edx,%ebp + xorl 52(%esp),%eax + xorl %edi,%ebp + xorl 12(%esp),%eax + andl %ecx,%ebp + xorl 32(%esp),%eax + roll $1,%eax + addl %esi,%ebp + rorl $2,%ecx + movl %ebx,%esi + roll $5,%esi + movl %eax,44(%esp) + leal 2400959708(%eax,%ebp,1),%eax + movl %edx,%ebp + addl %esi,%eax + andl %edi,%ebp + movl 48(%esp),%esi + addl %ebp,%eax + + movl %ebx,%ebp + xorl 56(%esp),%esi + xorl %ecx,%ebp + xorl 16(%esp),%esi + xorl %edx,%ebp + xorl 36(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,48(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 52(%esp),%edi + addl %ebp,%esi + + movl %eax,%ebp + xorl 60(%esp),%edi + xorl %ebx,%ebp + xorl 20(%esp),%edi + xorl %ecx,%ebp + xorl 40(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,52(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 56(%esp),%edx + addl %ebp,%edi + + movl %esi,%ebp + xorl (%esp),%edx + xorl %eax,%ebp + xorl 24(%esp),%edx + xorl %ebx,%ebp + xorl 44(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,56(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 60(%esp),%ecx + addl %ebp,%edx + + movl %edi,%ebp + xorl 4(%esp),%ecx + xorl %esi,%ebp + xorl 28(%esp),%ecx + xorl %eax,%ebp + xorl 48(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,60(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl (%esp),%ebx + addl %ebp,%ecx + + movl %edx,%ebp + xorl 8(%esp),%ebx + xorl %edi,%ebp + xorl 32(%esp),%ebx + xorl %esi,%ebp + xorl 52(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 4(%esp),%eax + addl %ebp,%ebx + + movl %ecx,%ebp + xorl 12(%esp),%eax + xorl %edx,%ebp + xorl 36(%esp),%eax + xorl %edi,%ebp + xorl 56(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,4(%esp) + leal 3395469782(%eax,%esi,1),%eax + movl 8(%esp),%esi + addl %ebp,%eax + + movl %ebx,%ebp + xorl 16(%esp),%esi + xorl %ecx,%ebp + xorl 40(%esp),%esi + xorl %edx,%ebp + xorl 60(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,8(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 12(%esp),%edi + addl %ebp,%esi + + movl %eax,%ebp + xorl 20(%esp),%edi + xorl %ebx,%ebp + xorl 44(%esp),%edi + xorl %ecx,%ebp + xorl (%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,12(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 16(%esp),%edx + addl %ebp,%edi + + movl %esi,%ebp + xorl 24(%esp),%edx + xorl 
%eax,%ebp + xorl 48(%esp),%edx + xorl %ebx,%ebp + xorl 4(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,16(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 20(%esp),%ecx + addl %ebp,%edx + + movl %edi,%ebp + xorl 28(%esp),%ecx + xorl %esi,%ebp + xorl 52(%esp),%ecx + xorl %eax,%ebp + xorl 8(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,20(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl 24(%esp),%ebx + addl %ebp,%ecx + + movl %edx,%ebp + xorl 32(%esp),%ebx + xorl %edi,%ebp + xorl 56(%esp),%ebx + xorl %esi,%ebp + xorl 12(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,24(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 28(%esp),%eax + addl %ebp,%ebx + + movl %ecx,%ebp + xorl 36(%esp),%eax + xorl %edx,%ebp + xorl 60(%esp),%eax + xorl %edi,%ebp + xorl 16(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + movl %eax,28(%esp) + leal 3395469782(%eax,%esi,1),%eax + movl 32(%esp),%esi + addl %ebp,%eax + + movl %ebx,%ebp + xorl 40(%esp),%esi + xorl %ecx,%ebp + xorl (%esp),%esi + xorl %edx,%ebp + xorl 20(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + movl %esi,32(%esp) + leal 3395469782(%esi,%edi,1),%esi + movl 36(%esp),%edi + addl %ebp,%esi + + movl %eax,%ebp + xorl 44(%esp),%edi + xorl %ebx,%ebp + xorl 4(%esp),%edi + xorl %ecx,%ebp + xorl 24(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + movl %edi,36(%esp) + leal 3395469782(%edi,%edx,1),%edi + movl 40(%esp),%edx + addl %ebp,%edi + + movl %esi,%ebp + xorl 48(%esp),%edx + xorl %eax,%ebp + xorl 8(%esp),%edx + xorl %ebx,%ebp + xorl 28(%esp),%edx + roll $1,%edx + addl %ebp,%ecx + rorl $2,%esi + movl %edi,%ebp + roll $5,%ebp + movl %edx,40(%esp) + leal 3395469782(%edx,%ecx,1),%edx + movl 44(%esp),%ecx + addl %ebp,%edx + + movl %edi,%ebp + xorl 52(%esp),%ecx + xorl %esi,%ebp + xorl 12(%esp),%ecx + xorl %eax,%ebp + xorl 32(%esp),%ecx + roll $1,%ecx + addl %ebp,%ebx + rorl $2,%edi + movl %edx,%ebp + roll $5,%ebp + movl %ecx,44(%esp) + leal 3395469782(%ecx,%ebx,1),%ecx + movl 48(%esp),%ebx + addl %ebp,%ecx + + movl %edx,%ebp + xorl 56(%esp),%ebx + xorl %edi,%ebp + xorl 16(%esp),%ebx + xorl %esi,%ebp + xorl 36(%esp),%ebx + roll $1,%ebx + addl %ebp,%eax + rorl $2,%edx + movl %ecx,%ebp + roll $5,%ebp + movl %ebx,48(%esp) + leal 3395469782(%ebx,%eax,1),%ebx + movl 52(%esp),%eax + addl %ebp,%ebx + + movl %ecx,%ebp + xorl 60(%esp),%eax + xorl %edx,%ebp + xorl 20(%esp),%eax + xorl %edi,%ebp + xorl 40(%esp),%eax + roll $1,%eax + addl %ebp,%esi + rorl $2,%ecx + movl %ebx,%ebp + roll $5,%ebp + leal 3395469782(%eax,%esi,1),%eax + movl 56(%esp),%esi + addl %ebp,%eax + + movl %ebx,%ebp + xorl (%esp),%esi + xorl %ecx,%ebp + xorl 24(%esp),%esi + xorl %edx,%ebp + xorl 44(%esp),%esi + roll $1,%esi + addl %ebp,%edi + rorl $2,%ebx + movl %eax,%ebp + roll $5,%ebp + leal 3395469782(%esi,%edi,1),%esi + movl 60(%esp),%edi + addl %ebp,%esi + + movl %eax,%ebp + xorl 4(%esp),%edi + xorl %ebx,%ebp + xorl 28(%esp),%edi + xorl %ecx,%ebp + xorl 48(%esp),%edi + roll $1,%edi + addl %ebp,%edx + rorl $2,%eax + movl %esi,%ebp + roll $5,%ebp + leal 3395469782(%edi,%edx,1),%edi + addl %ebp,%edi + movl 96(%esp),%ebp + movl 100(%esp),%edx + addl (%ebp),%edi + addl 4(%ebp),%esi + addl 8(%ebp),%eax + addl 12(%ebp),%ebx + addl 16(%ebp),%ecx + movl %edi,(%ebp) + addl $64,%edx + movl %esi,4(%ebp) + cmpl 
104(%esp),%edx + movl %eax,8(%ebp) + movl %ecx,%edi + movl %ebx,12(%ebp) + movl %edx,%esi + movl %ecx,16(%ebp) + jb .L002loop + addl $76,%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.size sha1_block_data_order,.-.L_sha1_block_data_order_begin +.type _sha1_block_data_order_ssse3,@function +.align 16 +_sha1_block_data_order_ssse3: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + call .L003pic_point +.L003pic_point: + popl %ebp + leal .LK_XX_XX-.L003pic_point(%ebp),%ebp +.Lssse3_shortcut: + movdqa (%ebp),%xmm7 + movdqa 16(%ebp),%xmm0 + movdqa 32(%ebp),%xmm1 + movdqa 48(%ebp),%xmm2 + movdqa 64(%ebp),%xmm6 + movl 20(%esp),%edi + movl 24(%esp),%ebp + movl 28(%esp),%edx + movl %esp,%esi + subl $208,%esp + andl $-64,%esp + movdqa %xmm0,112(%esp) + movdqa %xmm1,128(%esp) + movdqa %xmm2,144(%esp) + shll $6,%edx + movdqa %xmm7,160(%esp) + addl %ebp,%edx + movdqa %xmm6,176(%esp) + addl $64,%ebp + movl %edi,192(%esp) + movl %ebp,196(%esp) + movl %edx,200(%esp) + movl %esi,204(%esp) + movl (%edi),%eax + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx + movl 16(%edi),%edi + movl %ebx,%esi + movdqu -64(%ebp),%xmm0 + movdqu -48(%ebp),%xmm1 + movdqu -32(%ebp),%xmm2 + movdqu -16(%ebp),%xmm3 +.byte 102,15,56,0,198 +.byte 102,15,56,0,206 +.byte 102,15,56,0,214 + movdqa %xmm7,96(%esp) +.byte 102,15,56,0,222 + paddd %xmm7,%xmm0 + paddd %xmm7,%xmm1 + paddd %xmm7,%xmm2 + movdqa %xmm0,(%esp) + psubd %xmm7,%xmm0 + movdqa %xmm1,16(%esp) + psubd %xmm7,%xmm1 + movdqa %xmm2,32(%esp) + psubd %xmm7,%xmm2 + movdqa %xmm1,%xmm4 + jmp .L004loop +.align 16 +.L004loop: + addl (%esp),%edi + xorl %edx,%ecx +.byte 102,15,58,15,224,8 + movdqa %xmm3,%xmm6 + movl %eax,%ebp + roll $5,%eax + paddd %xmm3,%xmm7 + movdqa %xmm0,64(%esp) + andl %ecx,%esi + xorl %edx,%ecx + psrldq $4,%xmm6 + xorl %edx,%esi + addl %eax,%edi + pxor %xmm0,%xmm4 + rorl $2,%ebx + addl %esi,%edi + pxor %xmm2,%xmm6 + addl 4(%esp),%edx + xorl %ecx,%ebx + movl %edi,%esi + roll $5,%edi + pxor %xmm6,%xmm4 + andl %ebx,%ebp + xorl %ecx,%ebx + movdqa %xmm7,48(%esp) + xorl %ecx,%ebp + addl %edi,%edx + movdqa %xmm4,%xmm0 + movdqa %xmm4,%xmm6 + rorl $7,%eax + addl %ebp,%edx + addl 8(%esp),%ecx + xorl %ebx,%eax + pslldq $12,%xmm0 + paddd %xmm4,%xmm4 + movl %edx,%ebp + roll $5,%edx + andl %eax,%esi + xorl %ebx,%eax + psrld $31,%xmm6 + xorl %ebx,%esi + addl %edx,%ecx + movdqa %xmm0,%xmm7 + rorl $7,%edi + addl %esi,%ecx + psrld $30,%xmm0 + por %xmm6,%xmm4 + addl 12(%esp),%ebx + xorl %eax,%edi + movl %ecx,%esi + roll $5,%ecx + pslld $2,%xmm7 + pxor %xmm0,%xmm4 + andl %edi,%ebp + xorl %eax,%edi + movdqa 96(%esp),%xmm0 + xorl %eax,%ebp + addl %ecx,%ebx + pxor %xmm7,%xmm4 + movdqa %xmm2,%xmm5 + rorl $7,%edx + addl %ebp,%ebx + addl 16(%esp),%eax + xorl %edi,%edx +.byte 102,15,58,15,233,8 + movdqa %xmm4,%xmm7 + movl %ebx,%ebp + roll $5,%ebx + paddd %xmm4,%xmm0 + movdqa %xmm1,80(%esp) + andl %edx,%esi + xorl %edi,%edx + psrldq $4,%xmm7 + xorl %edi,%esi + addl %ebx,%eax + pxor %xmm1,%xmm5 + rorl $7,%ecx + addl %esi,%eax + pxor %xmm3,%xmm7 + addl 20(%esp),%edi + xorl %edx,%ecx + movl %eax,%esi + roll $5,%eax + pxor %xmm7,%xmm5 + andl %ecx,%ebp + xorl %edx,%ecx + movdqa %xmm0,(%esp) + xorl %edx,%ebp + addl %eax,%edi + movdqa %xmm5,%xmm1 + movdqa %xmm5,%xmm7 + rorl $7,%ebx + addl %ebp,%edi + addl 24(%esp),%edx + xorl %ecx,%ebx + pslldq $12,%xmm1 + paddd %xmm5,%xmm5 + movl %edi,%ebp + roll $5,%edi + andl %ebx,%esi + xorl %ecx,%ebx + psrld $31,%xmm7 + xorl %ecx,%esi + addl %edi,%edx + movdqa %xmm1,%xmm0 + rorl $7,%eax + addl %esi,%edx + psrld $30,%xmm1 + por 
%xmm7,%xmm5 + addl 28(%esp),%ecx + xorl %ebx,%eax + movl %edx,%esi + roll $5,%edx + pslld $2,%xmm0 + pxor %xmm1,%xmm5 + andl %eax,%ebp + xorl %ebx,%eax + movdqa 112(%esp),%xmm1 + xorl %ebx,%ebp + addl %edx,%ecx + pxor %xmm0,%xmm5 + movdqa %xmm3,%xmm6 + rorl $7,%edi + addl %ebp,%ecx + addl 32(%esp),%ebx + xorl %eax,%edi +.byte 102,15,58,15,242,8 + movdqa %xmm5,%xmm0 + movl %ecx,%ebp + roll $5,%ecx + paddd %xmm5,%xmm1 + movdqa %xmm2,96(%esp) + andl %edi,%esi + xorl %eax,%edi + psrldq $4,%xmm0 + xorl %eax,%esi + addl %ecx,%ebx + pxor %xmm2,%xmm6 + rorl $7,%edx + addl %esi,%ebx + pxor %xmm4,%xmm0 + addl 36(%esp),%eax + xorl %edi,%edx + movl %ebx,%esi + roll $5,%ebx + pxor %xmm0,%xmm6 + andl %edx,%ebp + xorl %edi,%edx + movdqa %xmm1,16(%esp) + xorl %edi,%ebp + addl %ebx,%eax + movdqa %xmm6,%xmm2 + movdqa %xmm6,%xmm0 + rorl $7,%ecx + addl %ebp,%eax + addl 40(%esp),%edi + xorl %edx,%ecx + pslldq $12,%xmm2 + paddd %xmm6,%xmm6 + movl %eax,%ebp + roll $5,%eax + andl %ecx,%esi + xorl %edx,%ecx + psrld $31,%xmm0 + xorl %edx,%esi + addl %eax,%edi + movdqa %xmm2,%xmm1 + rorl $7,%ebx + addl %esi,%edi + psrld $30,%xmm2 + por %xmm0,%xmm6 + addl 44(%esp),%edx + xorl %ecx,%ebx + movdqa 64(%esp),%xmm0 + movl %edi,%esi + roll $5,%edi + pslld $2,%xmm1 + pxor %xmm2,%xmm6 + andl %ebx,%ebp + xorl %ecx,%ebx + movdqa 112(%esp),%xmm2 + xorl %ecx,%ebp + addl %edi,%edx + pxor %xmm1,%xmm6 + movdqa %xmm4,%xmm7 + rorl $7,%eax + addl %ebp,%edx + addl 48(%esp),%ecx + xorl %ebx,%eax +.byte 102,15,58,15,251,8 + movdqa %xmm6,%xmm1 + movl %edx,%ebp + roll $5,%edx + paddd %xmm6,%xmm2 + movdqa %xmm3,64(%esp) + andl %eax,%esi + xorl %ebx,%eax + psrldq $4,%xmm1 + xorl %ebx,%esi + addl %edx,%ecx + pxor %xmm3,%xmm7 + rorl $7,%edi + addl %esi,%ecx + pxor %xmm5,%xmm1 + addl 52(%esp),%ebx + xorl %eax,%edi + movl %ecx,%esi + roll $5,%ecx + pxor %xmm1,%xmm7 + andl %edi,%ebp + xorl %eax,%edi + movdqa %xmm2,32(%esp) + xorl %eax,%ebp + addl %ecx,%ebx + movdqa %xmm7,%xmm3 + movdqa %xmm7,%xmm1 + rorl $7,%edx + addl %ebp,%ebx + addl 56(%esp),%eax + xorl %edi,%edx + pslldq $12,%xmm3 + paddd %xmm7,%xmm7 + movl %ebx,%ebp + roll $5,%ebx + andl %edx,%esi + xorl %edi,%edx + psrld $31,%xmm1 + xorl %edi,%esi + addl %ebx,%eax + movdqa %xmm3,%xmm2 + rorl $7,%ecx + addl %esi,%eax + psrld $30,%xmm3 + por %xmm1,%xmm7 + addl 60(%esp),%edi + xorl %edx,%ecx + movdqa 80(%esp),%xmm1 + movl %eax,%esi + roll $5,%eax + pslld $2,%xmm2 + pxor %xmm3,%xmm7 + andl %ecx,%ebp + xorl %edx,%ecx + movdqa 112(%esp),%xmm3 + xorl %edx,%ebp + addl %eax,%edi + pxor %xmm2,%xmm7 + rorl $7,%ebx + addl %ebp,%edi + movdqa %xmm7,%xmm2 + addl (%esp),%edx + pxor %xmm4,%xmm0 +.byte 102,15,58,15,214,8 + xorl %ecx,%ebx + movl %edi,%ebp + roll $5,%edi + pxor %xmm1,%xmm0 + movdqa %xmm4,80(%esp) + andl %ebx,%esi + xorl %ecx,%ebx + movdqa %xmm3,%xmm4 + paddd %xmm7,%xmm3 + xorl %ecx,%esi + addl %edi,%edx + pxor %xmm2,%xmm0 + rorl $7,%eax + addl %esi,%edx + addl 4(%esp),%ecx + xorl %ebx,%eax + movdqa %xmm0,%xmm2 + movdqa %xmm3,48(%esp) + movl %edx,%esi + roll $5,%edx + andl %eax,%ebp + xorl %ebx,%eax + pslld $2,%xmm0 + xorl %ebx,%ebp + addl %edx,%ecx + psrld $30,%xmm2 + rorl $7,%edi + addl %ebp,%ecx + addl 8(%esp),%ebx + xorl %eax,%edi + movl %ecx,%ebp + roll $5,%ecx + por %xmm2,%xmm0 + andl %edi,%esi + xorl %eax,%edi + movdqa 96(%esp),%xmm2 + xorl %eax,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 12(%esp),%eax + movdqa %xmm0,%xmm3 + xorl %edi,%edx + movl %ebx,%esi + roll $5,%ebx + andl %edx,%ebp + xorl %edi,%edx + xorl %edi,%ebp + addl %ebx,%eax + rorl $7,%ecx + addl %ebp,%eax + 
addl 16(%esp),%edi + pxor %xmm5,%xmm1 +.byte 102,15,58,15,223,8 + xorl %edx,%esi + movl %eax,%ebp + roll $5,%eax + pxor %xmm2,%xmm1 + movdqa %xmm5,96(%esp) + xorl %ecx,%esi + addl %eax,%edi + movdqa %xmm4,%xmm5 + paddd %xmm0,%xmm4 + rorl $7,%ebx + addl %esi,%edi + pxor %xmm3,%xmm1 + addl 20(%esp),%edx + xorl %ecx,%ebp + movl %edi,%esi + roll $5,%edi + movdqa %xmm1,%xmm3 + movdqa %xmm4,(%esp) + xorl %ebx,%ebp + addl %edi,%edx + rorl $7,%eax + addl %ebp,%edx + pslld $2,%xmm1 + addl 24(%esp),%ecx + xorl %ebx,%esi + psrld $30,%xmm3 + movl %edx,%ebp + roll $5,%edx + xorl %eax,%esi + addl %edx,%ecx + rorl $7,%edi + addl %esi,%ecx + por %xmm3,%xmm1 + addl 28(%esp),%ebx + xorl %eax,%ebp + movdqa 64(%esp),%xmm3 + movl %ecx,%esi + roll $5,%ecx + xorl %edi,%ebp + addl %ecx,%ebx + rorl $7,%edx + movdqa %xmm1,%xmm4 + addl %ebp,%ebx + addl 32(%esp),%eax + pxor %xmm6,%xmm2 +.byte 102,15,58,15,224,8 + xorl %edi,%esi + movl %ebx,%ebp + roll $5,%ebx + pxor %xmm3,%xmm2 + movdqa %xmm6,64(%esp) + xorl %edx,%esi + addl %ebx,%eax + movdqa 128(%esp),%xmm6 + paddd %xmm1,%xmm5 + rorl $7,%ecx + addl %esi,%eax + pxor %xmm4,%xmm2 + addl 36(%esp),%edi + xorl %edx,%ebp + movl %eax,%esi + roll $5,%eax + movdqa %xmm2,%xmm4 + movdqa %xmm5,16(%esp) + xorl %ecx,%ebp + addl %eax,%edi + rorl $7,%ebx + addl %ebp,%edi + pslld $2,%xmm2 + addl 40(%esp),%edx + xorl %ecx,%esi + psrld $30,%xmm4 + movl %edi,%ebp + roll $5,%edi + xorl %ebx,%esi + addl %edi,%edx + rorl $7,%eax + addl %esi,%edx + por %xmm4,%xmm2 + addl 44(%esp),%ecx + xorl %ebx,%ebp + movdqa 80(%esp),%xmm4 + movl %edx,%esi + roll $5,%edx + xorl %eax,%ebp + addl %edx,%ecx + rorl $7,%edi + movdqa %xmm2,%xmm5 + addl %ebp,%ecx + addl 48(%esp),%ebx + pxor %xmm7,%xmm3 +.byte 102,15,58,15,233,8 + xorl %eax,%esi + movl %ecx,%ebp + roll $5,%ecx + pxor %xmm4,%xmm3 + movdqa %xmm7,80(%esp) + xorl %edi,%esi + addl %ecx,%ebx + movdqa %xmm6,%xmm7 + paddd %xmm2,%xmm6 + rorl $7,%edx + addl %esi,%ebx + pxor %xmm5,%xmm3 + addl 52(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + roll $5,%ebx + movdqa %xmm3,%xmm5 + movdqa %xmm6,32(%esp) + xorl %edx,%ebp + addl %ebx,%eax + rorl $7,%ecx + addl %ebp,%eax + pslld $2,%xmm3 + addl 56(%esp),%edi + xorl %edx,%esi + psrld $30,%xmm5 + movl %eax,%ebp + roll $5,%eax + xorl %ecx,%esi + addl %eax,%edi + rorl $7,%ebx + addl %esi,%edi + por %xmm5,%xmm3 + addl 60(%esp),%edx + xorl %ecx,%ebp + movdqa 96(%esp),%xmm5 + movl %edi,%esi + roll $5,%edi + xorl %ebx,%ebp + addl %edi,%edx + rorl $7,%eax + movdqa %xmm3,%xmm6 + addl %ebp,%edx + addl (%esp),%ecx + pxor %xmm0,%xmm4 +.byte 102,15,58,15,242,8 + xorl %ebx,%esi + movl %edx,%ebp + roll $5,%edx + pxor %xmm5,%xmm4 + movdqa %xmm0,96(%esp) + xorl %eax,%esi + addl %edx,%ecx + movdqa %xmm7,%xmm0 + paddd %xmm3,%xmm7 + rorl $7,%edi + addl %esi,%ecx + pxor %xmm6,%xmm4 + addl 4(%esp),%ebx + xorl %eax,%ebp + movl %ecx,%esi + roll $5,%ecx + movdqa %xmm4,%xmm6 + movdqa %xmm7,48(%esp) + xorl %edi,%ebp + addl %ecx,%ebx + rorl $7,%edx + addl %ebp,%ebx + pslld $2,%xmm4 + addl 8(%esp),%eax + xorl %edi,%esi + psrld $30,%xmm6 + movl %ebx,%ebp + roll $5,%ebx + xorl %edx,%esi + addl %ebx,%eax + rorl $7,%ecx + addl %esi,%eax + por %xmm6,%xmm4 + addl 12(%esp),%edi + xorl %edx,%ebp + movdqa 64(%esp),%xmm6 + movl %eax,%esi + roll $5,%eax + xorl %ecx,%ebp + addl %eax,%edi + rorl $7,%ebx + movdqa %xmm4,%xmm7 + addl %ebp,%edi + addl 16(%esp),%edx + pxor %xmm1,%xmm5 +.byte 102,15,58,15,251,8 + xorl %ecx,%esi + movl %edi,%ebp + roll $5,%edi + pxor %xmm6,%xmm5 + movdqa %xmm1,64(%esp) + xorl %ebx,%esi + addl %edi,%edx + movdqa %xmm0,%xmm1 + 
paddd %xmm4,%xmm0 + rorl $7,%eax + addl %esi,%edx + pxor %xmm7,%xmm5 + addl 20(%esp),%ecx + xorl %ebx,%ebp + movl %edx,%esi + roll $5,%edx + movdqa %xmm5,%xmm7 + movdqa %xmm0,(%esp) + xorl %eax,%ebp + addl %edx,%ecx + rorl $7,%edi + addl %ebp,%ecx + pslld $2,%xmm5 + addl 24(%esp),%ebx + xorl %eax,%esi + psrld $30,%xmm7 + movl %ecx,%ebp + roll $5,%ecx + xorl %edi,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + por %xmm7,%xmm5 + addl 28(%esp),%eax + xorl %edi,%ebp + movdqa 80(%esp),%xmm7 + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%ebp + addl %ebx,%eax + rorl $7,%ecx + movdqa %xmm5,%xmm0 + addl %ebp,%eax + movl %ecx,%ebp + pxor %xmm2,%xmm6 +.byte 102,15,58,15,196,8 + xorl %edx,%ecx + addl 32(%esp),%edi + andl %edx,%ebp + pxor %xmm7,%xmm6 + movdqa %xmm2,80(%esp) + andl %ecx,%esi + rorl $7,%ebx + movdqa %xmm1,%xmm2 + paddd %xmm5,%xmm1 + addl %ebp,%edi + movl %eax,%ebp + pxor %xmm0,%xmm6 + roll $5,%eax + addl %esi,%edi + xorl %edx,%ecx + addl %eax,%edi + movdqa %xmm6,%xmm0 + movdqa %xmm1,16(%esp) + movl %ebx,%esi + xorl %ecx,%ebx + addl 36(%esp),%edx + andl %ecx,%esi + pslld $2,%xmm6 + andl %ebx,%ebp + rorl $7,%eax + psrld $30,%xmm0 + addl %esi,%edx + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ecx,%ebx + addl %edi,%edx + por %xmm0,%xmm6 + movl %eax,%ebp + xorl %ebx,%eax + movdqa 96(%esp),%xmm0 + addl 40(%esp),%ecx + andl %ebx,%ebp + andl %eax,%esi + rorl $7,%edi + addl %ebp,%ecx + movdqa %xmm6,%xmm1 + movl %edx,%ebp + roll $5,%edx + addl %esi,%ecx + xorl %ebx,%eax + addl %edx,%ecx + movl %edi,%esi + xorl %eax,%edi + addl 44(%esp),%ebx + andl %eax,%esi + andl %edi,%ebp + rorl $7,%edx + addl %esi,%ebx + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %eax,%edi + addl %ecx,%ebx + movl %edx,%ebp + pxor %xmm3,%xmm7 +.byte 102,15,58,15,205,8 + xorl %edi,%edx + addl 48(%esp),%eax + andl %edi,%ebp + pxor %xmm0,%xmm7 + movdqa %xmm3,96(%esp) + andl %edx,%esi + rorl $7,%ecx + movdqa 144(%esp),%xmm3 + paddd %xmm6,%xmm2 + addl %ebp,%eax + movl %ebx,%ebp + pxor %xmm1,%xmm7 + roll $5,%ebx + addl %esi,%eax + xorl %edi,%edx + addl %ebx,%eax + movdqa %xmm7,%xmm1 + movdqa %xmm2,32(%esp) + movl %ecx,%esi + xorl %edx,%ecx + addl 52(%esp),%edi + andl %edx,%esi + pslld $2,%xmm7 + andl %ecx,%ebp + rorl $7,%ebx + psrld $30,%xmm1 + addl %esi,%edi + movl %eax,%esi + roll $5,%eax + addl %ebp,%edi + xorl %edx,%ecx + addl %eax,%edi + por %xmm1,%xmm7 + movl %ebx,%ebp + xorl %ecx,%ebx + movdqa 64(%esp),%xmm1 + addl 56(%esp),%edx + andl %ecx,%ebp + andl %ebx,%esi + rorl $7,%eax + addl %ebp,%edx + movdqa %xmm7,%xmm2 + movl %edi,%ebp + roll $5,%edi + addl %esi,%edx + xorl %ecx,%ebx + addl %edi,%edx + movl %eax,%esi + xorl %ebx,%eax + addl 60(%esp),%ecx + andl %ebx,%esi + andl %eax,%ebp + rorl $7,%edi + addl %esi,%ecx + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %ebx,%eax + addl %edx,%ecx + movl %edi,%ebp + pxor %xmm4,%xmm0 +.byte 102,15,58,15,214,8 + xorl %eax,%edi + addl (%esp),%ebx + andl %eax,%ebp + pxor %xmm1,%xmm0 + movdqa %xmm4,64(%esp) + andl %edi,%esi + rorl $7,%edx + movdqa %xmm3,%xmm4 + paddd %xmm7,%xmm3 + addl %ebp,%ebx + movl %ecx,%ebp + pxor %xmm2,%xmm0 + roll $5,%ecx + addl %esi,%ebx + xorl %eax,%edi + addl %ecx,%ebx + movdqa %xmm0,%xmm2 + movdqa %xmm3,48(%esp) + movl %edx,%esi + xorl %edi,%edx + addl 4(%esp),%eax + andl %edi,%esi + pslld $2,%xmm0 + andl %edx,%ebp + rorl $7,%ecx + psrld $30,%xmm2 + addl %esi,%eax + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + xorl %edi,%edx + addl %ebx,%eax + por %xmm2,%xmm0 + movl %ecx,%ebp + xorl %edx,%ecx + movdqa 80(%esp),%xmm2 + addl 
8(%esp),%edi + andl %edx,%ebp + andl %ecx,%esi + rorl $7,%ebx + addl %ebp,%edi + movdqa %xmm0,%xmm3 + movl %eax,%ebp + roll $5,%eax + addl %esi,%edi + xorl %edx,%ecx + addl %eax,%edi + movl %ebx,%esi + xorl %ecx,%ebx + addl 12(%esp),%edx + andl %ecx,%esi + andl %ebx,%ebp + rorl $7,%eax + addl %esi,%edx + movl %edi,%esi + roll $5,%edi + addl %ebp,%edx + xorl %ecx,%ebx + addl %edi,%edx + movl %eax,%ebp + pxor %xmm5,%xmm1 +.byte 102,15,58,15,223,8 + xorl %ebx,%eax + addl 16(%esp),%ecx + andl %ebx,%ebp + pxor %xmm2,%xmm1 + movdqa %xmm5,80(%esp) + andl %eax,%esi + rorl $7,%edi + movdqa %xmm4,%xmm5 + paddd %xmm0,%xmm4 + addl %ebp,%ecx + movl %edx,%ebp + pxor %xmm3,%xmm1 + roll $5,%edx + addl %esi,%ecx + xorl %ebx,%eax + addl %edx,%ecx + movdqa %xmm1,%xmm3 + movdqa %xmm4,(%esp) + movl %edi,%esi + xorl %eax,%edi + addl 20(%esp),%ebx + andl %eax,%esi + pslld $2,%xmm1 + andl %edi,%ebp + rorl $7,%edx + psrld $30,%xmm3 + addl %esi,%ebx + movl %ecx,%esi + roll $5,%ecx + addl %ebp,%ebx + xorl %eax,%edi + addl %ecx,%ebx + por %xmm3,%xmm1 + movl %edx,%ebp + xorl %edi,%edx + movdqa 96(%esp),%xmm3 + addl 24(%esp),%eax + andl %edi,%ebp + andl %edx,%esi + rorl $7,%ecx + addl %ebp,%eax + movdqa %xmm1,%xmm4 + movl %ebx,%ebp + roll $5,%ebx + addl %esi,%eax + xorl %edi,%edx + addl %ebx,%eax + movl %ecx,%esi + xorl %edx,%ecx + addl 28(%esp),%edi + andl %edx,%esi + andl %ecx,%ebp + rorl $7,%ebx + addl %esi,%edi + movl %eax,%esi + roll $5,%eax + addl %ebp,%edi + xorl %edx,%ecx + addl %eax,%edi + movl %ebx,%ebp + pxor %xmm6,%xmm2 +.byte 102,15,58,15,224,8 + xorl %ecx,%ebx + addl 32(%esp),%edx + andl %ecx,%ebp + pxor %xmm3,%xmm2 + movdqa %xmm6,96(%esp) + andl %ebx,%esi + rorl $7,%eax + movdqa %xmm5,%xmm6 + paddd %xmm1,%xmm5 + addl %ebp,%edx + movl %edi,%ebp + pxor %xmm4,%xmm2 + roll $5,%edi + addl %esi,%edx + xorl %ecx,%ebx + addl %edi,%edx + movdqa %xmm2,%xmm4 + movdqa %xmm5,16(%esp) + movl %eax,%esi + xorl %ebx,%eax + addl 36(%esp),%ecx + andl %ebx,%esi + pslld $2,%xmm2 + andl %eax,%ebp + rorl $7,%edi + psrld $30,%xmm4 + addl %esi,%ecx + movl %edx,%esi + roll $5,%edx + addl %ebp,%ecx + xorl %ebx,%eax + addl %edx,%ecx + por %xmm4,%xmm2 + movl %edi,%ebp + xorl %eax,%edi + movdqa 64(%esp),%xmm4 + addl 40(%esp),%ebx + andl %eax,%ebp + andl %edi,%esi + rorl $7,%edx + addl %ebp,%ebx + movdqa %xmm2,%xmm5 + movl %ecx,%ebp + roll $5,%ecx + addl %esi,%ebx + xorl %eax,%edi + addl %ecx,%ebx + movl %edx,%esi + xorl %edi,%edx + addl 44(%esp),%eax + andl %edi,%esi + andl %edx,%ebp + rorl $7,%ecx + addl %esi,%eax + movl %ebx,%esi + roll $5,%ebx + addl %ebp,%eax + xorl %edi,%edx + addl %ebx,%eax + addl 48(%esp),%edi + pxor %xmm7,%xmm3 +.byte 102,15,58,15,233,8 + xorl %edx,%esi + movl %eax,%ebp + roll $5,%eax + pxor %xmm4,%xmm3 + movdqa %xmm7,64(%esp) + xorl %ecx,%esi + addl %eax,%edi + movdqa %xmm6,%xmm7 + paddd %xmm2,%xmm6 + rorl $7,%ebx + addl %esi,%edi + pxor %xmm5,%xmm3 + addl 52(%esp),%edx + xorl %ecx,%ebp + movl %edi,%esi + roll $5,%edi + movdqa %xmm3,%xmm5 + movdqa %xmm6,32(%esp) + xorl %ebx,%ebp + addl %edi,%edx + rorl $7,%eax + addl %ebp,%edx + pslld $2,%xmm3 + addl 56(%esp),%ecx + xorl %ebx,%esi + psrld $30,%xmm5 + movl %edx,%ebp + roll $5,%edx + xorl %eax,%esi + addl %edx,%ecx + rorl $7,%edi + addl %esi,%ecx + por %xmm5,%xmm3 + addl 60(%esp),%ebx + xorl %eax,%ebp + movl %ecx,%esi + roll $5,%ecx + xorl %edi,%ebp + addl %ecx,%ebx + rorl $7,%edx + addl %ebp,%ebx + addl (%esp),%eax + paddd %xmm3,%xmm7 + xorl %edi,%esi + movl %ebx,%ebp + roll $5,%ebx + xorl %edx,%esi + movdqa %xmm7,48(%esp) + addl %ebx,%eax + rorl $7,%ecx + 
addl %esi,%eax + addl 4(%esp),%edi + xorl %edx,%ebp + movl %eax,%esi + roll $5,%eax + xorl %ecx,%ebp + addl %eax,%edi + rorl $7,%ebx + addl %ebp,%edi + addl 8(%esp),%edx + xorl %ecx,%esi + movl %edi,%ebp + roll $5,%edi + xorl %ebx,%esi + addl %edi,%edx + rorl $7,%eax + addl %esi,%edx + addl 12(%esp),%ecx + xorl %ebx,%ebp + movl %edx,%esi + roll $5,%edx + xorl %eax,%ebp + addl %edx,%ecx + rorl $7,%edi + addl %ebp,%ecx + movl 196(%esp),%ebp + cmpl 200(%esp),%ebp + je .L005done + movdqa 160(%esp),%xmm7 + movdqa 176(%esp),%xmm6 + movdqu (%ebp),%xmm0 + movdqu 16(%ebp),%xmm1 + movdqu 32(%ebp),%xmm2 + movdqu 48(%ebp),%xmm3 + addl $64,%ebp +.byte 102,15,56,0,198 + movl %ebp,196(%esp) + movdqa %xmm7,96(%esp) + addl 16(%esp),%ebx + xorl %eax,%esi +.byte 102,15,56,0,206 + movl %ecx,%ebp + roll $5,%ecx + paddd %xmm7,%xmm0 + xorl %edi,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + movdqa %xmm0,(%esp) + addl 20(%esp),%eax + xorl %edi,%ebp + psubd %xmm7,%xmm0 + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%ebp + addl %ebx,%eax + rorl $7,%ecx + addl %ebp,%eax + addl 24(%esp),%edi + xorl %edx,%esi + movl %eax,%ebp + roll $5,%eax + xorl %ecx,%esi + addl %eax,%edi + rorl $7,%ebx + addl %esi,%edi + addl 28(%esp),%edx + xorl %ecx,%ebp + movl %edi,%esi + roll $5,%edi + xorl %ebx,%ebp + addl %edi,%edx + rorl $7,%eax + addl %ebp,%edx + addl 32(%esp),%ecx + xorl %ebx,%esi +.byte 102,15,56,0,214 + movl %edx,%ebp + roll $5,%edx + paddd %xmm7,%xmm1 + xorl %eax,%esi + addl %edx,%ecx + rorl $7,%edi + addl %esi,%ecx + movdqa %xmm1,16(%esp) + addl 36(%esp),%ebx + xorl %eax,%ebp + psubd %xmm7,%xmm1 + movl %ecx,%esi + roll $5,%ecx + xorl %edi,%ebp + addl %ecx,%ebx + rorl $7,%edx + addl %ebp,%ebx + addl 40(%esp),%eax + xorl %edi,%esi + movl %ebx,%ebp + roll $5,%ebx + xorl %edx,%esi + addl %ebx,%eax + rorl $7,%ecx + addl %esi,%eax + addl 44(%esp),%edi + xorl %edx,%ebp + movl %eax,%esi + roll $5,%eax + xorl %ecx,%ebp + addl %eax,%edi + rorl $7,%ebx + addl %ebp,%edi + addl 48(%esp),%edx + xorl %ecx,%esi +.byte 102,15,56,0,222 + movl %edi,%ebp + roll $5,%edi + paddd %xmm7,%xmm2 + xorl %ebx,%esi + addl %edi,%edx + rorl $7,%eax + addl %esi,%edx + movdqa %xmm2,32(%esp) + addl 52(%esp),%ecx + xorl %ebx,%ebp + psubd %xmm7,%xmm2 + movl %edx,%esi + roll $5,%edx + xorl %eax,%ebp + addl %edx,%ecx + rorl $7,%edi + addl %ebp,%ecx + addl 56(%esp),%ebx + xorl %eax,%esi + movl %ecx,%ebp + roll $5,%ecx + xorl %edi,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 60(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%ebp + addl %ebx,%eax + rorl $7,%ecx + addl %ebp,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %esi,%ebx + movl %edx,12(%ebp) + movl %edi,16(%ebp) + movdqa %xmm1,%xmm4 + jmp .L004loop +.align 16 +.L005done: + addl 16(%esp),%ebx + xorl %eax,%esi + movl %ecx,%ebp + roll $5,%ecx + xorl %edi,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 20(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%ebp + addl %ebx,%eax + rorl $7,%ecx + addl %ebp,%eax + addl 24(%esp),%edi + xorl %edx,%esi + movl %eax,%ebp + roll $5,%eax + xorl %ecx,%esi + addl %eax,%edi + rorl $7,%ebx + addl %esi,%edi + addl 28(%esp),%edx + xorl %ecx,%ebp + movl %edi,%esi + roll $5,%edi + xorl %ebx,%ebp + addl %edi,%edx + rorl $7,%eax + addl %ebp,%edx + addl 32(%esp),%ecx + xorl %ebx,%esi + movl %edx,%ebp + roll $5,%edx + xorl %eax,%esi + addl %edx,%ecx + 
rorl $7,%edi + addl %esi,%ecx + addl 36(%esp),%ebx + xorl %eax,%ebp + movl %ecx,%esi + roll $5,%ecx + xorl %edi,%ebp + addl %ecx,%ebx + rorl $7,%edx + addl %ebp,%ebx + addl 40(%esp),%eax + xorl %edi,%esi + movl %ebx,%ebp + roll $5,%ebx + xorl %edx,%esi + addl %ebx,%eax + rorl $7,%ecx + addl %esi,%eax + addl 44(%esp),%edi + xorl %edx,%ebp + movl %eax,%esi + roll $5,%eax + xorl %ecx,%ebp + addl %eax,%edi + rorl $7,%ebx + addl %ebp,%edi + addl 48(%esp),%edx + xorl %ecx,%esi + movl %edi,%ebp + roll $5,%edi + xorl %ebx,%esi + addl %edi,%edx + rorl $7,%eax + addl %esi,%edx + addl 52(%esp),%ecx + xorl %ebx,%ebp + movl %edx,%esi + roll $5,%edx + xorl %eax,%ebp + addl %edx,%ecx + rorl $7,%edi + addl %ebp,%ecx + addl 56(%esp),%ebx + xorl %eax,%esi + movl %ecx,%ebp + roll $5,%ecx + xorl %edi,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 60(%esp),%eax + xorl %edi,%ebp + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%ebp + addl %ebx,%eax + rorl $7,%ecx + addl %ebp,%eax + movl 192(%esp),%ebp + addl (%ebp),%eax + movl 204(%esp),%esp + addl 4(%ebp),%esi + addl 8(%ebp),%ecx + movl %eax,(%ebp) + addl 12(%ebp),%edx + movl %esi,4(%ebp) + addl 16(%ebp),%edi + movl %ecx,8(%ebp) + movl %edx,12(%ebp) + movl %edi,16(%ebp) + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.size _sha1_block_data_order_ssse3,.-_sha1_block_data_order_ssse3 +.align 64 +.LK_XX_XX: +.long 1518500249,1518500249,1518500249,1518500249 +.long 1859775393,1859775393,1859775393,1859775393 +.long 2400959708,2400959708,2400959708,2400959708 +.long 3395469782,3395469782,3395469782,3395469782 +.long 66051,67438087,134810123,202182159 +.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 +.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 +.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 +.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 +.comm OPENSSL_ia32cap_P,8,4 diff --git a/app/openssl/crypto/sha/asm/sha1-586.pl b/app/openssl/crypto/sha/asm/sha1-586.pl index a1f87628..2b119ffa 100644 --- a/app/openssl/crypto/sha/asm/sha1-586.pl +++ b/app/openssl/crypto/sha/asm/sha1-586.pl @@ -12,6 +12,8 @@ # commentary below], and in 2006 the rest was rewritten in order to # gain freedom to liberate licensing terms. +# January, September 2004. +# # It was noted that Intel IA-32 C compiler generates code which # performs ~30% *faster* on P4 CPU than original *hand-coded* # SHA1 assembler implementation. To address this problem (and @@ -31,12 +33,92 @@ # ---------------------------------------------------------------- # +# August 2009. +# +# George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as +# '(c&d) + (b&(c^d))', which allows to accumulate partial results +# and lighten "pressure" on scratch registers. This resulted in +# >12% performance improvement on contemporary AMD cores (with no +# degradation on other CPUs:-). Also, the code was revised to maximize +# "distance" between instructions producing input to 'lea' instruction +# and the 'lea' instruction itself, which is essential for Intel Atom +# core and resulted in ~15% improvement. + +# October 2010. +# +# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it +# is to offload message schedule denoted by Wt in NIST specification, +# or Xupdate in OpenSSL source, to SIMD unit. The idea is not novel, +# and in SSE2 context was first explored by Dean Gaudet in 2004, see +# http://arctic.org/~dean/crypto/sha1.html. 
Since then several things +# have changed that made it interesting again: +# +# a) XMM units became faster and wider; +# b) instruction set became more versatile; +# c) an important observation was made by Max Locktykhin, which made +# it possible to reduce amount of instructions required to perform +# the operation in question, for further details see +# http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/. + +# April 2011. +# +# Add AVX code path, probably most controversial... The thing is that +# switch to AVX alone improves performance by as little as 4% in +# comparison to SSSE3 code path. But below result doesn't look like +# 4% improvement... Trouble is that Sandy Bridge decodes 'ro[rl]' as +# pair of µ-ops, and it's the additional µ-ops, two per round, that +# make it run slower than Core2 and Westmere. But 'sh[rl]d' is decoded +# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with +# equivalent 'sh[rl]d' that is responsible for the impressive 5.1 +# cycles per processed byte. But 'sh[rl]d' is not something that used +# to be fast, nor does it appear to be fast in upcoming Bulldozer +# [according to its optimization manual]. Which is why AVX code path +# is guarded by *both* AVX and synthetic bit denoting Intel CPUs. +# One can argue that it's unfair to AMD, but without 'sh[rl]d' it +# makes no sense to keep the AVX code path. If somebody feels that +# strongly, it's probably more appropriate to discuss possibility of +# using vector rotate XOP on AMD... + +###################################################################### +# Current performance is summarized in following table. Numbers are +# CPU clock cycles spent to process single byte (less is better). +# +# x86 SSSE3 AVX +# Pentium 15.7 - +# PIII 11.5 - +# P4 10.6 - +# AMD K8 7.1 - +# Core2 7.3 6.1/+20% - +# Atom 12.5 9.5(*)/+32% - +# Westmere 7.3 5.6/+30% - +# Sandy Bridge 8.8 6.2/+40% 5.1(**)/+70% +# +# (*) Loop is 1056 instructions long and expected result is ~8.25. +# It remains mystery [to me] why ILP is limited to 1.7. +# +# (**) As per above comment, the result is for AVX *plus* sh[rl]d. + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; push(@INC,"${dir}","${dir}../../perlasm"); require "x86asm.pl"; &asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386"); +$xmm=$ymm=0; +for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); } + +$ymm=1 if ($xmm && + `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/ && + $1>=2.19); # first version supporting AVX + +$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ && + $1>=2.03); # first version supporting AVX + +&external_label("OPENSSL_ia32cap_P") if ($xmm); + + $A="eax"; $B="ebx"; $C="ecx"; @@ -47,6 +129,10 @@ $tmp1="ebp"; @V=($A,$B,$C,$D,$E,$T); +$alt=0; # 1 denotes alternative IALU implementation, which performs + # 8% *worse* on P4, same on Westmere and Atom, 2% better on + # Sandy Bridge... + sub BODY_00_15 { local($n,$a,$b,$c,$d,$e,$f)=@_; @@ -59,16 +145,18 @@ sub BODY_00_15 &rotl($tmp1,5); # tmp1=ROTATE(a,5) &xor($f,$d); &add($tmp1,$e); # tmp1+=e; - &and($f,$b); - &mov($e,&swtmp($n%16)); # e becomes volatile and is loaded + &mov($e,&swtmp($n%16)); # e becomes volatile and is loaded # with xi, also note that e becomes # f in next round... 
- &xor($f,$d); # f holds F_00_19(b,c,d) + &and($f,$b); &rotr($b,2); # b=ROTATE(b,30) - &lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi + &xor($f,$d); # f holds F_00_19(b,c,d) + &lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi - if ($n==15) { &add($f,$tmp1); } # f+=tmp1 + if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round + &add($f,$tmp1); } # f+=tmp1 else { &add($tmp1,$f); } # f becomes a in next round + &mov($tmp1,$a) if ($alt && $n==15); } sub BODY_16_19 @@ -77,22 +165,41 @@ sub BODY_16_19 &comment("16_19 $n"); - &mov($f,&swtmp($n%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) - &mov($tmp1,$c); # tmp1 to hold F_00_19(b,c,d) - &xor($f,&swtmp(($n+2)%16)); - &xor($tmp1,$d); - &xor($f,&swtmp(($n+8)%16)); - &and($tmp1,$b); # tmp1 holds F_00_19(b,c,d) - &rotr($b,2); # b=ROTATE(b,30) +if ($alt) { + &xor($c,$d); + &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) + &and($tmp1,$c); # tmp1 to hold F_00_19(b,c,d), b&=c^d + &xor($f,&swtmp(($n+8)%16)); + &xor($tmp1,$d); # tmp1=F_00_19(b,c,d) + &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd + &rotl($f,1); # f=ROTATE(f,1) + &add($e,$tmp1); # e+=F_00_19(b,c,d) + &xor($c,$d); # restore $c + &mov($tmp1,$a); # b in next round + &rotr($b,$n==16?2:7); # b=ROTATE(b,30) + &mov(&swtmp($n%16),$f); # xi=f + &rotl($a,5); # ROTATE(a,5) + &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e + &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round + &add($f,$a); # f+=ROTATE(a,5) +} else { + &mov($tmp1,$c); # tmp1 to hold F_00_19(b,c,d) + &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) + &xor($tmp1,$d); + &xor($f,&swtmp(($n+8)%16)); + &and($tmp1,$b); &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd &rotl($f,1); # f=ROTATE(f,1) &xor($tmp1,$d); # tmp1=F_00_19(b,c,d) - &mov(&swtmp($n%16),$f); # xi=f - &lea($f,&DWP(0x5a827999,$f,$e));# f+=K_00_19+e - &mov($e,$a); # e becomes volatile - &rotl($e,5); # e=ROTATE(a,5) - &add($f,$tmp1); # f+=F_00_19(b,c,d) - &add($f,$e); # f+=ROTATE(a,5) + &add($e,$tmp1); # e+=F_00_19(b,c,d) + &mov($tmp1,$a); + &rotr($b,2); # b=ROTATE(b,30) + &mov(&swtmp($n%16),$f); # xi=f + &rotl($tmp1,5); # ROTATE(a,5) + &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e + &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round + &add($f,$tmp1); # f+=ROTATE(a,5) +} } sub BODY_20_39 @@ -102,21 +209,41 @@ sub BODY_20_39 &comment("20_39 $n"); +if ($alt) { + &xor($tmp1,$c); # tmp1 to hold F_20_39(b,c,d), b^=c + &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) + &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d) + &xor($f,&swtmp(($n+8)%16)); + &add($e,$tmp1); # e+=F_20_39(b,c,d) + &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd + &rotl($f,1); # f=ROTATE(f,1) + &mov($tmp1,$a); # b in next round + &rotr($b,7); # b=ROTATE(b,30) + &mov(&swtmp($n%16),$f) if($n<77);# xi=f + &rotl($a,5); # ROTATE(a,5) + &xor($b,$c) if($n==39);# warm up for BODY_40_59 + &and($tmp1,$b) if($n==39); + &lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY + &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round + &add($f,$a); # f+=ROTATE(a,5) + &rotr($a,5) if ($n==79); +} else { &mov($tmp1,$b); # tmp1 to hold F_20_39(b,c,d) - &mov($f,&swtmp($n%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) - &rotr($b,2); # b=ROTATE(b,30) - &xor($f,&swtmp(($n+2)%16)); + &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) &xor($tmp1,$c); &xor($f,&swtmp(($n+8)%16)); &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d) &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd &rotl($f,1); # f=ROTATE(f,1) - &add($tmp1,$e); - 
&mov(&swtmp($n%16),$f); # xi=f - &mov($e,$a); # e becomes volatile - &rotl($e,5); # e=ROTATE(a,5) - &lea($f,&DWP($K,$f,$tmp1)); # f+=K_20_39+e - &add($f,$e); # f+=ROTATE(a,5) + &add($e,$tmp1); # e+=F_20_39(b,c,d) + &rotr($b,2); # b=ROTATE(b,30) + &mov($tmp1,$a); + &rotl($tmp1,5); # ROTATE(a,5) + &mov(&swtmp($n%16),$f) if($n<77);# xi=f + &lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY + &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round + &add($f,$tmp1); # f+=ROTATE(a,5) +} } sub BODY_40_59 @@ -125,41 +252,86 @@ sub BODY_40_59 &comment("40_59 $n"); - &mov($f,&swtmp($n%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) - &mov($tmp1,&swtmp(($n+2)%16)); - &xor($f,$tmp1); - &mov($tmp1,&swtmp(($n+8)%16)); - &xor($f,$tmp1); - &mov($tmp1,&swtmp(($n+13)%16)); - &xor($f,$tmp1); # f holds xa^xb^xc^xd - &mov($tmp1,$b); # tmp1 to hold F_40_59(b,c,d) +if ($alt) { + &add($e,$tmp1); # e+=b&(c^d) + &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) + &mov($tmp1,$d); + &xor($f,&swtmp(($n+8)%16)); + &xor($c,$d); # restore $c + &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd &rotl($f,1); # f=ROTATE(f,1) - &or($tmp1,$c); - &mov(&swtmp($n%16),$f); # xi=f - &and($tmp1,$d); - &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e - &mov($e,$b); # e becomes volatile and is used - # to calculate F_40_59(b,c,d) + &and($tmp1,$c); + &rotr($b,7); # b=ROTATE(b,30) + &add($e,$tmp1); # e+=c&d + &mov($tmp1,$a); # b in next round + &mov(&swtmp($n%16),$f); # xi=f + &rotl($a,5); # ROTATE(a,5) + &xor($b,$c) if ($n<59); + &and($tmp1,$b) if ($n<59);# tmp1 to hold F_40_59(b,c,d) + &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d)) + &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round + &add($f,$a); # f+=ROTATE(a,5) +} else { + &mov($tmp1,$c); # tmp1 to hold F_40_59(b,c,d) + &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd) + &xor($tmp1,$d); + &xor($f,&swtmp(($n+8)%16)); + &and($tmp1,$b); + &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd + &rotl($f,1); # f=ROTATE(f,1) + &add($tmp1,$e); # b&(c^d)+=e &rotr($b,2); # b=ROTATE(b,30) - &and($e,$c); - &or($tmp1,$e); # tmp1 holds F_40_59(b,c,d) - &mov($e,$a); - &rotl($e,5); # e=ROTATE(a,5) - &add($f,$tmp1); # f+=tmp1; + &mov($e,$a); # e becomes volatile + &rotl($e,5); # ROTATE(a,5) + &mov(&swtmp($n%16),$f); # xi=f + &lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d)) + &mov($tmp1,$c); &add($f,$e); # f+=ROTATE(a,5) + &and($tmp1,$d); + &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round + &add($f,$tmp1); # f+=c&d +} } &function_begin("sha1_block_data_order"); +if ($xmm) { + &static_label("ssse3_shortcut"); + &static_label("avx_shortcut") if ($ymm); + &static_label("K_XX_XX"); + + &call (&label("pic_point")); # make it PIC! 
+ &set_label("pic_point"); + &blindpop($tmp1); + &picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point")); + &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1)); + + &mov ($A,&DWP(0,$T)); + &mov ($D,&DWP(4,$T)); + &test ($D,1<<9); # check SSSE3 bit + &jz (&label("x86")); + &test ($A,1<<24); # check FXSR bit + &jz (&label("x86")); + if ($ymm) { + &and ($D,1<<28); # mask AVX bit + &and ($A,1<<30); # mask "Intel CPU" bit + &or ($A,$D); + &cmp ($A,1<<28|1<<30); + &je (&label("avx_shortcut")); + } + &jmp (&label("ssse3_shortcut")); + &set_label("x86",16); +} &mov($tmp1,&wparam(0)); # SHA_CTX *c &mov($T,&wparam(1)); # const void *input &mov($A,&wparam(2)); # size_t num - &stack_push(16); # allocate X[16] + &stack_push(16+3); # allocate X[16] &shl($A,6); &add($A,$T); &mov(&wparam(2),$A); # pointer beyond the end of input &mov($E,&DWP(16,$tmp1));# pre-load E + &jmp(&label("loop")); - &set_label("loop",16); +&set_label("loop",16); # copy input chunk to X, but reversing byte order! for ($i=0; $i<16; $i+=4) @@ -213,8 +385,845 @@ sub BODY_40_59 &mov(&DWP(16,$tmp1),$C); &jb(&label("loop")); - &stack_pop(16); + &stack_pop(16+3); &function_end("sha1_block_data_order"); + +if ($xmm) { +###################################################################### +# The SSSE3 implementation. +# +# %xmm[0-7] are used as ring @X[] buffer containing quadruples of last +# 32 elements of the message schedule or Xupdate outputs. First 4 +# quadruples are simply byte-swapped input, next 4 are calculated +# according to method originally suggested by Dean Gaudet (modulo +# being implemented in SSSE3). Once 8 quadruples or 32 elements are +# collected, it switches to routine proposed by Max Locktyukhin. +# +# Calculations inevitably require temporary reqisters, and there are +# no %xmm registers left to spare. For this reason part of the ring +# buffer, X[2..4] to be specific, is offloaded to 3 quadriples ring +# buffer on the stack. Keep in mind that X[2] is alias X[-6], X[3] - +# X[-5], and X[4] - X[-4]... +# +# Another notable optimization is aggressive stack frame compression +# aiming to minimize amount of 9-byte instructions... +# +# Yet another notable optimization is "jumping" $B variable. It means +# that there is no register permanently allocated for $B value. This +# allowed to eliminate one instruction from body_20_39... +# +my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded +my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4 +my @V=($A,$B,$C,$D,$E); +my $j=0; # hash round +my @T=($T,$tmp1); +my $inp; + +my $_rol=sub { &rol(@_) }; +my $_ror=sub { &ror(@_) }; + +&function_begin("_sha1_block_data_order_ssse3"); + &call (&label("pic_point")); # make it PIC! 
+ &set_label("pic_point"); + &blindpop($tmp1); + &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1)); +&set_label("ssse3_shortcut"); + + &movdqa (@X[3],&QWP(0,$tmp1)); # K_00_19 + &movdqa (@X[4],&QWP(16,$tmp1)); # K_20_39 + &movdqa (@X[5],&QWP(32,$tmp1)); # K_40_59 + &movdqa (@X[6],&QWP(48,$tmp1)); # K_60_79 + &movdqa (@X[2],&QWP(64,$tmp1)); # pbswap mask + + &mov ($E,&wparam(0)); # load argument block + &mov ($inp=@T[1],&wparam(1)); + &mov ($D,&wparam(2)); + &mov (@T[0],"esp"); + + # stack frame layout + # + # +0 X[0]+K X[1]+K X[2]+K X[3]+K # XMM->IALU xfer area + # X[4]+K X[5]+K X[6]+K X[7]+K + # X[8]+K X[9]+K X[10]+K X[11]+K + # X[12]+K X[13]+K X[14]+K X[15]+K + # + # +64 X[0] X[1] X[2] X[3] # XMM->XMM backtrace area + # X[4] X[5] X[6] X[7] + # X[8] X[9] X[10] X[11] # even borrowed for K_00_19 + # + # +112 K_20_39 K_20_39 K_20_39 K_20_39 # constants + # K_40_59 K_40_59 K_40_59 K_40_59 + # K_60_79 K_60_79 K_60_79 K_60_79 + # K_00_19 K_00_19 K_00_19 K_00_19 + # pbswap mask + # + # +192 ctx # argument block + # +196 inp + # +200 end + # +204 esp + &sub ("esp",208); + &and ("esp",-64); + + &movdqa (&QWP(112+0,"esp"),@X[4]); # copy constants + &movdqa (&QWP(112+16,"esp"),@X[5]); + &movdqa (&QWP(112+32,"esp"),@X[6]); + &shl ($D,6); # len*64 + &movdqa (&QWP(112+48,"esp"),@X[3]); + &add ($D,$inp); # end of input + &movdqa (&QWP(112+64,"esp"),@X[2]); + &add ($inp,64); + &mov (&DWP(192+0,"esp"),$E); # save argument block + &mov (&DWP(192+4,"esp"),$inp); + &mov (&DWP(192+8,"esp"),$D); + &mov (&DWP(192+12,"esp"),@T[0]); # save original %esp + + &mov ($A,&DWP(0,$E)); # load context + &mov ($B,&DWP(4,$E)); + &mov ($C,&DWP(8,$E)); + &mov ($D,&DWP(12,$E)); + &mov ($E,&DWP(16,$E)); + &mov (@T[0],$B); # magic seed + + &movdqu (@X[-4&7],&QWP(-64,$inp)); # load input to %xmm[0-3] + &movdqu (@X[-3&7],&QWP(-48,$inp)); + &movdqu (@X[-2&7],&QWP(-32,$inp)); + &movdqu (@X[-1&7],&QWP(-16,$inp)); + &pshufb (@X[-4&7],@X[2]); # byte swap + &pshufb (@X[-3&7],@X[2]); + &pshufb (@X[-2&7],@X[2]); + &movdqa (&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot + &pshufb (@X[-1&7],@X[2]); + &paddd (@X[-4&7],@X[3]); # add K_00_19 + &paddd (@X[-3&7],@X[3]); + &paddd (@X[-2&7],@X[3]); + &movdqa (&QWP(0,"esp"),@X[-4&7]); # X[]+K xfer to IALU + &psubd (@X[-4&7],@X[3]); # restore X[] + &movdqa (&QWP(0+16,"esp"),@X[-3&7]); + &psubd (@X[-3&7],@X[3]); + &movdqa (&QWP(0+32,"esp"),@X[-2&7]); + &psubd (@X[-2&7],@X[3]); + &movdqa (@X[0],@X[-3&7]); + &jmp (&label("loop")); + +###################################################################### +# SSE instruction sequence is first broken to groups of indepentent +# instructions, independent in respect to their inputs and shifter +# (not all architectures have more than one). Then IALU instructions +# are "knitted in" between the SSE groups. Distance is maintained for +# SSE latency of 2 in hope that it fits better upcoming AMD Bulldozer +# [which allegedly also implements SSSE3]... +# +# Temporary registers usage. X[2] is volatile at the entry and at the +# end is restored from backtrace ring buffer. X[3] is expected to +# contain current K_XX_XX constant and is used to caclulate X[-1]+K +# from previous round, it becomes volatile the moment the value is +# saved to stack for transfer to IALU. X[4] becomes volatile whenever +# X[-4] is accumulated and offloaded to backtrace ring buffer, at the +# end it is loaded with next K_XX_XX [which becomes X[3] in next +# round]... 
+# +sub Xupdate_ssse3_16_31() # recall that $Xi starts wtih 4 +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 40 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + eval(shift(@insns)); + &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]" + &movdqa (@X[2],@X[-1&7]); + eval(shift(@insns)); + eval(shift(@insns)); + + &paddd (@X[3],@X[-1&7]); + &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); + &psrldq (@X[2],4); # "X[-3]", 3 dwords + eval(shift(@insns)); + eval(shift(@insns)); + &pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]" + eval(shift(@insns)); + eval(shift(@insns)); + + &pxor (@X[2],@X[-2&7]); # "X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &pxor (@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + + &movdqa (@X[4],@X[0]); + &movdqa (@X[2],@X[0]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &pslldq (@X[4],12); # "X[0]"<<96, extract one dword + &paddd (@X[0],@X[0]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &psrld (@X[2],31); + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (@X[3],@X[4]); + eval(shift(@insns)); + eval(shift(@insns)); + + &psrld (@X[4],30); + &por (@X[0],@X[2]); # "X[0]"<<<=1 + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); + + &pslld (@X[3],2); + &pxor (@X[0],@X[4]); + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX + eval(shift(@insns)); + eval(shift(@insns)); + + &pxor (@X[0],@X[3]); # "X[0]"^=("X[0]"<<96)<<<2 + &movdqa (@X[1],@X[-2&7]) if ($Xi<7); + eval(shift(@insns)); + eval(shift(@insns)); + + foreach (@insns) { eval; } # remaining instructions [if any] + + $Xi++; push(@X,shift(@X)); # "rotate" X[] +} + +sub Xupdate_ssse3_32_79() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions + my ($a,$b,$c,$d,$e); + + &movdqa (@X[2],@X[-1&7]) if ($Xi==8); + eval(shift(@insns)); # body_20_39 + &pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" + &palignr(@X[2],@X[-2&7],8); # compose "X[-6]" + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]" + &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); + if ($Xi%5) { + &movdqa (@X[4],@X[3]); # "perpetuate" K_XX_XX... + } else { # ... 
or load next one + &movdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp")); + } + &paddd (@X[3],@X[-1&7]); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &pxor (@X[0],@X[2]); # "X[0]"^="X[-6]" + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &movdqa (@X[2],@X[0]); + &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &pslld (@X[0],2); + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + &psrld (@X[2],30); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &por (@X[0],@X[2]); # "X[0]"<<<=2 + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19); # restore X[] from backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + &movdqa (@X[3],@X[0]) if ($Xi<19); + eval(shift(@insns)); + + foreach (@insns) { eval; } # remaining instructions + + $Xi++; push(@X,shift(@X)); # "rotate" X[] +} + +sub Xuplast_ssse3_80() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + &paddd (@X[3],@X[-1&7]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer IALU + + foreach (@insns) { eval; } # remaining instructions + + &mov ($inp=@T[1],&DWP(192+4,"esp")); + &cmp ($inp,&DWP(192+8,"esp")); + &je (&label("done")); + + &movdqa (@X[3],&QWP(112+48,"esp")); # K_00_19 + &movdqa (@X[2],&QWP(112+64,"esp")); # pbswap mask + &movdqu (@X[-4&7],&QWP(0,$inp)); # load input + &movdqu (@X[-3&7],&QWP(16,$inp)); + &movdqu (@X[-2&7],&QWP(32,$inp)); + &movdqu (@X[-1&7],&QWP(48,$inp)); + &add ($inp,64); + &pshufb (@X[-4&7],@X[2]); # byte swap + &mov (&DWP(192+4,"esp"),$inp); + &movdqa (&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot + + $Xi=0; +} + +sub Xloop_ssse3() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + eval(shift(@insns)); + &pshufb (@X[($Xi-3)&7],@X[2]); + eval(shift(@insns)); + eval(shift(@insns)); + &paddd (@X[($Xi-4)&7],@X[3]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + &psubd (@X[($Xi-4)&7],@X[3]); + + foreach (@insns) { eval; } + $Xi++; +} + +sub Xtail_ssse3() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + foreach (@insns) { eval; } +} + +sub body_00_19 () { + ( + '($a,$b,$c,$d,$e)=@V;'. + '&add ($e,&DWP(4*($j&15),"esp"));', # X[]+K xfer + '&xor ($c,$d);', + '&mov (@T[1],$a);', # $b in next round + '&$_rol ($a,5);', + '&and (@T[0],$c);', # ($b&($c^$d)) + '&xor ($c,$d);', # restore $c + '&xor (@T[0],$d);', + '&add ($e,$a);', + '&$_ror ($b,$j?7:2);', # $b>>>2 + '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' + ); +} + +sub body_20_39 () { + ( + '($a,$b,$c,$d,$e)=@V;'. 
+ '&add ($e,&DWP(4*($j++&15),"esp"));', # X[]+K xfer + '&xor (@T[0],$d);', # ($b^$d) + '&mov (@T[1],$a);', # $b in next round + '&$_rol ($a,5);', + '&xor (@T[0],$c);', # ($b^$d^$c) + '&add ($e,$a);', + '&$_ror ($b,7);', # $b>>>2 + '&add ($e,@T[0]);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));' + ); +} + +sub body_40_59 () { + ( + '($a,$b,$c,$d,$e)=@V;'. + '&mov (@T[1],$c);', + '&xor ($c,$d);', + '&add ($e,&DWP(4*($j++&15),"esp"));', # X[]+K xfer + '&and (@T[1],$d);', + '&and (@T[0],$c);', # ($b&($c^$d)) + '&$_ror ($b,7);', # $b>>>2 + '&add ($e,@T[1]);', + '&mov (@T[1],$a);', # $b in next round + '&$_rol ($a,5);', + '&add ($e,@T[0]);', + '&xor ($c,$d);', # restore $c + '&add ($e,$a);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));' + ); +} + +&set_label("loop",16); + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_32_79(\&body_00_19); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xuplast_ssse3_80(\&body_20_39); # can jump to "done" + + $saved_j=$j; @saved_V=@V; + + &Xloop_ssse3(\&body_20_39); + &Xloop_ssse3(\&body_20_39); + &Xloop_ssse3(\&body_20_39); + + &mov (@T[1],&DWP(192,"esp")); # update context + &add ($A,&DWP(0,@T[1])); + &add (@T[0],&DWP(4,@T[1])); # $b + &add ($C,&DWP(8,@T[1])); + &mov (&DWP(0,@T[1]),$A); + &add ($D,&DWP(12,@T[1])); + &mov (&DWP(4,@T[1]),@T[0]); + &add ($E,&DWP(16,@T[1])); + &mov (&DWP(8,@T[1]),$C); + &mov ($B,@T[0]); + &mov (&DWP(12,@T[1]),$D); + &mov (&DWP(16,@T[1]),$E); + &movdqa (@X[0],@X[-3&7]); + + &jmp (&label("loop")); + +&set_label("done",16); $j=$saved_j; @V=@saved_V; + + &Xtail_ssse3(\&body_20_39); + &Xtail_ssse3(\&body_20_39); + &Xtail_ssse3(\&body_20_39); + + &mov (@T[1],&DWP(192,"esp")); # update context + &add ($A,&DWP(0,@T[1])); + &mov ("esp",&DWP(192+12,"esp")); # restore %esp + &add (@T[0],&DWP(4,@T[1])); # $b + &add ($C,&DWP(8,@T[1])); + &mov (&DWP(0,@T[1]),$A); + &add ($D,&DWP(12,@T[1])); + &mov (&DWP(4,@T[1]),@T[0]); + &add ($E,&DWP(16,@T[1])); + &mov (&DWP(8,@T[1]),$C); + &mov (&DWP(12,@T[1]),$D); + &mov (&DWP(16,@T[1]),$E); + +&function_end("_sha1_block_data_order_ssse3"); + +if ($ymm) { +my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded +my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4 +my @V=($A,$B,$C,$D,$E); +my $j=0; # hash round +my @T=($T,$tmp1); +my $inp; + +my $_rol=sub { &shld(@_[0],@_) }; +my $_ror=sub { &shrd(@_[0],@_) }; + +&function_begin("_sha1_block_data_order_avx"); + &call (&label("pic_point")); # make it PIC! 
+ &set_label("pic_point"); + &blindpop($tmp1); + &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1)); +&set_label("avx_shortcut"); + &vzeroall(); + + &vmovdqa(@X[3],&QWP(0,$tmp1)); # K_00_19 + &vmovdqa(@X[4],&QWP(16,$tmp1)); # K_20_39 + &vmovdqa(@X[5],&QWP(32,$tmp1)); # K_40_59 + &vmovdqa(@X[6],&QWP(48,$tmp1)); # K_60_79 + &vmovdqa(@X[2],&QWP(64,$tmp1)); # pbswap mask + + &mov ($E,&wparam(0)); # load argument block + &mov ($inp=@T[1],&wparam(1)); + &mov ($D,&wparam(2)); + &mov (@T[0],"esp"); + + # stack frame layout + # + # +0 X[0]+K X[1]+K X[2]+K X[3]+K # XMM->IALU xfer area + # X[4]+K X[5]+K X[6]+K X[7]+K + # X[8]+K X[9]+K X[10]+K X[11]+K + # X[12]+K X[13]+K X[14]+K X[15]+K + # + # +64 X[0] X[1] X[2] X[3] # XMM->XMM backtrace area + # X[4] X[5] X[6] X[7] + # X[8] X[9] X[10] X[11] # even borrowed for K_00_19 + # + # +112 K_20_39 K_20_39 K_20_39 K_20_39 # constants + # K_40_59 K_40_59 K_40_59 K_40_59 + # K_60_79 K_60_79 K_60_79 K_60_79 + # K_00_19 K_00_19 K_00_19 K_00_19 + # pbswap mask + # + # +192 ctx # argument block + # +196 inp + # +200 end + # +204 esp + &sub ("esp",208); + &and ("esp",-64); + + &vmovdqa(&QWP(112+0,"esp"),@X[4]); # copy constants + &vmovdqa(&QWP(112+16,"esp"),@X[5]); + &vmovdqa(&QWP(112+32,"esp"),@X[6]); + &shl ($D,6); # len*64 + &vmovdqa(&QWP(112+48,"esp"),@X[3]); + &add ($D,$inp); # end of input + &vmovdqa(&QWP(112+64,"esp"),@X[2]); + &add ($inp,64); + &mov (&DWP(192+0,"esp"),$E); # save argument block + &mov (&DWP(192+4,"esp"),$inp); + &mov (&DWP(192+8,"esp"),$D); + &mov (&DWP(192+12,"esp"),@T[0]); # save original %esp + + &mov ($A,&DWP(0,$E)); # load context + &mov ($B,&DWP(4,$E)); + &mov ($C,&DWP(8,$E)); + &mov ($D,&DWP(12,$E)); + &mov ($E,&DWP(16,$E)); + &mov (@T[0],$B); # magic seed + + &vmovdqu(@X[-4&7],&QWP(-64,$inp)); # load input to %xmm[0-3] + &vmovdqu(@X[-3&7],&QWP(-48,$inp)); + &vmovdqu(@X[-2&7],&QWP(-32,$inp)); + &vmovdqu(@X[-1&7],&QWP(-16,$inp)); + &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap + &vpshufb(@X[-3&7],@X[-3&7],@X[2]); + &vpshufb(@X[-2&7],@X[-2&7],@X[2]); + &vmovdqa(&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot + &vpshufb(@X[-1&7],@X[-1&7],@X[2]); + &vpaddd (@X[0],@X[-4&7],@X[3]); # add K_00_19 + &vpaddd (@X[1],@X[-3&7],@X[3]); + &vpaddd (@X[2],@X[-2&7],@X[3]); + &vmovdqa(&QWP(0,"esp"),@X[0]); # X[]+K xfer to IALU + &vmovdqa(&QWP(0+16,"esp"),@X[1]); + &vmovdqa(&QWP(0+32,"esp"),@X[2]); + &jmp (&label("loop")); + +sub Xupdate_avx_16_31() # recall that $Xi starts wtih 4 +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 40 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + eval(shift(@insns)); + &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]" + eval(shift(@insns)); + eval(shift(@insns)); + + &vpaddd (@X[3],@X[3],@X[-1&7]); + &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); + &vpsrldq(@X[2],@X[-1&7],4); # "X[-3]", 3 dwords + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]" + eval(shift(@insns)); + eval(shift(@insns)); + + &vpxor (@X[2],@X[2],@X[-2&7]); # "X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + + &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpsrld (@X[2],@X[0],31); + eval(shift(@insns)); + 
eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpslldq(@X[4],@X[0],12); # "X[0]"<<96, extract one dword + &vpaddd (@X[0],@X[0],@X[0]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpsrld (@X[3],@X[4],30); + &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=1 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpslld (@X[4],@X[4],2); + &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor (@X[0],@X[0],@X[3]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpxor (@X[0],@X[0],@X[4]); # "X[0]"^=("X[0]"<<96)<<<2 + eval(shift(@insns)); + eval(shift(@insns)); + &vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX + eval(shift(@insns)); + eval(shift(@insns)); + + foreach (@insns) { eval; } # remaining instructions [if any] + + $Xi++; push(@X,shift(@X)); # "rotate" X[] +} + +sub Xupdate_avx_32_79() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions + my ($a,$b,$c,$d,$e); + + &vpalignr(@X[2],@X[-1&7],@X[-2&7],8); # compose "X[-6]" + &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]" + &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); + if ($Xi%5) { + &vmovdqa (@X[4],@X[3]); # "perpetuate" K_XX_XX... + } else { # ... or load next one + &vmovdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp")); + } + &vpaddd (@X[3],@X[3],@X[-1&7]); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-6]" + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &vpsrld (@X[2],@X[0],30); + &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &vpslld (@X[0],@X[0],2); + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=2 + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19); # restore X[] from backtrace buffer + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + foreach (@insns) { eval; } # remaining instructions + + $Xi++; push(@X,shift(@X)); # "rotate" X[] +} + +sub Xuplast_avx_80() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + &vpaddd (@X[3],@X[3],@X[-1&7]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer IALU + + foreach (@insns) { eval; } # remaining instructions + + &mov ($inp=@T[1],&DWP(192+4,"esp")); + &cmp ($inp,&DWP(192+8,"esp")); + &je (&label("done")); + + &vmovdqa(@X[3],&QWP(112+48,"esp")); # K_00_19 + &vmovdqa(@X[2],&QWP(112+64,"esp")); # pbswap mask + 
&vmovdqu(@X[-4&7],&QWP(0,$inp)); # load input + &vmovdqu(@X[-3&7],&QWP(16,$inp)); + &vmovdqu(@X[-2&7],&QWP(32,$inp)); + &vmovdqu(@X[-1&7],&QWP(48,$inp)); + &add ($inp,64); + &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap + &mov (&DWP(192+4,"esp"),$inp); + &vmovdqa(&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot + + $Xi=0; +} + +sub Xloop_avx() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + eval(shift(@insns)); + &vpshufb (@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],@X[3]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vmovdqa (&QWP(0+16*$Xi,"esp"),@X[$Xi&7]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + + foreach (@insns) { eval; } + $Xi++; +} + +sub Xtail_avx() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + foreach (@insns) { eval; } +} + +&set_label("loop",16); + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_32_79(\&body_00_19); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_20_39); + &Xuplast_avx_80(\&body_20_39); # can jump to "done" + + $saved_j=$j; @saved_V=@V; + + &Xloop_avx(\&body_20_39); + &Xloop_avx(\&body_20_39); + &Xloop_avx(\&body_20_39); + + &mov (@T[1],&DWP(192,"esp")); # update context + &add ($A,&DWP(0,@T[1])); + &add (@T[0],&DWP(4,@T[1])); # $b + &add ($C,&DWP(8,@T[1])); + &mov (&DWP(0,@T[1]),$A); + &add ($D,&DWP(12,@T[1])); + &mov (&DWP(4,@T[1]),@T[0]); + &add ($E,&DWP(16,@T[1])); + &mov (&DWP(8,@T[1]),$C); + &mov ($B,@T[0]); + &mov (&DWP(12,@T[1]),$D); + &mov (&DWP(16,@T[1]),$E); + + &jmp (&label("loop")); + +&set_label("done",16); $j=$saved_j; @V=@saved_V; + + &Xtail_avx(\&body_20_39); + &Xtail_avx(\&body_20_39); + &Xtail_avx(\&body_20_39); + + &vzeroall(); + + &mov (@T[1],&DWP(192,"esp")); # update context + &add ($A,&DWP(0,@T[1])); + &mov ("esp",&DWP(192+12,"esp")); # restore %esp + &add (@T[0],&DWP(4,@T[1])); # $b + &add ($C,&DWP(8,@T[1])); + &mov (&DWP(0,@T[1]),$A); + &add ($D,&DWP(12,@T[1])); + &mov (&DWP(4,@T[1]),@T[0]); + &add ($E,&DWP(16,@T[1])); + &mov (&DWP(8,@T[1]),$C); + &mov (&DWP(12,@T[1]),$D); + &mov (&DWP(16,@T[1]),$E); +&function_end("_sha1_block_data_order_avx"); +} +&set_label("K_XX_XX",64); +&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999); # K_00_19 +&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1); # K_20_39 +&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc); # K_40_59 +&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6); # K_60_79 +&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # pbswap mask +} &asciz("SHA1 block transform for x86, CRYPTOGAMS by "); &asm_finish(); diff --git a/app/openssl/crypto/sha/asm/sha1-alpha.pl b/app/openssl/crypto/sha/asm/sha1-alpha.pl new file mode 100644 index 00000000..6c4b9251 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha1-alpha.pl @@ -0,0 +1,322 @@ +#!/usr/bin/env perl + +# 
==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# SHA1 block procedure for Alpha. + +# On 21264 performance is 33% better than code generated by vendor +# compiler, and 75% better than GCC [3.4], and in absolute terms is +# 8.7 cycles per processed byte. Implementation features vectorized +# byte swap, but not Xupdate. + +@X=( "\$0", "\$1", "\$2", "\$3", "\$4", "\$5", "\$6", "\$7", + "\$8", "\$9", "\$10", "\$11", "\$12", "\$13", "\$14", "\$15"); +$ctx="a0"; # $16 +$inp="a1"; +$num="a2"; +$A="a3"; +$B="a4"; # 20 +$C="a5"; +$D="t8"; +$E="t9"; @V=($A,$B,$C,$D,$E); +$t0="t10"; # 24 +$t1="t11"; +$t2="ra"; +$t3="t12"; +$K="AT"; # 28 + +sub BODY_00_19 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___ if ($i==0); + ldq_u @X[0],0+0($inp) + ldq_u @X[1],0+7($inp) +___ +$code.=<<___ if (!($i&1) && $i<14); + ldq_u @X[$i+2],($i+2)*4+0($inp) + ldq_u @X[$i+3],($i+2)*4+7($inp) +___ +$code.=<<___ if (!($i&1) && $i<15); + extql @X[$i],$inp,@X[$i] + extqh @X[$i+1],$inp,@X[$i+1] + + or @X[$i+1],@X[$i],@X[$i] # pair of 32-bit values are fetched + + srl @X[$i],24,$t0 # vectorized byte swap + srl @X[$i],8,$t2 + + sll @X[$i],8,$t3 + sll @X[$i],24,@X[$i] + zapnot $t0,0x11,$t0 + zapnot $t2,0x22,$t2 + + zapnot @X[$i],0x88,@X[$i] + or $t0,$t2,$t0 + zapnot $t3,0x44,$t3 + sll $a,5,$t1 + + or @X[$i],$t0,@X[$i] + addl $K,$e,$e + and $b,$c,$t2 + zapnot $a,0xf,$a + + or @X[$i],$t3,@X[$i] + srl $a,27,$t0 + bic $d,$b,$t3 + sll $b,30,$b + + extll @X[$i],4,@X[$i+1] # extract upper half + or $t2,$t3,$t2 + addl @X[$i],$e,$e + + addl $t1,$e,$e + srl $b,32,$t3 + zapnot @X[$i],0xf,@X[$i] + + addl $t0,$e,$e + addl $t2,$e,$e + or $t3,$b,$b +___ +$code.=<<___ if (($i&1) && $i<15); + sll $a,5,$t1 + addl $K,$e,$e + and $b,$c,$t2 + zapnot $a,0xf,$a + + srl $a,27,$t0 + addl @X[$i%16],$e,$e + bic $d,$b,$t3 + sll $b,30,$b + + or $t2,$t3,$t2 + addl $t1,$e,$e + srl $b,32,$t3 + zapnot @X[$i],0xf,@X[$i] + + addl $t0,$e,$e + addl $t2,$e,$e + or $t3,$b,$b +___ +$code.=<<___ if ($i>=15); # with forward Xupdate + sll $a,5,$t1 + addl $K,$e,$e + and $b,$c,$t2 + xor @X[($j+2)%16],@X[$j%16],@X[$j%16] + + zapnot $a,0xf,$a + addl @X[$i%16],$e,$e + bic $d,$b,$t3 + xor @X[($j+8)%16],@X[$j%16],@X[$j%16] + + srl $a,27,$t0 + addl $t1,$e,$e + or $t2,$t3,$t2 + xor @X[($j+13)%16],@X[$j%16],@X[$j%16] + + sll $b,30,$b + addl $t0,$e,$e + srl @X[$j%16],31,$t1 + + addl $t2,$e,$e + srl $b,32,$t3 + addl @X[$j%16],@X[$j%16],@X[$j%16] + + or $t3,$b,$b + zapnot @X[$i%16],0xf,@X[$i%16] + or $t1,@X[$j%16],@X[$j%16] +___ +} + +sub BODY_20_39 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___ if ($i<79); # with forward Xupdate + sll $a,5,$t1 + addl $K,$e,$e + zapnot $a,0xf,$a + xor @X[($j+2)%16],@X[$j%16],@X[$j%16] + + sll $b,30,$t3 + addl $t1,$e,$e + xor $b,$c,$t2 + xor @X[($j+8)%16],@X[$j%16],@X[$j%16] + + srl $b,2,$b + addl @X[$i%16],$e,$e + xor $d,$t2,$t2 + xor @X[($j+13)%16],@X[$j%16],@X[$j%16] + + srl @X[$j%16],31,$t1 + addl $t2,$e,$e + srl $a,27,$t0 + addl @X[$j%16],@X[$j%16],@X[$j%16] + + or $t3,$b,$b + addl $t0,$e,$e + or $t1,@X[$j%16],@X[$j%16] +___ +$code.=<<___ if ($i<77); + zapnot @X[$i%16],0xf,@X[$i%16] +___ +$code.=<<___ if ($i==79); # with context fetch + sll $a,5,$t1 + addl $K,$e,$e + zapnot $a,0xf,$a + ldl 
@X[0],0($ctx) + + sll $b,30,$t3 + addl $t1,$e,$e + xor $b,$c,$t2 + ldl @X[1],4($ctx) + + srl $b,2,$b + addl @X[$i%16],$e,$e + xor $d,$t2,$t2 + ldl @X[2],8($ctx) + + srl $a,27,$t0 + addl $t2,$e,$e + ldl @X[3],12($ctx) + + or $t3,$b,$b + addl $t0,$e,$e + ldl @X[4],16($ctx) +___ +} + +sub BODY_40_59 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___; # with forward Xupdate + sll $a,5,$t1 + addl $K,$e,$e + zapnot $a,0xf,$a + xor @X[($j+2)%16],@X[$j%16],@X[$j%16] + + srl $a,27,$t0 + and $b,$c,$t2 + and $b,$d,$t3 + xor @X[($j+8)%16],@X[$j%16],@X[$j%16] + + sll $b,30,$b + addl $t1,$e,$e + xor @X[($j+13)%16],@X[$j%16],@X[$j%16] + + srl @X[$j%16],31,$t1 + addl $t0,$e,$e + or $t2,$t3,$t2 + and $c,$d,$t3 + + or $t2,$t3,$t2 + srl $b,32,$t3 + addl @X[$i%16],$e,$e + addl @X[$j%16],@X[$j%16],@X[$j%16] + + or $t3,$b,$b + addl $t2,$e,$e + or $t1,@X[$j%16],@X[$j%16] + zapnot @X[$i%16],0xf,@X[$i%16] +___ +} + +$code=<<___; +#ifdef __linux__ +#include +#else +#include +#include +#endif + +.text + +.set noat +.set noreorder +.globl sha1_block_data_order +.align 5 +.ent sha1_block_data_order +sha1_block_data_order: + lda sp,-64(sp) + stq ra,0(sp) + stq s0,8(sp) + stq s1,16(sp) + stq s2,24(sp) + stq s3,32(sp) + stq s4,40(sp) + stq s5,48(sp) + stq fp,56(sp) + .mask 0x0400fe00,-64 + .frame sp,64,ra + .prologue 0 + + ldl $A,0($ctx) + ldl $B,4($ctx) + sll $num,6,$num + ldl $C,8($ctx) + ldl $D,12($ctx) + ldl $E,16($ctx) + addq $inp,$num,$num + +.Lloop: + .set noreorder + ldah $K,23170(zero) + zapnot $B,0xf,$B + lda $K,31129($K) # K_00_19 +___ +for ($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); } + +$code.=<<___; + ldah $K,28378(zero) + lda $K,-5215($K) # K_20_39 +___ +for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } + +$code.=<<___; + ldah $K,-28900(zero) + lda $K,-17188($K) # K_40_59 +___ +for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } + +$code.=<<___; + ldah $K,-13725(zero) + lda $K,-15914($K) # K_60_79 +___ +for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } + +$code.=<<___; + addl @X[0],$A,$A + addl @X[1],$B,$B + addl @X[2],$C,$C + addl @X[3],$D,$D + addl @X[4],$E,$E + stl $A,0($ctx) + stl $B,4($ctx) + addq $inp,64,$inp + stl $C,8($ctx) + stl $D,12($ctx) + stl $E,16($ctx) + cmpult $inp,$num,$t1 + bne $t1,.Lloop + + .set noreorder + ldq ra,0(sp) + ldq s0,8(sp) + ldq s1,16(sp) + ldq s2,24(sp) + ldq s3,32(sp) + ldq s4,40(sp) + ldq s5,48(sp) + ldq fp,56(sp) + lda sp,64(sp) + ret (ra) +.end sha1_block_data_order +.ascii "SHA1 block transform for Alpha, CRYPTOGAMS by " +.align 2 +___ +$output=shift and open STDOUT,">$output"; +print $code; +close STDOUT; diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.S b/app/openssl/crypto/sha/asm/sha1-armv4-large.S new file mode 120000 index 00000000..6523cbdd --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha1-armv4-large.S @@ -0,0 +1 @@ +sha1-armv4-large.s \ No newline at end of file diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.pl b/app/openssl/crypto/sha/asm/sha1-armv4-large.pl index 79e3f613..33da3e0e 100644 --- a/app/openssl/crypto/sha/asm/sha1-armv4-large.pl +++ b/app/openssl/crypto/sha/asm/sha1-armv4-large.pl @@ -47,6 +47,10 @@ # Cortex A8 core and in absolute terms ~870 cycles per input block # [or 13.6 cycles per byte]. +# February 2011. +# +# Profiler-assisted and platform-specific optimization resulted in 10% +# improvement on Cortex A8 core and 12.2 cycles per byte. 
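+# ----------------------------------------------------------------------
+# [Illustrative sketch, not part of the upstream module.] The BODY_00_15
+# rewrite below (and its expansion in sha1-armv4-large.s) has two input
+# paths: pre-ARMv7 cores assemble each big-endian message word from four
+# byte loads, while ARMv7 does a single, possibly unaligned, word load
+# followed by a byte swap ("rev") on little-endian cores. In plain Perl
+# the two paths compute the same value; the sub names are hypothetical:
+
+sub word_from_bytes {            # what the ldrb/orr sequence computes
+    my ($b0,$b1,$b2,$b3) = @_;   # the four bytes in memory order
+    return ($b0<<24)|($b1<<16)|($b2<<8)|$b3;
+}
+sub word_from_ldr_rev {          # what ldr + rev computes on a little-endian core
+    my $le_word = shift;         # 32-bit value as loaded little-endian
+    return unpack("N", pack("V", $le_word));   # byte-swap back to big-endian
+}
+
+# e.g. word_from_bytes(0x61,0x62,0x63,0x64) == word_from_ldr_rev(0x64636261)
+# == 0x61626364.
+# ----------------------------------------------------------------------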
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; @@ -76,31 +80,41 @@ $code.=<<___; add $e,$K,$e,ror#2 @ E+=K_xx_xx ldr $t3,[$Xi,#2*4] eor $t0,$t0,$t1 - eor $t2,$t2,$t3 + eor $t2,$t2,$t3 @ 1 cycle stall eor $t1,$c,$d @ F_xx_xx mov $t0,$t0,ror#31 add $e,$e,$a,ror#27 @ E+=ROR(A,27) eor $t0,$t0,$t2,ror#31 + str $t0,[$Xi,#-4]! $opt1 @ F_xx_xx $opt2 @ F_xx_xx add $e,$e,$t0 @ E+=X[i] - str $t0,[$Xi,#-4]! ___ } sub BODY_00_15 { my ($a,$b,$c,$d,$e)=@_; $code.=<<___; - ldrb $t0,[$inp],#4 - ldrb $t1,[$inp,#-1] - ldrb $t2,[$inp,#-2] +#if __ARM_ARCH__<7 + ldrb $t1,[$inp,#2] + ldrb $t0,[$inp,#3] + ldrb $t2,[$inp,#1] add $e,$K,$e,ror#2 @ E+=K_00_19 - ldrb $t3,[$inp,#-3] + ldrb $t3,[$inp],#4 + orr $t0,$t0,$t1,lsl#8 + eor $t1,$c,$d @ F_xx_xx + orr $t0,$t0,$t2,lsl#16 add $e,$e,$a,ror#27 @ E+=ROR(A,27) - orr $t0,$t1,$t0,lsl#24 + orr $t0,$t0,$t3,lsl#24 +#else + ldr $t0,[$inp],#4 @ handles unaligned + add $e,$K,$e,ror#2 @ E+=K_00_19 eor $t1,$c,$d @ F_xx_xx - orr $t0,$t0,$t2,lsl#8 - orr $t0,$t0,$t3,lsl#16 + add $e,$e,$a,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev $t0,$t0 @ byte swap +#endif +#endif and $t1,$b,$t1,ror#2 add $e,$e,$t0 @ E+=X[i] eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D) @@ -136,6 +150,8 @@ ___ } $code=<<___; +#include "arm_arch.h" + .text .global sha1_block_data_order @@ -161,7 +177,7 @@ for($i=0;$i<5;$i++) { $code.=<<___; teq $Xi,sp bne .L_00_15 @ [((11+4)*5+2)*3] - sub sp,sp,#5*4 + sub sp,sp,#25*4 ___ &BODY_00_15(@V); unshift(@V,pop(@V)); &BODY_16_19(@V); unshift(@V,pop(@V)); @@ -171,7 +187,6 @@ ___ $code.=<<___; ldr $K,.LK_20_39 @ [+15+16*4] - sub sp,sp,#20*4 cmn sp,#0 @ [+3], clear carry to denote 20_39 .L_20_39_or_60_79: ___ @@ -210,10 +225,14 @@ $code.=<<___; teq $inp,$len bne .Lloop @ [+18], total 1307 +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else ldmia sp!,{r4-r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) +#endif .align 2 .LK_00_19: .word 0x5a827999 .LK_20_39: .word 0x6ed9eba1 diff --git a/app/openssl/crypto/sha/asm/sha1-armv4-large.s b/app/openssl/crypto/sha/asm/sha1-armv4-large.s index 7f687d9f..639ae78a 100644 --- a/app/openssl/crypto/sha/asm/sha1-armv4-large.s +++ b/app/openssl/crypto/sha/asm/sha1-armv4-large.s @@ -1,3 +1,5 @@ +#include "arm_arch.h" + .text .global sha1_block_data_order @@ -16,76 +18,126 @@ sha1_block_data_order: mov r6,r6,ror#30 mov r7,r7,ror#30 @ [6] .L_00_15: - ldrb r9,[r1],#4 - ldrb r10,[r1,#-1] - ldrb r11,[r1,#-2] +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1,#-3] + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r5,r6 @ F_xx_xx + orr r9,r9,r11,lsl#16 add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r10,r9,lsl#24 + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r7,r8,r7,ror#2 @ E+=K_00_19 eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#8 - orr r9,r9,r12,lsl#16 + add r7,r7,r3,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif and r10,r4,r10,ror#2 add r7,r7,r9 @ E+=X[i] eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! 
add r7,r7,r10 @ E+=F_00_19(B,C,D) - ldrb r9,[r1],#4 - ldrb r10,[r1,#-1] - ldrb r11,[r1,#-2] +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] add r6,r8,r6,ror#2 @ E+=K_00_19 - ldrb r12,[r1,#-3] + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r4,r5 @ F_xx_xx + orr r9,r9,r11,lsl#16 add r6,r6,r7,ror#27 @ E+=ROR(A,27) - orr r9,r10,r9,lsl#24 + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r6,r8,r6,ror#2 @ E+=K_00_19 eor r10,r4,r5 @ F_xx_xx - orr r9,r9,r11,lsl#8 - orr r9,r9,r12,lsl#16 + add r6,r6,r7,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif and r10,r3,r10,ror#2 add r6,r6,r9 @ E+=X[i] eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r6,r6,r10 @ E+=F_00_19(B,C,D) - ldrb r9,[r1],#4 - ldrb r10,[r1,#-1] - ldrb r11,[r1,#-2] +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] add r5,r8,r5,ror#2 @ E+=K_00_19 - ldrb r12,[r1,#-3] + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r3,r4 @ F_xx_xx + orr r9,r9,r11,lsl#16 add r5,r5,r6,ror#27 @ E+=ROR(A,27) - orr r9,r10,r9,lsl#24 + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r5,r8,r5,ror#2 @ E+=K_00_19 eor r10,r3,r4 @ F_xx_xx - orr r9,r9,r11,lsl#8 - orr r9,r9,r12,lsl#16 + add r5,r5,r6,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif and r10,r7,r10,ror#2 add r5,r5,r9 @ E+=X[i] eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! add r5,r5,r10 @ E+=F_00_19(B,C,D) - ldrb r9,[r1],#4 - ldrb r10,[r1,#-1] - ldrb r11,[r1,#-2] +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] add r4,r8,r4,ror#2 @ E+=K_00_19 - ldrb r12,[r1,#-3] + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r7,r3 @ F_xx_xx + orr r9,r9,r11,lsl#16 add r4,r4,r5,ror#27 @ E+=ROR(A,27) - orr r9,r10,r9,lsl#24 + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r4,r8,r4,ror#2 @ E+=K_00_19 eor r10,r7,r3 @ F_xx_xx - orr r9,r9,r11,lsl#8 - orr r9,r9,r12,lsl#16 + add r4,r4,r5,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif and r10,r6,r10,ror#2 add r4,r4,r9 @ E+=X[i] eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) str r9,[r14,#-4]! 
add r4,r4,r10 @ E+=F_00_19(B,C,D) - ldrb r9,[r1],#4 - ldrb r10,[r1,#-1] - ldrb r11,[r1,#-2] +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] add r3,r8,r3,ror#2 @ E+=K_00_19 - ldrb r12,[r1,#-3] + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r6,r7 @ F_xx_xx + orr r9,r9,r11,lsl#16 add r3,r3,r4,ror#27 @ E+=ROR(A,27) - orr r9,r10,r9,lsl#24 + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r3,r8,r3,ror#2 @ E+=K_00_19 eor r10,r6,r7 @ F_xx_xx - orr r9,r9,r11,lsl#8 - orr r9,r9,r12,lsl#16 + add r3,r3,r4,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif and r10,r5,r10,ror#2 add r3,r3,r9 @ E+=X[i] eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) @@ -93,17 +145,27 @@ sha1_block_data_order: add r3,r3,r10 @ E+=F_00_19(B,C,D) teq r14,sp bne .L_00_15 @ [((11+4)*5+2)*3] - sub sp,sp,#5*4 - ldrb r9,[r1],#4 - ldrb r10,[r1,#-1] - ldrb r11,[r1,#-2] + sub sp,sp,#25*4 +#if __ARM_ARCH__<7 + ldrb r10,[r1,#2] + ldrb r9,[r1,#3] + ldrb r11,[r1,#1] add r7,r8,r7,ror#2 @ E+=K_00_19 - ldrb r12,[r1,#-3] + ldrb r12,[r1],#4 + orr r9,r9,r10,lsl#8 + eor r10,r5,r6 @ F_xx_xx + orr r9,r9,r11,lsl#16 add r7,r7,r3,ror#27 @ E+=ROR(A,27) - orr r9,r10,r9,lsl#24 + orr r9,r9,r12,lsl#24 +#else + ldr r9,[r1],#4 @ handles unaligned + add r7,r8,r7,ror#2 @ E+=K_00_19 eor r10,r5,r6 @ F_xx_xx - orr r9,r9,r11,lsl#8 - orr r9,r9,r12,lsl#16 + add r7,r7,r3,ror#27 @ E+=ROR(A,27) +#ifdef __ARMEL__ + rev r9,r9 @ byte swap +#endif +#endif and r10,r4,r10,ror#2 add r7,r7,r9 @ E+=X[i] eor r10,r10,r6,ror#2 @ F_00_19(B,C,D) @@ -115,15 +177,15 @@ sha1_block_data_order: add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r3,r10,ror#2 @ F_xx_xx @ F_xx_xx add r6,r6,r9 @ E+=X[i] - str r9,[r14,#-4]! eor r10,r10,r5,ror#2 @ F_00_19(B,C,D) add r6,r6,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] @@ -132,15 +194,15 @@ sha1_block_data_order: add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r7,r10,ror#2 @ F_xx_xx @ F_xx_xx add r5,r5,r9 @ E+=X[i] - str r9,[r14,#-4]! eor r10,r10,r4,ror#2 @ F_00_19(B,C,D) add r5,r5,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] @@ -149,15 +211,15 @@ sha1_block_data_order: add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r6,r10,ror#2 @ F_xx_xx @ F_xx_xx add r4,r4,r9 @ E+=X[i] - str r9,[r14,#-4]! eor r10,r10,r3,ror#2 @ F_00_19(B,C,D) add r4,r4,r10 @ E+=F_00_19(B,C,D) ldr r9,[r14,#15*4] @@ -166,20 +228,19 @@ sha1_block_data_order: add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r5,r10,ror#2 @ F_xx_xx @ F_xx_xx add r3,r3,r9 @ E+=X[i] - str r9,[r14,#-4]! 
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D) add r3,r3,r10 @ E+=F_00_19(B,C,D) ldr r8,.LK_20_39 @ [+15+16*4] - sub sp,sp,#20*4 cmn sp,#0 @ [+3], clear carry to denote 20_39 .L_20_39_or_60_79: ldr r9,[r14,#15*4] @@ -188,15 +249,15 @@ sha1_block_data_order: add r7,r8,r7,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r5,r6 @ F_xx_xx mov r9,r9,ror#31 add r7,r7,r3,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! eor r10,r4,r10,ror#2 @ F_xx_xx @ F_xx_xx add r7,r7,r9 @ E+=X[i] - str r9,[r14,#-4]! add r7,r7,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] @@ -204,15 +265,15 @@ sha1_block_data_order: add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! eor r10,r3,r10,ror#2 @ F_xx_xx @ F_xx_xx add r6,r6,r9 @ E+=X[i] - str r9,[r14,#-4]! add r6,r6,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] @@ -220,15 +281,15 @@ sha1_block_data_order: add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! eor r10,r7,r10,ror#2 @ F_xx_xx @ F_xx_xx add r5,r5,r9 @ E+=X[i] - str r9,[r14,#-4]! add r5,r5,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] @@ -236,15 +297,15 @@ sha1_block_data_order: add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! eor r10,r6,r10,ror#2 @ F_xx_xx @ F_xx_xx add r4,r4,r9 @ E+=X[i] - str r9,[r14,#-4]! add r4,r4,r10 @ E+=F_20_39(B,C,D) ldr r9,[r14,#15*4] ldr r10,[r14,#13*4] @@ -252,15 +313,15 @@ sha1_block_data_order: add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! eor r10,r5,r10,ror#2 @ F_xx_xx @ F_xx_xx add r3,r3,r9 @ E+=X[i] - str r9,[r14,#-4]! add r3,r3,r10 @ E+=F_20_39(B,C,D) teq r14,sp @ preserve carry bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4] @@ -275,15 +336,15 @@ sha1_block_data_order: add r7,r8,r7,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r5,r6 @ F_xx_xx mov r9,r9,ror#31 add r7,r7,r3,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r4,r10,ror#2 @ F_xx_xx and r11,r5,r6 @ F_xx_xx add r7,r7,r9 @ E+=X[i] - str r9,[r14,#-4]! add r7,r7,r10 @ E+=F_40_59(B,C,D) add r7,r7,r11,ror#2 ldr r9,[r14,#15*4] @@ -292,15 +353,15 @@ sha1_block_data_order: add r6,r8,r6,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r4,r5 @ F_xx_xx mov r9,r9,ror#31 add r6,r6,r7,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r3,r10,ror#2 @ F_xx_xx and r11,r4,r5 @ F_xx_xx add r6,r6,r9 @ E+=X[i] - str r9,[r14,#-4]! 
add r6,r6,r10 @ E+=F_40_59(B,C,D) add r6,r6,r11,ror#2 ldr r9,[r14,#15*4] @@ -309,15 +370,15 @@ sha1_block_data_order: add r5,r8,r5,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r3,r4 @ F_xx_xx mov r9,r9,ror#31 add r5,r5,r6,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r7,r10,ror#2 @ F_xx_xx and r11,r3,r4 @ F_xx_xx add r5,r5,r9 @ E+=X[i] - str r9,[r14,#-4]! add r5,r5,r10 @ E+=F_40_59(B,C,D) add r5,r5,r11,ror#2 ldr r9,[r14,#15*4] @@ -326,15 +387,15 @@ sha1_block_data_order: add r4,r8,r4,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r7,r3 @ F_xx_xx mov r9,r9,ror#31 add r4,r4,r5,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r6,r10,ror#2 @ F_xx_xx and r11,r7,r3 @ F_xx_xx add r4,r4,r9 @ E+=X[i] - str r9,[r14,#-4]! add r4,r4,r10 @ E+=F_40_59(B,C,D) add r4,r4,r11,ror#2 ldr r9,[r14,#15*4] @@ -343,15 +404,15 @@ sha1_block_data_order: add r3,r8,r3,ror#2 @ E+=K_xx_xx ldr r12,[r14,#2*4] eor r9,r9,r10 - eor r11,r11,r12 + eor r11,r11,r12 @ 1 cycle stall eor r10,r6,r7 @ F_xx_xx mov r9,r9,ror#31 add r3,r3,r4,ror#27 @ E+=ROR(A,27) eor r9,r9,r11,ror#31 + str r9,[r14,#-4]! and r10,r5,r10,ror#2 @ F_xx_xx and r11,r6,r7 @ F_xx_xx add r3,r3,r9 @ E+=X[i] - str r9,[r14,#-4]! add r3,r3,r10 @ E+=F_40_59(B,C,D) add r3,r3,r11,ror#2 teq r14,sp @@ -373,10 +434,14 @@ sha1_block_data_order: teq r1,r2 bne .Lloop @ [+18], total 1307 +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else ldmia sp!,{r4-r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif .align 2 .LK_00_19: .word 0x5a827999 .LK_20_39: .word 0x6ed9eba1 diff --git a/app/openssl/crypto/sha/asm/sha1-ia64.pl b/app/openssl/crypto/sha/asm/sha1-ia64.pl index 51c4f47e..02d35d16 100644 --- a/app/openssl/crypto/sha/asm/sha1-ia64.pl +++ b/app/openssl/crypto/sha/asm/sha1-ia64.pl @@ -15,7 +15,7 @@ # is >50% better than HP C and >2x better than gcc. $code=<<___; -.ident \"sha1-ia64.s, version 1.2\" +.ident \"sha1-ia64.s, version 1.3\" .ident \"IA-64 ISA artwork by Andy Polyakov \" .explicit @@ -26,14 +26,10 @@ if ($^O eq "hpux") { $ADDP="addp4"; for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); } } else { $ADDP="add"; } -for (@ARGV) { $big_endian=1 if (/\-DB_ENDIAN/); - $big_endian=0 if (/\-DL_ENDIAN/); } -if (!defined($big_endian)) - { $big_endian=(unpack('L',pack('N',1))==1); } #$human=1; if ($human) { # useful for visual code auditing... - ($A,$B,$C,$D,$E,$T) = ("A","B","C","D","E","T"); + ($A,$B,$C,$D,$E) = ("A","B","C","D","E"); ($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4"); ($K_00_19, $K_20_39, $K_40_59, $K_60_79) = ( "K_00_19","K_20_39","K_40_59","K_60_79" ); @@ -41,47 +37,50 @@ if ($human) { # useful for visual code auditing... 
"X8", "X9","X10","X11","X12","X13","X14","X15" ); } else { - ($A,$B,$C,$D,$E,$T) = ("loc0","loc1","loc2","loc3","loc4","loc5"); - ($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10"); + ($A,$B,$C,$D,$E) = ("loc0","loc1","loc2","loc3","loc4"); + ($h0,$h1,$h2,$h3,$h4) = ("loc5","loc6","loc7","loc8","loc9"); ($K_00_19, $K_20_39, $K_40_59, $K_60_79) = - ( "r14", "r15", "loc11", "loc12" ); + ( "r14", "r15", "loc10", "loc11" ); @X= ( "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" ); } sub BODY_00_15 { local *code=shift; -local ($i,$a,$b,$c,$d,$e,$f)=@_; +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +my $Xn=@X[$j%16]; $code.=<<___ if ($i==0); -{ .mmi; ld1 $X[$i&0xf]=[inp],2 // MSB +{ .mmi; ld1 $X[$i]=[inp],2 // MSB ld1 tmp2=[tmp3],2 };; { .mmi; ld1 tmp0=[inp],2 ld1 tmp4=[tmp3],2 // LSB - dep $X[$i&0xf]=$X[$i&0xf],tmp2,8,8 };; + dep $X[$i]=$X[$i],tmp2,8,8 };; ___ if ($i<15) { $code.=<<___; -{ .mmi; ld1 $X[($i+1)&0xf]=[inp],2 // +1 +{ .mmi; ld1 $Xn=[inp],2 // forward Xload + nop.m 0x0 dep tmp1=tmp0,tmp4,8,8 };; -{ .mmi; ld1 tmp2=[tmp3],2 // +1 +{ .mmi; ld1 tmp2=[tmp3],2 // forward Xload and tmp4=$c,$b - dep $X[$i&0xf]=$X[$i&0xf],tmp1,16,16 } //;; -{ .mmi; andcm tmp1=$d,$b - add tmp0=$e,$K_00_19 + dep $X[$i]=$X[$i],tmp1,16,16} //;; +{ .mmi; add $e=$e,$K_00_19 // e+=K_00_19 + andcm tmp1=$d,$b dep.z tmp5=$a,5,27 };; // a<<5 -{ .mmi; or tmp4=tmp4,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) - add $f=tmp0,$X[$i&0xf] // f=xi+e+K_00_19 +{ .mmi; add $e=$e,$X[$i] // e+=Xload + or tmp4=tmp4,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) extr.u tmp1=$a,27,5 };; // a>>27 -{ .mmi; ld1 tmp0=[inp],2 // +1 - add $f=$f,tmp4 // f+=F_00_19(b,c,d) +{ .mmi; ld1 tmp0=[inp],2 // forward Xload + add $e=$e,tmp4 // e+=F_00_19(b,c,d) shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) -{ .mmi; ld1 tmp4=[tmp3],2 // +1 +{ .mmi; ld1 tmp4=[tmp3],2 // forward Xload or tmp5=tmp1,tmp5 // ROTATE(a,5) mux2 tmp6=$a,0x44 };; // see b in next iteration -{ .mii; add $f=$f,tmp5 // f+=ROTATE(a,5) - dep $X[($i+1)&0xf]=$X[($i+1)&0xf],tmp2,8,8 // +1 - mux2 $X[$i&0xf]=$X[$i&0xf],0x44 } //;; +{ .mii; add $e=$e,tmp5 // e+=ROTATE(a,5) + dep $Xn=$Xn,tmp2,8,8 // forward Xload + mux2 $X[$i]=$X[$i],0x44 } //;; ___ } @@ -89,24 +88,24 @@ else { $code.=<<___; { .mii; and tmp3=$c,$b dep tmp1=tmp0,tmp4,8,8;; - dep $X[$i&0xf]=$X[$i&0xf],tmp1,16,16 } //;; -{ .mmi; andcm tmp1=$d,$b - add tmp0=$e,$K_00_19 + dep $X[$i]=$X[$i],tmp1,16,16} //;; +{ .mmi; add $e=$e,$K_00_19 // e+=K_00_19 + andcm tmp1=$d,$b dep.z tmp5=$a,5,27 };; // a<<5 -{ .mmi; or tmp4=tmp3,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) - add $f=tmp0,$X[$i&0xf] // f=xi+e+K_00_19 +{ .mmi; add $e=$e,$X[$i] // e+=Xupdate + or tmp4=tmp3,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) extr.u tmp1=$a,27,5 } // a>>27 -{ .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 - xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 +{ .mmi; xor $Xn=$Xn,$X[($j+2)%16] // forward Xupdate + xor tmp3=$X[($j+8)%16],$X[($j+13)%16] // forward Xupdate nop.i 0 };; -{ .mmi; add $f=$f,tmp4 // f+=F_00_19(b,c,d) - xor tmp2=tmp2,tmp3 // +1 +{ .mmi; add $e=$e,tmp4 // e+=F_00_19(b,c,d) + xor $Xn=$Xn,tmp3 // forward Xupdate shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) mux2 tmp6=$a,0x44 };; // see b in next iteration -{ .mii; add $f=$f,tmp1 // f+=ROTATE(a,5) - shrp $e=tmp2,tmp2,31 // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) - mux2 $X[$i&0xf]=$X[$i&0xf],0x44 };; +{ .mii; add $e=$e,tmp1 // e+=ROTATE(a,5) + shrp $Xn=$Xn,$Xn,31 // ROTATE(x[0]^x[2]^x[8]^x[13],1) + mux2 $X[$i]=$X[$i],0x44 };; 
___ } @@ -114,27 +113,28 @@ ___ sub BODY_16_19 { local *code=shift; -local ($i,$a,$b,$c,$d,$e,$f)=@_; +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +my $Xn=@X[$j%16]; $code.=<<___; -{ .mmi; mov $X[$i&0xf]=$f // Xupdate - and tmp0=$c,$b +{ .mib; add $e=$e,$K_00_19 // e+=K_00_19 dep.z tmp5=$a,5,27 } // a<<5 -{ .mmi; andcm tmp1=$d,$b - add tmp4=$e,$K_00_19 };; -{ .mmi; or tmp0=tmp0,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) - add $f=$f,tmp4 // f+=e+K_00_19 +{ .mib; andcm tmp1=$d,$b + and tmp0=$c,$b };; +{ .mmi; add $e=$e,$X[$i%16] // e+=Xupdate + or tmp0=tmp0,tmp1 // F_00_19(b,c,d)=(b&c)|(~b&d) extr.u tmp1=$a,27,5 } // a>>27 -{ .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 - xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 +{ .mmi; xor $Xn=$Xn,$X[($j+2)%16] // forward Xupdate + xor tmp3=$X[($j+8)%16],$X[($j+13)%16] // forward Xupdate nop.i 0 };; -{ .mmi; add $f=$f,tmp0 // f+=F_00_19(b,c,d) - xor tmp2=tmp2,tmp3 // +1 +{ .mmi; add $e=$e,tmp0 // f+=F_00_19(b,c,d) + xor $Xn=$Xn,tmp3 // forward Xupdate shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) mux2 tmp6=$a,0x44 };; // see b in next iteration -{ .mii; add $f=$f,tmp1 // f+=ROTATE(a,5) - shrp $e=tmp2,tmp2,31 // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) +{ .mii; add $e=$e,tmp1 // e+=ROTATE(a,5) + shrp $Xn=$Xn,$Xn,31 // ROTATE(x[0]^x[2]^x[8]^x[13],1) nop.i 0 };; ___ @@ -142,49 +142,47 @@ ___ sub BODY_20_39 { local *code=shift; -local ($i,$a,$b,$c,$d,$e,$f,$Konst)=@_; +my ($i,$a,$b,$c,$d,$e,$Konst)=@_; $Konst = $K_20_39 if (!defined($Konst)); +my $j=$i+1; +my $Xn=@X[$j%16]; if ($i<79) { $code.=<<___; -{ .mib; mov $X[$i&0xf]=$f // Xupdate +{ .mib; add $e=$e,$Konst // e+=K_XX_XX dep.z tmp5=$a,5,27 } // a<<5 { .mib; xor tmp0=$c,$b - add tmp4=$e,$Konst };; -{ .mmi; xor tmp0=tmp0,$d // F_20_39(b,c,d)=b^c^d - add $f=$f,tmp4 // f+=e+K_20_39 + xor $Xn=$Xn,$X[($j+2)%16] };; // forward Xupdate +{ .mib; add $e=$e,$X[$i%16] // e+=Xupdate extr.u tmp1=$a,27,5 } // a>>27 -{ .mmi; xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 - xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 - nop.i 0 };; -{ .mmi; add $f=$f,tmp0 // f+=F_20_39(b,c,d) - xor tmp2=tmp2,tmp3 // +1 +{ .mib; xor tmp0=tmp0,$d // F_20_39(b,c,d)=b^c^d + xor $Xn=$Xn,$X[($j+8)%16] };; // forward Xupdate +{ .mmi; add $e=$e,tmp0 // e+=F_20_39(b,c,d) + xor $Xn=$Xn,$X[($j+13)%16] // forward Xupdate shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) { .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) mux2 tmp6=$a,0x44 };; // see b in next iteration -{ .mii; add $f=$f,tmp1 // f+=ROTATE(a,5) - shrp $e=tmp2,tmp2,31 // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) +{ .mii; add $e=$e,tmp1 // e+=ROTATE(a,5) + shrp $Xn=$Xn,$Xn,31 // ROTATE(x[0]^x[2]^x[8]^x[13],1) nop.i 0 };; ___ } else { $code.=<<___; -{ .mib; mov $X[$i&0xf]=$f // Xupdate +{ .mib; add $e=$e,$Konst // e+=K_60_79 dep.z tmp5=$a,5,27 } // a<<5 { .mib; xor tmp0=$c,$b - add tmp4=$e,$Konst };; -{ .mib; xor tmp0=tmp0,$d // F_20_39(b,c,d)=b^c^d - extr.u tmp1=$a,27,5 } // a>>27 -{ .mib; add $f=$f,tmp4 // f+=e+K_20_39 add $h1=$h1,$a };; // wrap up -{ .mmi; add $f=$f,tmp0 // f+=F_20_39(b,c,d) - shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) ;;? 
-{ .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) +{ .mib; add $e=$e,$X[$i%16] // e+=Xupdate + extr.u tmp1=$a,27,5 } // a>>27 +{ .mib; xor tmp0=tmp0,$d // F_20_39(b,c,d)=b^c^d add $h3=$h3,$c };; // wrap up -{ .mib; add tmp3=1,inp // used in unaligned codepath - add $f=$f,tmp1 } // f+=ROTATE(a,5) -{ .mib; add $h2=$h2,$b // wrap up +{ .mmi; add $e=$e,tmp0 // e+=F_20_39(b,c,d) + or tmp1=tmp1,tmp5 // ROTATE(a,5) + shrp $b=tmp6,tmp6,2 };; // b=ROTATE(b,30) ;;? +{ .mmi; add $e=$e,tmp1 // e+=ROTATE(a,5) + add tmp3=1,inp // used in unaligned codepath add $h4=$h4,$d };; // wrap up ___ @@ -193,29 +191,29 @@ ___ sub BODY_40_59 { local *code=shift; -local ($i,$a,$b,$c,$d,$e,$f)=@_; +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +my $Xn=@X[$j%16]; $code.=<<___; -{ .mmi; mov $X[$i&0xf]=$f // Xupdate - and tmp0=$c,$b +{ .mib; add $e=$e,$K_40_59 // e+=K_40_59 dep.z tmp5=$a,5,27 } // a<<5 -{ .mmi; and tmp1=$d,$b - add tmp4=$e,$K_40_59 };; -{ .mmi; or tmp0=tmp0,tmp1 // (b&c)|(b&d) - add $f=$f,tmp4 // f+=e+K_40_59 +{ .mib; and tmp1=$c,$d + xor tmp0=$c,$d };; +{ .mmi; add $e=$e,$X[$i%16] // e+=Xupdate + add tmp5=tmp5,tmp1 // a<<5+(c&d) extr.u tmp1=$a,27,5 } // a>>27 -{ .mmi; and tmp4=$c,$d - xor tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf] // +1 - xor tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1 - };; -{ .mmi; or tmp1=tmp1,tmp5 // ROTATE(a,5) - xor tmp2=tmp2,tmp3 // +1 +{ .mmi; and tmp0=tmp0,$b + xor $Xn=$Xn,$X[($j+2)%16] // forward Xupdate + xor tmp3=$X[($j+8)%16],$X[($j+13)%16] };; // forward Xupdate +{ .mmi; add $e=$e,tmp0 // e+=b&(c^d) + add tmp5=tmp5,tmp1 // ROTATE(a,5)+(c&d) shrp $b=tmp6,tmp6,2 } // b=ROTATE(b,30) -{ .mmi; or tmp0=tmp0,tmp4 // F_40_59(b,c,d)=(b&c)|(b&d)|(c&d) +{ .mmi; xor $Xn=$Xn,tmp3 mux2 tmp6=$a,0x44 };; // see b in next iteration -{ .mii; add $f=$f,tmp0 // f+=F_40_59(b,c,d) - shrp $e=tmp2,tmp2,31;; // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1) - add $f=$f,tmp1 };; // f+=ROTATE(a,5) +{ .mii; add $e=$e,tmp5 // e+=ROTATE(a,5)+(c&d) + shrp $Xn=$Xn,$Xn,31 // ROTATE(x[0]^x[2]^x[8]^x[13],1) + nop.i 0x0 };; ___ } @@ -237,7 +235,7 @@ inp=r33; // in1 .align 32 sha1_block_data_order: .prologue -{ .mmi; alloc tmp1=ar.pfs,3,15,0,0 +{ .mmi; alloc tmp1=ar.pfs,3,14,0,0 $ADDP tmp0=4,ctx .save ar.lc,r3 mov r3=ar.lc } @@ -245,8 +243,8 @@ sha1_block_data_order: $ADDP inp=0,inp mov r2=pr };; tmp4=in2; -tmp5=loc13; -tmp6=loc14; +tmp5=loc12; +tmp6=loc13; .body { .mlx; ld4 $h0=[ctx],8 movl $K_00_19=0x5a827999 } @@ -273,7 +271,8 @@ tmp6=loc14; ___ -{ my $i,@V=($A,$B,$C,$D,$E,$T); +{ my $i; + my @V=($A,$B,$C,$D,$E); for($i=0;$i<16;$i++) { &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); } for(;$i<20;$i++) { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); } @@ -281,12 +280,12 @@ ___ for(;$i<60;$i++) { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); } for(;$i<80;$i++) { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); } - (($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check + (($V[0] eq $A) and ($V[4] eq $E)) or die; # double-check } $code.=<<___; -{ .mmb; add $h0=$h0,$E - nop.m 0 +{ .mmb; add $h0=$h0,$A + add $h2=$h2,$C br.ctop.dptk.many .Ldtop };; .Ldend: { .mmi; add tmp0=4,ctx diff --git a/app/openssl/crypto/sha/asm/sha1-mips.S b/app/openssl/crypto/sha/asm/sha1-mips.S new file mode 100644 index 00000000..865da255 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha1-mips.S @@ -0,0 +1,1664 @@ +#ifdef OPENSSL_FIPSCANISTER +# include +#endif + +.text + +.set noat +.set noreorder +.align 5 +.globl sha1_block_data_order +.ent sha1_block_data_order +sha1_block_data_order: + .frame $29,16*4,$31 + .mask 3237937152,-4 + .set noreorder + sub 
$29,16*4 + sw $31,(16-1)*4($29) + sw $30,(16-2)*4($29) + sw $23,(16-3)*4($29) + sw $22,(16-4)*4($29) + sw $21,(16-5)*4($29) + sw $20,(16-6)*4($29) + sw $19,(16-7)*4($29) + sw $18,(16-8)*4($29) + sw $17,(16-9)*4($29) + sw $16,(16-10)*4($29) + sll $6,6 + add $6,$5 + sw $6,0($29) + lw $1,0($4) + lw $2,4($4) + lw $3,8($4) + lw $7,12($4) + b .Loop + lw $24,16($4) +.align 4 +.Loop: + .set reorder + lwl $8,3($5) + lui $31,0x5a82 + lwr $8,0($5) + ori $31,0x7999 # K_00_19 + srl $25,$8,24 # byte swap(0) + srl $6,$8,8 + andi $30,$8,0xFF00 + sll $8,$8,24 + andi $6,0xFF00 + sll $30,$30,8 + or $8,$25 + or $6,$30 + or $8,$6 + lwl $9,1*4+3($5) + sll $25,$1,5 # 0 + addu $24,$31 + lwr $9,1*4+0($5) + srl $6,$1,27 + addu $24,$25 + xor $25,$3,$7 + addu $24,$6 + sll $30,$2,30 + and $25,$2 + srl $2,$2,2 + xor $25,$7 + addu $24,$8 + or $2,$30 + addu $24,$25 + srl $25,$9,24 # byte swap(1) + srl $6,$9,8 + andi $30,$9,0xFF00 + sll $9,$9,24 + andi $6,0xFF00 + sll $30,$30,8 + or $9,$25 + or $6,$30 + or $9,$6 + lwl $10,2*4+3($5) + sll $25,$24,5 # 1 + addu $7,$31 + lwr $10,2*4+0($5) + srl $6,$24,27 + addu $7,$25 + xor $25,$2,$3 + addu $7,$6 + sll $30,$1,30 + and $25,$1 + srl $1,$1,2 + xor $25,$3 + addu $7,$9 + or $1,$30 + addu $7,$25 + srl $25,$10,24 # byte swap(2) + srl $6,$10,8 + andi $30,$10,0xFF00 + sll $10,$10,24 + andi $6,0xFF00 + sll $30,$30,8 + or $10,$25 + or $6,$30 + or $10,$6 + lwl $11,3*4+3($5) + sll $25,$7,5 # 2 + addu $3,$31 + lwr $11,3*4+0($5) + srl $6,$7,27 + addu $3,$25 + xor $25,$1,$2 + addu $3,$6 + sll $30,$24,30 + and $25,$24 + srl $24,$24,2 + xor $25,$2 + addu $3,$10 + or $24,$30 + addu $3,$25 + srl $25,$11,24 # byte swap(3) + srl $6,$11,8 + andi $30,$11,0xFF00 + sll $11,$11,24 + andi $6,0xFF00 + sll $30,$30,8 + or $11,$25 + or $6,$30 + or $11,$6 + lwl $12,4*4+3($5) + sll $25,$3,5 # 3 + addu $2,$31 + lwr $12,4*4+0($5) + srl $6,$3,27 + addu $2,$25 + xor $25,$24,$1 + addu $2,$6 + sll $30,$7,30 + and $25,$7 + srl $7,$7,2 + xor $25,$1 + addu $2,$11 + or $7,$30 + addu $2,$25 + srl $25,$12,24 # byte swap(4) + srl $6,$12,8 + andi $30,$12,0xFF00 + sll $12,$12,24 + andi $6,0xFF00 + sll $30,$30,8 + or $12,$25 + or $6,$30 + or $12,$6 + lwl $13,5*4+3($5) + sll $25,$2,5 # 4 + addu $1,$31 + lwr $13,5*4+0($5) + srl $6,$2,27 + addu $1,$25 + xor $25,$7,$24 + addu $1,$6 + sll $30,$3,30 + and $25,$3 + srl $3,$3,2 + xor $25,$24 + addu $1,$12 + or $3,$30 + addu $1,$25 + srl $25,$13,24 # byte swap(5) + srl $6,$13,8 + andi $30,$13,0xFF00 + sll $13,$13,24 + andi $6,0xFF00 + sll $30,$30,8 + or $13,$25 + or $6,$30 + or $13,$6 + lwl $14,6*4+3($5) + sll $25,$1,5 # 5 + addu $24,$31 + lwr $14,6*4+0($5) + srl $6,$1,27 + addu $24,$25 + xor $25,$3,$7 + addu $24,$6 + sll $30,$2,30 + and $25,$2 + srl $2,$2,2 + xor $25,$7 + addu $24,$13 + or $2,$30 + addu $24,$25 + srl $25,$14,24 # byte swap(6) + srl $6,$14,8 + andi $30,$14,0xFF00 + sll $14,$14,24 + andi $6,0xFF00 + sll $30,$30,8 + or $14,$25 + or $6,$30 + or $14,$6 + lwl $15,7*4+3($5) + sll $25,$24,5 # 6 + addu $7,$31 + lwr $15,7*4+0($5) + srl $6,$24,27 + addu $7,$25 + xor $25,$2,$3 + addu $7,$6 + sll $30,$1,30 + and $25,$1 + srl $1,$1,2 + xor $25,$3 + addu $7,$14 + or $1,$30 + addu $7,$25 + srl $25,$15,24 # byte swap(7) + srl $6,$15,8 + andi $30,$15,0xFF00 + sll $15,$15,24 + andi $6,0xFF00 + sll $30,$30,8 + or $15,$25 + or $6,$30 + or $15,$6 + lwl $16,8*4+3($5) + sll $25,$7,5 # 7 + addu $3,$31 + lwr $16,8*4+0($5) + srl $6,$7,27 + addu $3,$25 + xor $25,$1,$2 + addu $3,$6 + sll $30,$24,30 + and $25,$24 + srl $24,$24,2 + xor $25,$2 + addu $3,$15 + or $24,$30 + addu $3,$25 + srl 
$25,$16,24 # byte swap(8) + srl $6,$16,8 + andi $30,$16,0xFF00 + sll $16,$16,24 + andi $6,0xFF00 + sll $30,$30,8 + or $16,$25 + or $6,$30 + or $16,$6 + lwl $17,9*4+3($5) + sll $25,$3,5 # 8 + addu $2,$31 + lwr $17,9*4+0($5) + srl $6,$3,27 + addu $2,$25 + xor $25,$24,$1 + addu $2,$6 + sll $30,$7,30 + and $25,$7 + srl $7,$7,2 + xor $25,$1 + addu $2,$16 + or $7,$30 + addu $2,$25 + srl $25,$17,24 # byte swap(9) + srl $6,$17,8 + andi $30,$17,0xFF00 + sll $17,$17,24 + andi $6,0xFF00 + sll $30,$30,8 + or $17,$25 + or $6,$30 + or $17,$6 + lwl $18,10*4+3($5) + sll $25,$2,5 # 9 + addu $1,$31 + lwr $18,10*4+0($5) + srl $6,$2,27 + addu $1,$25 + xor $25,$7,$24 + addu $1,$6 + sll $30,$3,30 + and $25,$3 + srl $3,$3,2 + xor $25,$24 + addu $1,$17 + or $3,$30 + addu $1,$25 + srl $25,$18,24 # byte swap(10) + srl $6,$18,8 + andi $30,$18,0xFF00 + sll $18,$18,24 + andi $6,0xFF00 + sll $30,$30,8 + or $18,$25 + or $6,$30 + or $18,$6 + lwl $19,11*4+3($5) + sll $25,$1,5 # 10 + addu $24,$31 + lwr $19,11*4+0($5) + srl $6,$1,27 + addu $24,$25 + xor $25,$3,$7 + addu $24,$6 + sll $30,$2,30 + and $25,$2 + srl $2,$2,2 + xor $25,$7 + addu $24,$18 + or $2,$30 + addu $24,$25 + srl $25,$19,24 # byte swap(11) + srl $6,$19,8 + andi $30,$19,0xFF00 + sll $19,$19,24 + andi $6,0xFF00 + sll $30,$30,8 + or $19,$25 + or $6,$30 + or $19,$6 + lwl $20,12*4+3($5) + sll $25,$24,5 # 11 + addu $7,$31 + lwr $20,12*4+0($5) + srl $6,$24,27 + addu $7,$25 + xor $25,$2,$3 + addu $7,$6 + sll $30,$1,30 + and $25,$1 + srl $1,$1,2 + xor $25,$3 + addu $7,$19 + or $1,$30 + addu $7,$25 + srl $25,$20,24 # byte swap(12) + srl $6,$20,8 + andi $30,$20,0xFF00 + sll $20,$20,24 + andi $6,0xFF00 + sll $30,$30,8 + or $20,$25 + or $6,$30 + or $20,$6 + lwl $21,13*4+3($5) + sll $25,$7,5 # 12 + addu $3,$31 + lwr $21,13*4+0($5) + srl $6,$7,27 + addu $3,$25 + xor $25,$1,$2 + addu $3,$6 + sll $30,$24,30 + and $25,$24 + srl $24,$24,2 + xor $25,$2 + addu $3,$20 + or $24,$30 + addu $3,$25 + srl $25,$21,24 # byte swap(13) + srl $6,$21,8 + andi $30,$21,0xFF00 + sll $21,$21,24 + andi $6,0xFF00 + sll $30,$30,8 + or $21,$25 + or $6,$30 + or $21,$6 + lwl $22,14*4+3($5) + sll $25,$3,5 # 13 + addu $2,$31 + lwr $22,14*4+0($5) + srl $6,$3,27 + addu $2,$25 + xor $25,$24,$1 + addu $2,$6 + sll $30,$7,30 + and $25,$7 + srl $7,$7,2 + xor $25,$1 + addu $2,$21 + or $7,$30 + addu $2,$25 + srl $25,$22,24 # byte swap(14) + srl $6,$22,8 + andi $30,$22,0xFF00 + sll $22,$22,24 + andi $6,0xFF00 + sll $30,$30,8 + or $22,$25 + or $6,$30 + or $22,$6 + lwl $23,15*4+3($5) + sll $25,$2,5 # 14 + addu $1,$31 + lwr $23,15*4+0($5) + srl $6,$2,27 + addu $1,$25 + xor $25,$7,$24 + addu $1,$6 + sll $30,$3,30 + and $25,$3 + srl $3,$3,2 + xor $25,$24 + addu $1,$22 + or $3,$30 + addu $1,$25 + srl $25,$23,24 # byte swap(15) + srl $6,$23,8 + andi $30,$23,0xFF00 + sll $23,$23,24 + andi $6,0xFF00 + sll $30,$30,8 + or $23,$25 + or $23,$6 + or $23,$30 + xor $8,$10 + sll $25,$1,5 # 15 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $8,$16 + xor $25,$3,$7 + addu $24,$6 + xor $8,$21 + sll $30,$2,30 + and $25,$2 + srl $6,$8,31 + addu $8,$8 + srl $2,$2,2 + xor $25,$7 + or $8,$6 + addu $24,$23 + or $2,$30 + addu $24,$25 + xor $9,$11 + sll $25,$24,5 # 16 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $9,$17 + xor $25,$2,$3 + addu $7,$6 + xor $9,$22 + sll $30,$1,30 + and $25,$1 + srl $6,$9,31 + addu $9,$9 + srl $1,$1,2 + xor $25,$3 + or $9,$6 + addu $7,$8 + or $1,$30 + addu $7,$25 + xor $10,$12 + sll $25,$7,5 # 17 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $10,$18 + xor $25,$1,$2 + addu $3,$6 + xor $10,$23 + sll 
$30,$24,30 + and $25,$24 + srl $6,$10,31 + addu $10,$10 + srl $24,$24,2 + xor $25,$2 + or $10,$6 + addu $3,$9 + or $24,$30 + addu $3,$25 + xor $11,$13 + sll $25,$3,5 # 18 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $11,$19 + xor $25,$24,$1 + addu $2,$6 + xor $11,$8 + sll $30,$7,30 + and $25,$7 + srl $6,$11,31 + addu $11,$11 + srl $7,$7,2 + xor $25,$1 + or $11,$6 + addu $2,$10 + or $7,$30 + addu $2,$25 + xor $12,$14 + sll $25,$2,5 # 19 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $12,$20 + xor $25,$7,$24 + addu $1,$6 + xor $12,$9 + sll $30,$3,30 + and $25,$3 + srl $6,$12,31 + addu $12,$12 + srl $3,$3,2 + xor $25,$24 + or $12,$6 + addu $1,$11 + or $3,$30 + addu $1,$25 + lui $31,0x6ed9 + ori $31,0xeba1 # K_20_39 + xor $13,$15 + sll $25,$1,5 # 20 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $13,$21 + xor $25,$3,$7 + addu $24,$6 + xor $13,$10 + sll $30,$2,30 + xor $25,$2 + srl $6,$13,31 + addu $13,$13 + srl $2,$2,2 + addu $24,$12 + or $13,$6 + or $2,$30 + addu $24,$25 + xor $14,$16 + sll $25,$24,5 # 21 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $14,$22 + xor $25,$2,$3 + addu $7,$6 + xor $14,$11 + sll $30,$1,30 + xor $25,$1 + srl $6,$14,31 + addu $14,$14 + srl $1,$1,2 + addu $7,$13 + or $14,$6 + or $1,$30 + addu $7,$25 + xor $15,$17 + sll $25,$7,5 # 22 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $15,$23 + xor $25,$1,$2 + addu $3,$6 + xor $15,$12 + sll $30,$24,30 + xor $25,$24 + srl $6,$15,31 + addu $15,$15 + srl $24,$24,2 + addu $3,$14 + or $15,$6 + or $24,$30 + addu $3,$25 + xor $16,$18 + sll $25,$3,5 # 23 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $16,$8 + xor $25,$24,$1 + addu $2,$6 + xor $16,$13 + sll $30,$7,30 + xor $25,$7 + srl $6,$16,31 + addu $16,$16 + srl $7,$7,2 + addu $2,$15 + or $16,$6 + or $7,$30 + addu $2,$25 + xor $17,$19 + sll $25,$2,5 # 24 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $17,$9 + xor $25,$7,$24 + addu $1,$6 + xor $17,$14 + sll $30,$3,30 + xor $25,$3 + srl $6,$17,31 + addu $17,$17 + srl $3,$3,2 + addu $1,$16 + or $17,$6 + or $3,$30 + addu $1,$25 + xor $18,$20 + sll $25,$1,5 # 25 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $18,$10 + xor $25,$3,$7 + addu $24,$6 + xor $18,$15 + sll $30,$2,30 + xor $25,$2 + srl $6,$18,31 + addu $18,$18 + srl $2,$2,2 + addu $24,$17 + or $18,$6 + or $2,$30 + addu $24,$25 + xor $19,$21 + sll $25,$24,5 # 26 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $19,$11 + xor $25,$2,$3 + addu $7,$6 + xor $19,$16 + sll $30,$1,30 + xor $25,$1 + srl $6,$19,31 + addu $19,$19 + srl $1,$1,2 + addu $7,$18 + or $19,$6 + or $1,$30 + addu $7,$25 + xor $20,$22 + sll $25,$7,5 # 27 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $20,$12 + xor $25,$1,$2 + addu $3,$6 + xor $20,$17 + sll $30,$24,30 + xor $25,$24 + srl $6,$20,31 + addu $20,$20 + srl $24,$24,2 + addu $3,$19 + or $20,$6 + or $24,$30 + addu $3,$25 + xor $21,$23 + sll $25,$3,5 # 28 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $21,$13 + xor $25,$24,$1 + addu $2,$6 + xor $21,$18 + sll $30,$7,30 + xor $25,$7 + srl $6,$21,31 + addu $21,$21 + srl $7,$7,2 + addu $2,$20 + or $21,$6 + or $7,$30 + addu $2,$25 + xor $22,$8 + sll $25,$2,5 # 29 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $22,$14 + xor $25,$7,$24 + addu $1,$6 + xor $22,$19 + sll $30,$3,30 + xor $25,$3 + srl $6,$22,31 + addu $22,$22 + srl $3,$3,2 + addu $1,$21 + or $22,$6 + or $3,$30 + addu $1,$25 + xor $23,$9 + sll $25,$1,5 # 30 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $23,$15 + xor $25,$3,$7 + addu $24,$6 + xor $23,$20 + sll $30,$2,30 + xor $25,$2 + srl $6,$23,31 + addu $23,$23 + srl 
$2,$2,2 + addu $24,$22 + or $23,$6 + or $2,$30 + addu $24,$25 + xor $8,$10 + sll $25,$24,5 # 31 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $8,$16 + xor $25,$2,$3 + addu $7,$6 + xor $8,$21 + sll $30,$1,30 + xor $25,$1 + srl $6,$8,31 + addu $8,$8 + srl $1,$1,2 + addu $7,$23 + or $8,$6 + or $1,$30 + addu $7,$25 + xor $9,$11 + sll $25,$7,5 # 32 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $9,$17 + xor $25,$1,$2 + addu $3,$6 + xor $9,$22 + sll $30,$24,30 + xor $25,$24 + srl $6,$9,31 + addu $9,$9 + srl $24,$24,2 + addu $3,$8 + or $9,$6 + or $24,$30 + addu $3,$25 + xor $10,$12 + sll $25,$3,5 # 33 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $10,$18 + xor $25,$24,$1 + addu $2,$6 + xor $10,$23 + sll $30,$7,30 + xor $25,$7 + srl $6,$10,31 + addu $10,$10 + srl $7,$7,2 + addu $2,$9 + or $10,$6 + or $7,$30 + addu $2,$25 + xor $11,$13 + sll $25,$2,5 # 34 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $11,$19 + xor $25,$7,$24 + addu $1,$6 + xor $11,$8 + sll $30,$3,30 + xor $25,$3 + srl $6,$11,31 + addu $11,$11 + srl $3,$3,2 + addu $1,$10 + or $11,$6 + or $3,$30 + addu $1,$25 + xor $12,$14 + sll $25,$1,5 # 35 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $12,$20 + xor $25,$3,$7 + addu $24,$6 + xor $12,$9 + sll $30,$2,30 + xor $25,$2 + srl $6,$12,31 + addu $12,$12 + srl $2,$2,2 + addu $24,$11 + or $12,$6 + or $2,$30 + addu $24,$25 + xor $13,$15 + sll $25,$24,5 # 36 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $13,$21 + xor $25,$2,$3 + addu $7,$6 + xor $13,$10 + sll $30,$1,30 + xor $25,$1 + srl $6,$13,31 + addu $13,$13 + srl $1,$1,2 + addu $7,$12 + or $13,$6 + or $1,$30 + addu $7,$25 + xor $14,$16 + sll $25,$7,5 # 37 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $14,$22 + xor $25,$1,$2 + addu $3,$6 + xor $14,$11 + sll $30,$24,30 + xor $25,$24 + srl $6,$14,31 + addu $14,$14 + srl $24,$24,2 + addu $3,$13 + or $14,$6 + or $24,$30 + addu $3,$25 + xor $15,$17 + sll $25,$3,5 # 38 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $15,$23 + xor $25,$24,$1 + addu $2,$6 + xor $15,$12 + sll $30,$7,30 + xor $25,$7 + srl $6,$15,31 + addu $15,$15 + srl $7,$7,2 + addu $2,$14 + or $15,$6 + or $7,$30 + addu $2,$25 + xor $16,$18 + sll $25,$2,5 # 39 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $16,$8 + xor $25,$7,$24 + addu $1,$6 + xor $16,$13 + sll $30,$3,30 + xor $25,$3 + srl $6,$16,31 + addu $16,$16 + srl $3,$3,2 + addu $1,$15 + or $16,$6 + or $3,$30 + addu $1,$25 + lui $31,0x8f1b + ori $31,0xbcdc # K_40_59 + xor $17,$19 + sll $25,$1,5 # 40 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $17,$9 + and $25,$3,$7 + addu $24,$6 + xor $17,$14 + sll $30,$2,30 + addu $24,$25 + srl $6,$17,31 + xor $25,$3,$7 + addu $17,$17 + and $25,$2 + srl $2,$2,2 + or $17,$6 + addu $24,$16 + or $2,$30 + addu $24,$25 + xor $18,$20 + sll $25,$24,5 # 41 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $18,$10 + and $25,$2,$3 + addu $7,$6 + xor $18,$15 + sll $30,$1,30 + addu $7,$25 + srl $6,$18,31 + xor $25,$2,$3 + addu $18,$18 + and $25,$1 + srl $1,$1,2 + or $18,$6 + addu $7,$17 + or $1,$30 + addu $7,$25 + xor $19,$21 + sll $25,$7,5 # 42 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $19,$11 + and $25,$1,$2 + addu $3,$6 + xor $19,$16 + sll $30,$24,30 + addu $3,$25 + srl $6,$19,31 + xor $25,$1,$2 + addu $19,$19 + and $25,$24 + srl $24,$24,2 + or $19,$6 + addu $3,$18 + or $24,$30 + addu $3,$25 + xor $20,$22 + sll $25,$3,5 # 43 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $20,$12 + and $25,$24,$1 + addu $2,$6 + xor $20,$17 + sll $30,$7,30 + addu $2,$25 + srl $6,$20,31 + xor $25,$24,$1 + addu $20,$20 + and 
$25,$7 + srl $7,$7,2 + or $20,$6 + addu $2,$19 + or $7,$30 + addu $2,$25 + xor $21,$23 + sll $25,$2,5 # 44 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $21,$13 + and $25,$7,$24 + addu $1,$6 + xor $21,$18 + sll $30,$3,30 + addu $1,$25 + srl $6,$21,31 + xor $25,$7,$24 + addu $21,$21 + and $25,$3 + srl $3,$3,2 + or $21,$6 + addu $1,$20 + or $3,$30 + addu $1,$25 + xor $22,$8 + sll $25,$1,5 # 45 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $22,$14 + and $25,$3,$7 + addu $24,$6 + xor $22,$19 + sll $30,$2,30 + addu $24,$25 + srl $6,$22,31 + xor $25,$3,$7 + addu $22,$22 + and $25,$2 + srl $2,$2,2 + or $22,$6 + addu $24,$21 + or $2,$30 + addu $24,$25 + xor $23,$9 + sll $25,$24,5 # 46 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $23,$15 + and $25,$2,$3 + addu $7,$6 + xor $23,$20 + sll $30,$1,30 + addu $7,$25 + srl $6,$23,31 + xor $25,$2,$3 + addu $23,$23 + and $25,$1 + srl $1,$1,2 + or $23,$6 + addu $7,$22 + or $1,$30 + addu $7,$25 + xor $8,$10 + sll $25,$7,5 # 47 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $8,$16 + and $25,$1,$2 + addu $3,$6 + xor $8,$21 + sll $30,$24,30 + addu $3,$25 + srl $6,$8,31 + xor $25,$1,$2 + addu $8,$8 + and $25,$24 + srl $24,$24,2 + or $8,$6 + addu $3,$23 + or $24,$30 + addu $3,$25 + xor $9,$11 + sll $25,$3,5 # 48 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $9,$17 + and $25,$24,$1 + addu $2,$6 + xor $9,$22 + sll $30,$7,30 + addu $2,$25 + srl $6,$9,31 + xor $25,$24,$1 + addu $9,$9 + and $25,$7 + srl $7,$7,2 + or $9,$6 + addu $2,$8 + or $7,$30 + addu $2,$25 + xor $10,$12 + sll $25,$2,5 # 49 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $10,$18 + and $25,$7,$24 + addu $1,$6 + xor $10,$23 + sll $30,$3,30 + addu $1,$25 + srl $6,$10,31 + xor $25,$7,$24 + addu $10,$10 + and $25,$3 + srl $3,$3,2 + or $10,$6 + addu $1,$9 + or $3,$30 + addu $1,$25 + xor $11,$13 + sll $25,$1,5 # 50 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $11,$19 + and $25,$3,$7 + addu $24,$6 + xor $11,$8 + sll $30,$2,30 + addu $24,$25 + srl $6,$11,31 + xor $25,$3,$7 + addu $11,$11 + and $25,$2 + srl $2,$2,2 + or $11,$6 + addu $24,$10 + or $2,$30 + addu $24,$25 + xor $12,$14 + sll $25,$24,5 # 51 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $12,$20 + and $25,$2,$3 + addu $7,$6 + xor $12,$9 + sll $30,$1,30 + addu $7,$25 + srl $6,$12,31 + xor $25,$2,$3 + addu $12,$12 + and $25,$1 + srl $1,$1,2 + or $12,$6 + addu $7,$11 + or $1,$30 + addu $7,$25 + xor $13,$15 + sll $25,$7,5 # 52 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $13,$21 + and $25,$1,$2 + addu $3,$6 + xor $13,$10 + sll $30,$24,30 + addu $3,$25 + srl $6,$13,31 + xor $25,$1,$2 + addu $13,$13 + and $25,$24 + srl $24,$24,2 + or $13,$6 + addu $3,$12 + or $24,$30 + addu $3,$25 + xor $14,$16 + sll $25,$3,5 # 53 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $14,$22 + and $25,$24,$1 + addu $2,$6 + xor $14,$11 + sll $30,$7,30 + addu $2,$25 + srl $6,$14,31 + xor $25,$24,$1 + addu $14,$14 + and $25,$7 + srl $7,$7,2 + or $14,$6 + addu $2,$13 + or $7,$30 + addu $2,$25 + xor $15,$17 + sll $25,$2,5 # 54 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $15,$23 + and $25,$7,$24 + addu $1,$6 + xor $15,$12 + sll $30,$3,30 + addu $1,$25 + srl $6,$15,31 + xor $25,$7,$24 + addu $15,$15 + and $25,$3 + srl $3,$3,2 + or $15,$6 + addu $1,$14 + or $3,$30 + addu $1,$25 + xor $16,$18 + sll $25,$1,5 # 55 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $16,$8 + and $25,$3,$7 + addu $24,$6 + xor $16,$13 + sll $30,$2,30 + addu $24,$25 + srl $6,$16,31 + xor $25,$3,$7 + addu $16,$16 + and $25,$2 + srl $2,$2,2 + or $16,$6 + addu $24,$15 + or 
$2,$30 + addu $24,$25 + xor $17,$19 + sll $25,$24,5 # 56 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $17,$9 + and $25,$2,$3 + addu $7,$6 + xor $17,$14 + sll $30,$1,30 + addu $7,$25 + srl $6,$17,31 + xor $25,$2,$3 + addu $17,$17 + and $25,$1 + srl $1,$1,2 + or $17,$6 + addu $7,$16 + or $1,$30 + addu $7,$25 + xor $18,$20 + sll $25,$7,5 # 57 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $18,$10 + and $25,$1,$2 + addu $3,$6 + xor $18,$15 + sll $30,$24,30 + addu $3,$25 + srl $6,$18,31 + xor $25,$1,$2 + addu $18,$18 + and $25,$24 + srl $24,$24,2 + or $18,$6 + addu $3,$17 + or $24,$30 + addu $3,$25 + xor $19,$21 + sll $25,$3,5 # 58 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $19,$11 + and $25,$24,$1 + addu $2,$6 + xor $19,$16 + sll $30,$7,30 + addu $2,$25 + srl $6,$19,31 + xor $25,$24,$1 + addu $19,$19 + and $25,$7 + srl $7,$7,2 + or $19,$6 + addu $2,$18 + or $7,$30 + addu $2,$25 + xor $20,$22 + sll $25,$2,5 # 59 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $20,$12 + and $25,$7,$24 + addu $1,$6 + xor $20,$17 + sll $30,$3,30 + addu $1,$25 + srl $6,$20,31 + xor $25,$7,$24 + addu $20,$20 + and $25,$3 + srl $3,$3,2 + or $20,$6 + addu $1,$19 + or $3,$30 + addu $1,$25 + lui $31,0xca62 + ori $31,0xc1d6 # K_60_79 + xor $21,$23 + sll $25,$1,5 # 60 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $21,$13 + xor $25,$3,$7 + addu $24,$6 + xor $21,$18 + sll $30,$2,30 + xor $25,$2 + srl $6,$21,31 + addu $21,$21 + srl $2,$2,2 + addu $24,$20 + or $21,$6 + or $2,$30 + addu $24,$25 + xor $22,$8 + sll $25,$24,5 # 61 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $22,$14 + xor $25,$2,$3 + addu $7,$6 + xor $22,$19 + sll $30,$1,30 + xor $25,$1 + srl $6,$22,31 + addu $22,$22 + srl $1,$1,2 + addu $7,$21 + or $22,$6 + or $1,$30 + addu $7,$25 + xor $23,$9 + sll $25,$7,5 # 62 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $23,$15 + xor $25,$1,$2 + addu $3,$6 + xor $23,$20 + sll $30,$24,30 + xor $25,$24 + srl $6,$23,31 + addu $23,$23 + srl $24,$24,2 + addu $3,$22 + or $23,$6 + or $24,$30 + addu $3,$25 + xor $8,$10 + sll $25,$3,5 # 63 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $8,$16 + xor $25,$24,$1 + addu $2,$6 + xor $8,$21 + sll $30,$7,30 + xor $25,$7 + srl $6,$8,31 + addu $8,$8 + srl $7,$7,2 + addu $2,$23 + or $8,$6 + or $7,$30 + addu $2,$25 + xor $9,$11 + sll $25,$2,5 # 64 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $9,$17 + xor $25,$7,$24 + addu $1,$6 + xor $9,$22 + sll $30,$3,30 + xor $25,$3 + srl $6,$9,31 + addu $9,$9 + srl $3,$3,2 + addu $1,$8 + or $9,$6 + or $3,$30 + addu $1,$25 + xor $10,$12 + sll $25,$1,5 # 65 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $10,$18 + xor $25,$3,$7 + addu $24,$6 + xor $10,$23 + sll $30,$2,30 + xor $25,$2 + srl $6,$10,31 + addu $10,$10 + srl $2,$2,2 + addu $24,$9 + or $10,$6 + or $2,$30 + addu $24,$25 + xor $11,$13 + sll $25,$24,5 # 66 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $11,$19 + xor $25,$2,$3 + addu $7,$6 + xor $11,$8 + sll $30,$1,30 + xor $25,$1 + srl $6,$11,31 + addu $11,$11 + srl $1,$1,2 + addu $7,$10 + or $11,$6 + or $1,$30 + addu $7,$25 + xor $12,$14 + sll $25,$7,5 # 67 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $12,$20 + xor $25,$1,$2 + addu $3,$6 + xor $12,$9 + sll $30,$24,30 + xor $25,$24 + srl $6,$12,31 + addu $12,$12 + srl $24,$24,2 + addu $3,$11 + or $12,$6 + or $24,$30 + addu $3,$25 + xor $13,$15 + sll $25,$3,5 # 68 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $13,$21 + xor $25,$24,$1 + addu $2,$6 + xor $13,$10 + sll $30,$7,30 + xor $25,$7 + srl $6,$13,31 + addu $13,$13 + srl $7,$7,2 + addu $2,$12 + or 
$13,$6 + or $7,$30 + addu $2,$25 + xor $14,$16 + sll $25,$2,5 # 69 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $14,$22 + xor $25,$7,$24 + addu $1,$6 + xor $14,$11 + sll $30,$3,30 + xor $25,$3 + srl $6,$14,31 + addu $14,$14 + srl $3,$3,2 + addu $1,$13 + or $14,$6 + or $3,$30 + addu $1,$25 + xor $15,$17 + sll $25,$1,5 # 70 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $15,$23 + xor $25,$3,$7 + addu $24,$6 + xor $15,$12 + sll $30,$2,30 + xor $25,$2 + srl $6,$15,31 + addu $15,$15 + srl $2,$2,2 + addu $24,$14 + or $15,$6 + or $2,$30 + addu $24,$25 + xor $16,$18 + sll $25,$24,5 # 71 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $16,$8 + xor $25,$2,$3 + addu $7,$6 + xor $16,$13 + sll $30,$1,30 + xor $25,$1 + srl $6,$16,31 + addu $16,$16 + srl $1,$1,2 + addu $7,$15 + or $16,$6 + or $1,$30 + addu $7,$25 + xor $17,$19 + sll $25,$7,5 # 72 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $17,$9 + xor $25,$1,$2 + addu $3,$6 + xor $17,$14 + sll $30,$24,30 + xor $25,$24 + srl $6,$17,31 + addu $17,$17 + srl $24,$24,2 + addu $3,$16 + or $17,$6 + or $24,$30 + addu $3,$25 + xor $18,$20 + sll $25,$3,5 # 73 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $18,$10 + xor $25,$24,$1 + addu $2,$6 + xor $18,$15 + sll $30,$7,30 + xor $25,$7 + srl $6,$18,31 + addu $18,$18 + srl $7,$7,2 + addu $2,$17 + or $18,$6 + or $7,$30 + addu $2,$25 + xor $19,$21 + sll $25,$2,5 # 74 + addu $1,$31 + srl $6,$2,27 + addu $1,$25 + xor $19,$11 + xor $25,$7,$24 + addu $1,$6 + xor $19,$16 + sll $30,$3,30 + xor $25,$3 + srl $6,$19,31 + addu $19,$19 + srl $3,$3,2 + addu $1,$18 + or $19,$6 + or $3,$30 + addu $1,$25 + xor $20,$22 + sll $25,$1,5 # 75 + addu $24,$31 + srl $6,$1,27 + addu $24,$25 + xor $20,$12 + xor $25,$3,$7 + addu $24,$6 + xor $20,$17 + sll $30,$2,30 + xor $25,$2 + srl $6,$20,31 + addu $20,$20 + srl $2,$2,2 + addu $24,$19 + or $20,$6 + or $2,$30 + addu $24,$25 + xor $21,$23 + sll $25,$24,5 # 76 + addu $7,$31 + srl $6,$24,27 + addu $7,$25 + xor $21,$13 + xor $25,$2,$3 + addu $7,$6 + xor $21,$18 + sll $30,$1,30 + xor $25,$1 + srl $6,$21,31 + addu $21,$21 + srl $1,$1,2 + addu $7,$20 + or $21,$6 + or $1,$30 + addu $7,$25 + xor $22,$8 + sll $25,$7,5 # 77 + addu $3,$31 + srl $6,$7,27 + addu $3,$25 + xor $22,$14 + xor $25,$1,$2 + addu $3,$6 + xor $22,$19 + sll $30,$24,30 + xor $25,$24 + srl $6,$22,31 + addu $22,$22 + srl $24,$24,2 + addu $3,$21 + or $22,$6 + or $24,$30 + addu $3,$25 + xor $23,$9 + sll $25,$3,5 # 78 + addu $2,$31 + srl $6,$3,27 + addu $2,$25 + xor $23,$15 + xor $25,$24,$1 + addu $2,$6 + xor $23,$20 + sll $30,$7,30 + xor $25,$7 + srl $6,$23,31 + addu $23,$23 + srl $7,$7,2 + addu $2,$22 + or $23,$6 + or $7,$30 + addu $2,$25 + lw $8,0($4) + sll $25,$2,5 # 79 + addu $1,$31 + lw $9,4($4) + srl $6,$2,27 + addu $1,$25 + lw $10,8($4) + xor $25,$7,$24 + addu $1,$6 + lw $11,12($4) + sll $30,$3,30 + xor $25,$3 + lw $12,16($4) + srl $3,$3,2 + addu $1,$23 + or $3,$30 + addu $1,$25 + add $5,64 + lw $6,0($29) + + addu $1,$8 + addu $2,$9 + sw $1,0($4) + addu $3,$10 + addu $7,$11 + sw $2,4($4) + addu $24,$12 + sw $3,8($4) + sw $7,12($4) + sw $24,16($4) + .set noreorder + bne $5,$6,.Loop + nop + + .set noreorder + lw $31,(16-1)*4($29) + lw $30,(16-2)*4($29) + lw $23,(16-3)*4($29) + lw $22,(16-4)*4($29) + lw $21,(16-5)*4($29) + lw $20,(16-6)*4($29) + lw $19,(16-7)*4($29) + lw $18,(16-8)*4($29) + lw $17,(16-9)*4($29) + lw $16,(16-10)*4($29) + jr $31 + add $29,16*4 +.end sha1_block_data_order +.rdata +.asciiz "SHA1 for MIPS, CRYPTOGAMS by " diff --git a/app/openssl/crypto/sha/asm/sha1-mips.pl 
b/app/openssl/crypto/sha/asm/sha1-mips.pl new file mode 100644 index 00000000..f1a702f3 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha1-mips.pl @@ -0,0 +1,354 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# SHA1 block procedure for MIPS. + +# Performance improvement is 30% on unaligned input. The "secret" is +# to deploy lwl/lwr pair to load unaligned input. One could have +# vectorized Xupdate on MIPSIII/IV, but the goal was to code MIPS32- +# compatible subroutine. There is room for minor optimization on +# little-endian platforms... + +###################################################################### +# There is a number of MIPS ABI in use, O32 and N32/64 are most +# widely used. Then there is a new contender: NUBI. It appears that if +# one picks the latter, it's possible to arrange code in ABI neutral +# manner. Therefore let's stick to NUBI register layout: +# +($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25)); +($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23)); +($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31)); +# +# The return value is placed in $a0. Following coding rules facilitate +# interoperability: +# +# - never ever touch $tp, "thread pointer", former $gp; +# - copy return value to $t0, former $v0 [or to $a0 if you're adapting +# old code]; +# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary; +# +# For reference here is register layout for N32/64 MIPS ABIs: +# +# ($zero,$at,$v0,$v1)=map("\$$_",(0..3)); +# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25)); +# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23)); +# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31)); +# +$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64 + +if ($flavour =~ /64|n32/i) { + $PTR_ADD="dadd"; # incidentally works even on n32 + $PTR_SUB="dsub"; # incidentally works even on n32 + $REG_S="sd"; + $REG_L="ld"; + $PTR_SLL="dsll"; # incidentally works even on n32 + $SZREG=8; +} else { + $PTR_ADD="add"; + $PTR_SUB="sub"; + $REG_S="sw"; + $REG_L="lw"; + $PTR_SLL="sll"; + $SZREG=4; +} +# +# +# +###################################################################### + +$big_endian=(`echo MIPSEL | $ENV{CC} -E -P -`=~/MIPSEL/)?1:0; + +for (@ARGV) { $output=$_ if (/^\w[\w\-]*\.\w+$/); } +open STDOUT,">$output"; + +if (!defined($big_endian)) + { $big_endian=(unpack('L',pack('N',1))==1); } + +# offsets of the Most and Least Significant Bytes +$MSB=$big_endian?0:3; +$LSB=3&~$MSB; + +@X=map("\$$_",(8..23)); # a4-a7,s0-s11 + +$ctx=$a0; +$inp=$a1; +$num=$a2; +$A="\$1"; +$B="\$2"; +$C="\$3"; +$D="\$7"; +$E="\$24"; @V=($A,$B,$C,$D,$E); +$t0="\$25"; +$t1=$num; # $num is offloaded to stack +$t2="\$30"; # fp +$K="\$31"; # ra + +sub BODY_00_14 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___ if (!$big_endian); + srl $t0,@X[$i],24 # byte swap($i) + srl $t1,@X[$i],8 + andi $t2,@X[$i],0xFF00 + sll @X[$i],@X[$i],24 + andi $t1,0xFF00 + sll $t2,$t2,8 + or @X[$i],$t0 + or $t1,$t2 + or @X[$i],$t1 +___ +$code.=<<___; + lwl @X[$j],$j*4+$MSB($inp) + sll $t0,$a,5 # $i + addu $e,$K + lwr @X[$j],$j*4+$LSB($inp) 
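
The srl/sll/andi/or sequence that BODY_00_14 emits for little-endian targets (the "# byte swap" comments) is a plain 32-bit byte reversal done without a dedicated bswap instruction. A rough Perl equivalent, assuming the word already fits in 32 bits:

sub bswap32 {                               # reference only
    my ($w) = @_;                           # assumed to fit in 32 bits
    return (($w >> 24) & 0x000000ff)        # srl 24: high byte to the bottom
         | (($w >>  8) & 0x0000ff00)        # srl 8 + andi 0xFF00
         | (($w &  0x0000ff00) <<  8)       # andi 0xFF00 + sll 8
         | (($w << 24) & 0xff000000);       # sll 24: low byte to the top
}
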
+ srl $t1,$a,27 + addu $e,$t0 + xor $t0,$c,$d + addu $e,$t1 + sll $t2,$b,30 + and $t0,$b + srl $b,$b,2 + xor $t0,$d + addu $e,@X[$i] + or $b,$t2 + addu $e,$t0 +___ +} + +sub BODY_15_19 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; + +$code.=<<___ if (!$big_endian && $i==15); + srl $t0,@X[$i],24 # byte swap($i) + srl $t1,@X[$i],8 + andi $t2,@X[$i],0xFF00 + sll @X[$i],@X[$i],24 + andi $t1,0xFF00 + sll $t2,$t2,8 + or @X[$i],$t0 + or @X[$i],$t1 + or @X[$i],$t2 +___ +$code.=<<___; + xor @X[$j%16],@X[($j+2)%16] + sll $t0,$a,5 # $i + addu $e,$K + srl $t1,$a,27 + addu $e,$t0 + xor @X[$j%16],@X[($j+8)%16] + xor $t0,$c,$d + addu $e,$t1 + xor @X[$j%16],@X[($j+13)%16] + sll $t2,$b,30 + and $t0,$b + srl $t1,@X[$j%16],31 + addu @X[$j%16],@X[$j%16] + srl $b,$b,2 + xor $t0,$d + or @X[$j%16],$t1 + addu $e,@X[$i%16] + or $b,$t2 + addu $e,$t0 +___ +} + +sub BODY_20_39 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___ if ($i<79); + xor @X[$j%16],@X[($j+2)%16] + sll $t0,$a,5 # $i + addu $e,$K + srl $t1,$a,27 + addu $e,$t0 + xor @X[$j%16],@X[($j+8)%16] + xor $t0,$c,$d + addu $e,$t1 + xor @X[$j%16],@X[($j+13)%16] + sll $t2,$b,30 + xor $t0,$b + srl $t1,@X[$j%16],31 + addu @X[$j%16],@X[$j%16] + srl $b,$b,2 + addu $e,@X[$i%16] + or @X[$j%16],$t1 + or $b,$t2 + addu $e,$t0 +___ +$code.=<<___ if ($i==79); + lw @X[0],0($ctx) + sll $t0,$a,5 # $i + addu $e,$K + lw @X[1],4($ctx) + srl $t1,$a,27 + addu $e,$t0 + lw @X[2],8($ctx) + xor $t0,$c,$d + addu $e,$t1 + lw @X[3],12($ctx) + sll $t2,$b,30 + xor $t0,$b + lw @X[4],16($ctx) + srl $b,$b,2 + addu $e,@X[$i%16] + or $b,$t2 + addu $e,$t0 +___ +} + +sub BODY_40_59 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___ if ($i<79); + xor @X[$j%16],@X[($j+2)%16] + sll $t0,$a,5 # $i + addu $e,$K + srl $t1,$a,27 + addu $e,$t0 + xor @X[$j%16],@X[($j+8)%16] + and $t0,$c,$d + addu $e,$t1 + xor @X[$j%16],@X[($j+13)%16] + sll $t2,$b,30 + addu $e,$t0 + srl $t1,@X[$j%16],31 + xor $t0,$c,$d + addu @X[$j%16],@X[$j%16] + and $t0,$b + srl $b,$b,2 + or @X[$j%16],$t1 + addu $e,@X[$i%16] + or $b,$t2 + addu $e,$t0 +___ +} + +$FRAMESIZE=16; # large enough to accomodate NUBI saved registers +$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 
0xc0fff008 : 0xc0ff0000; + +$code=<<___; +#ifdef OPENSSL_FIPSCANISTER +# include +#endif + +.text + +.set noat +.set noreorder +.align 5 +.globl sha1_block_data_order +.ent sha1_block_data_order +sha1_block_data_order: + .frame $sp,$FRAMESIZE*$SZREG,$ra + .mask $SAVED_REGS_MASK,-$SZREG + .set noreorder + $PTR_SUB $sp,$FRAMESIZE*$SZREG + $REG_S $ra,($FRAMESIZE-1)*$SZREG($sp) + $REG_S $fp,($FRAMESIZE-2)*$SZREG($sp) + $REG_S $s11,($FRAMESIZE-3)*$SZREG($sp) + $REG_S $s10,($FRAMESIZE-4)*$SZREG($sp) + $REG_S $s9,($FRAMESIZE-5)*$SZREG($sp) + $REG_S $s8,($FRAMESIZE-6)*$SZREG($sp) + $REG_S $s7,($FRAMESIZE-7)*$SZREG($sp) + $REG_S $s6,($FRAMESIZE-8)*$SZREG($sp) + $REG_S $s5,($FRAMESIZE-9)*$SZREG($sp) + $REG_S $s4,($FRAMESIZE-10)*$SZREG($sp) +___ +$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue + $REG_S $s3,($FRAMESIZE-11)*$SZREG($sp) + $REG_S $s2,($FRAMESIZE-12)*$SZREG($sp) + $REG_S $s1,($FRAMESIZE-13)*$SZREG($sp) + $REG_S $s0,($FRAMESIZE-14)*$SZREG($sp) + $REG_S $gp,($FRAMESIZE-15)*$SZREG($sp) +___ +$code.=<<___; + $PTR_SLL $num,6 + $PTR_ADD $num,$inp + $REG_S $num,0($sp) + lw $A,0($ctx) + lw $B,4($ctx) + lw $C,8($ctx) + lw $D,12($ctx) + b .Loop + lw $E,16($ctx) +.align 4 +.Loop: + .set reorder + lwl @X[0],$MSB($inp) + lui $K,0x5a82 + lwr @X[0],$LSB($inp) + ori $K,0x7999 # K_00_19 +___ +for ($i=0;$i<15;$i++) { &BODY_00_14($i,@V); unshift(@V,pop(@V)); } +for (;$i<20;$i++) { &BODY_15_19($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + lui $K,0x6ed9 + ori $K,0xeba1 # K_20_39 +___ +for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + lui $K,0x8f1b + ori $K,0xbcdc # K_40_59 +___ +for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + lui $K,0xca62 + ori $K,0xc1d6 # K_60_79 +___ +for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + $PTR_ADD $inp,64 + $REG_L $num,0($sp) + + addu $A,$X[0] + addu $B,$X[1] + sw $A,0($ctx) + addu $C,$X[2] + addu $D,$X[3] + sw $B,4($ctx) + addu $E,$X[4] + sw $C,8($ctx) + sw $D,12($ctx) + sw $E,16($ctx) + .set noreorder + bne $inp,$num,.Loop + nop + + .set noreorder + $REG_L $ra,($FRAMESIZE-1)*$SZREG($sp) + $REG_L $fp,($FRAMESIZE-2)*$SZREG($sp) + $REG_L $s11,($FRAMESIZE-3)*$SZREG($sp) + $REG_L $s10,($FRAMESIZE-4)*$SZREG($sp) + $REG_L $s9,($FRAMESIZE-5)*$SZREG($sp) + $REG_L $s8,($FRAMESIZE-6)*$SZREG($sp) + $REG_L $s7,($FRAMESIZE-7)*$SZREG($sp) + $REG_L $s6,($FRAMESIZE-8)*$SZREG($sp) + $REG_L $s5,($FRAMESIZE-9)*$SZREG($sp) + $REG_L $s4,($FRAMESIZE-10)*$SZREG($sp) +___ +$code.=<<___ if ($flavour =~ /nubi/i); + $REG_L $s3,($FRAMESIZE-11)*$SZREG($sp) + $REG_L $s2,($FRAMESIZE-12)*$SZREG($sp) + $REG_L $s1,($FRAMESIZE-13)*$SZREG($sp) + $REG_L $s0,($FRAMESIZE-14)*$SZREG($sp) + $REG_L $gp,($FRAMESIZE-15)*$SZREG($sp) +___ +$code.=<<___; + jr $ra + $PTR_ADD $sp,$FRAMESIZE*$SZREG +.end sha1_block_data_order +.rdata +.asciiz "SHA1 for MIPS, CRYPTOGAMS by " +___ +print $code; +close STDOUT; diff --git a/app/openssl/crypto/sha/asm/sha1-parisc.pl b/app/openssl/crypto/sha/asm/sha1-parisc.pl new file mode 100644 index 00000000..6e5a328a --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha1-parisc.pl @@ -0,0 +1,260 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. 
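
A note on the driver loops that stitch the round bodies together (the for loops ending the MIPS module above, and the same pattern in the IA-64, PA-RISC and x86_64 generators): rather than emit register moves, each generator rotates its Perl-level tuple of working variables once per round, so the BODY_* template always sees the value it must update as $e. A small illustrative sketch:

my @V = ('$A', '$B', '$C', '$D', '$E');     # names only, illustrative
for my $i (0 .. 4) {
    printf "round %2d: a=%s b=%s c=%s d=%s e=%s\n", $i, @V;
    unshift(@V, pop(@V));                   # (a,b,c,d,e) <- (e,a,b,c,d)
}

After 80 rounds the tuple is back in its starting order (80 is a multiple of 5), which is what the `(($V[0] eq $A) and ($V[4] eq $E)) or die` check in the updated IA-64 driver asserts.
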
+# ==================================================================== + +# SHA1 block procedure for PA-RISC. + +# June 2009. +# +# On PA-7100LC performance is >30% better than gcc 3.2 generated code +# for aligned input and >50% better for unaligned. Compared to vendor +# compiler on PA-8600 it's almost 60% faster in 64-bit build and just +# few percent faster in 32-bit one (this for aligned input, data for +# unaligned input is not available). +# +# Special thanks to polarhome.com for providing HP-UX account. + +$flavour = shift; +$output = shift; +open STDOUT,">$output"; + +if ($flavour =~ /64/) { + $LEVEL ="2.0W"; + $SIZE_T =8; + $FRAME_MARKER =80; + $SAVED_RP =16; + $PUSH ="std"; + $PUSHMA ="std,ma"; + $POP ="ldd"; + $POPMB ="ldd,mb"; +} else { + $LEVEL ="1.0"; + $SIZE_T =4; + $FRAME_MARKER =48; + $SAVED_RP =20; + $PUSH ="stw"; + $PUSHMA ="stwm"; + $POP ="ldw"; + $POPMB ="ldwm"; +} + +$FRAME=14*$SIZE_T+$FRAME_MARKER;# 14 saved regs + frame marker + # [+ argument transfer] +$ctx="%r26"; # arg0 +$inp="%r25"; # arg1 +$num="%r24"; # arg2 + +$t0="%r28"; +$t1="%r29"; +$K="%r31"; + +@X=("%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", "%r8", + "%r9", "%r10","%r11","%r12","%r13","%r14","%r15","%r16",$t0); + +@V=($A,$B,$C,$D,$E)=("%r19","%r20","%r21","%r22","%r23"); + +sub BODY_00_19 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___ if ($i<15); + addl $K,$e,$e ; $i + shd $a,$a,27,$t1 + addl @X[$i],$e,$e + and $c,$b,$t0 + addl $t1,$e,$e + andcm $d,$b,$t1 + shd $b,$b,2,$b + or $t1,$t0,$t0 + addl $t0,$e,$e +___ +$code.=<<___ if ($i>=15); # with forward Xupdate + addl $K,$e,$e ; $i + shd $a,$a,27,$t1 + xor @X[($j+2)%16],@X[$j%16],@X[$j%16] + addl @X[$i%16],$e,$e + and $c,$b,$t0 + xor @X[($j+8)%16],@X[$j%16],@X[$j%16] + addl $t1,$e,$e + andcm $d,$b,$t1 + shd $b,$b,2,$b + or $t1,$t0,$t0 + xor @X[($j+13)%16],@X[$j%16],@X[$j%16] + add $t0,$e,$e + shd @X[$j%16],@X[$j%16],31,@X[$j%16] +___ +} + +sub BODY_20_39 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___ if ($i<79); + xor @X[($j+2)%16],@X[$j%16],@X[$j%16] ; $i + addl $K,$e,$e + shd $a,$a,27,$t1 + xor @X[($j+8)%16],@X[$j%16],@X[$j%16] + addl @X[$i%16],$e,$e + xor $b,$c,$t0 + xor @X[($j+13)%16],@X[$j%16],@X[$j%16] + addl $t1,$e,$e + shd $b,$b,2,$b + xor $d,$t0,$t0 + shd @X[$j%16],@X[$j%16],31,@X[$j%16] + addl $t0,$e,$e +___ +$code.=<<___ if ($i==79); # with context load + ldw 0($ctx),@X[0] ; $i + addl $K,$e,$e + shd $a,$a,27,$t1 + ldw 4($ctx),@X[1] + addl @X[$i%16],$e,$e + xor $b,$c,$t0 + ldw 8($ctx),@X[2] + addl $t1,$e,$e + shd $b,$b,2,$b + xor $d,$t0,$t0 + ldw 12($ctx),@X[3] + addl $t0,$e,$e + ldw 16($ctx),@X[4] +___ +} + +sub BODY_40_59 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $j=$i+1; +$code.=<<___; + shd $a,$a,27,$t1 ; $i + addl $K,$e,$e + xor @X[($j+2)%16],@X[$j%16],@X[$j%16] + xor $d,$c,$t0 + addl @X[$i%16],$e,$e + xor @X[($j+8)%16],@X[$j%16],@X[$j%16] + and $b,$t0,$t0 + addl $t1,$e,$e + shd $b,$b,2,$b + xor @X[($j+13)%16],@X[$j%16],@X[$j%16] + addl $t0,$e,$e + and $d,$c,$t1 + shd @X[$j%16],@X[$j%16],31,@X[$j%16] + addl $t1,$e,$e +___ +} + +$code=<<___; + .LEVEL $LEVEL + .SPACE \$TEXT\$ + .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY + + .EXPORT sha1_block_data_order,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR +sha1_block_data_order + .PROC + .CALLINFO FRAME=`$FRAME-14*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=16 + .ENTRY + $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue + $PUSHMA %r3,$FRAME(%sp) + $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp) + $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp) + $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp) + $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp) + 
$PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp) + $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp) + $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp) + $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp) + $PUSH %r12,`-$FRAME+9*$SIZE_T`(%sp) + $PUSH %r13,`-$FRAME+10*$SIZE_T`(%sp) + $PUSH %r14,`-$FRAME+11*$SIZE_T`(%sp) + $PUSH %r15,`-$FRAME+12*$SIZE_T`(%sp) + $PUSH %r16,`-$FRAME+13*$SIZE_T`(%sp) + + ldw 0($ctx),$A + ldw 4($ctx),$B + ldw 8($ctx),$C + ldw 12($ctx),$D + ldw 16($ctx),$E + + extru $inp,31,2,$t0 ; t0=inp&3; + sh3addl $t0,%r0,$t0 ; t0*=8; + subi 32,$t0,$t0 ; t0=32-t0; + mtctl $t0,%cr11 ; %sar=t0; + +L\$oop + ldi 3,$t0 + andcm $inp,$t0,$t0 ; 64-bit neutral +___ + for ($i=0;$i<15;$i++) { # load input block + $code.="\tldw `4*$i`($t0),@X[$i]\n"; } +$code.=<<___; + cmpb,*= $inp,$t0,L\$aligned + ldw 60($t0),@X[15] + ldw 64($t0),@X[16] +___ + for ($i=0;$i<16;$i++) { # align input + $code.="\tvshd @X[$i],@X[$i+1],@X[$i]\n"; } +$code.=<<___; +L\$aligned + ldil L'0x5a827000,$K ; K_00_19 + ldo 0x999($K),$K +___ +for ($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + ldil L'0x6ed9e000,$K ; K_20_39 + ldo 0xba1($K),$K +___ + +for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + ldil L'0x8f1bb000,$K ; K_40_59 + ldo 0xcdc($K),$K +___ + +for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + ldil L'0xca62c000,$K ; K_60_79 + ldo 0x1d6($K),$K +___ +for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } + +$code.=<<___; + addl @X[0],$A,$A + addl @X[1],$B,$B + addl @X[2],$C,$C + addl @X[3],$D,$D + addl @X[4],$E,$E + stw $A,0($ctx) + stw $B,4($ctx) + stw $C,8($ctx) + stw $D,12($ctx) + stw $E,16($ctx) + addib,*<> -1,$num,L\$oop + ldo 64($inp),$inp + + $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue + $POP `-$FRAME+1*$SIZE_T`(%sp),%r4 + $POP `-$FRAME+2*$SIZE_T`(%sp),%r5 + $POP `-$FRAME+3*$SIZE_T`(%sp),%r6 + $POP `-$FRAME+4*$SIZE_T`(%sp),%r7 + $POP `-$FRAME+5*$SIZE_T`(%sp),%r8 + $POP `-$FRAME+6*$SIZE_T`(%sp),%r9 + $POP `-$FRAME+7*$SIZE_T`(%sp),%r10 + $POP `-$FRAME+8*$SIZE_T`(%sp),%r11 + $POP `-$FRAME+9*$SIZE_T`(%sp),%r12 + $POP `-$FRAME+10*$SIZE_T`(%sp),%r13 + $POP `-$FRAME+11*$SIZE_T`(%sp),%r14 + $POP `-$FRAME+12*$SIZE_T`(%sp),%r15 + $POP `-$FRAME+13*$SIZE_T`(%sp),%r16 + bv (%r2) + .EXIT + $POPMB -$FRAME(%sp),%r3 + .PROCEND + .STRINGZ "SHA1 block transform for PA-RISC, CRYPTOGAMS by " +___ + +$code =~ s/\`([^\`]*)\`/eval $1/gem; +$code =~ s/,\*/,/gm if ($SIZE_T==4); +$code =~ s/\bbv\b/bve/gm if ($SIZE_T==8); +print $code; +close STDOUT; diff --git a/app/openssl/crypto/sha/asm/sha1-ppc.pl b/app/openssl/crypto/sha/asm/sha1-ppc.pl index dcd0fcdf..2140dd2f 100755 --- a/app/openssl/crypto/sha/asm/sha1-ppc.pl +++ b/app/openssl/crypto/sha/asm/sha1-ppc.pl @@ -24,12 +24,14 @@ $flavour = shift; if ($flavour =~ /64/) { $SIZE_T =8; + $LRSAVE =2*$SIZE_T; $UCMP ="cmpld"; $STU ="stdu"; $POP ="ld"; $PUSH ="std"; } elsif ($flavour =~ /32/) { $SIZE_T =4; + $LRSAVE =$SIZE_T; $UCMP ="cmplw"; $STU ="stwu"; $POP ="lwz"; @@ -43,7 +45,8 @@ die "can't locate ppc-xlate.pl"; open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; -$FRAME=24*$SIZE_T; +$FRAME=24*$SIZE_T+64; +$LOCALS=6*$SIZE_T; $K ="r0"; $sp ="r1"; @@ -162,9 +165,8 @@ $code=<<___; .globl .sha1_block_data_order .align 4 .sha1_block_data_order: + $STU $sp,-$FRAME($sp) mflr r0 - $STU $sp,`-($FRAME+64)`($sp) - $PUSH r0,`$FRAME-$SIZE_T*18`($sp) $PUSH r15,`$FRAME-$SIZE_T*17`($sp) $PUSH r16,`$FRAME-$SIZE_T*16`($sp) $PUSH r17,`$FRAME-$SIZE_T*15`($sp) @@ -182,6 +184,7 @@ $code=<<___; $PUSH r29,`$FRAME-$SIZE_T*3`($sp) 
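
The sha1-ppc.pl hunks here rework the stack frame: the link register is now stored at $FRAME+$LRSAVE, i.e. in the caller's frame, which appears to line up with the ABI link-register save slot, and the 64 bytes used for the aligned copy of a page-crossing input block move to $LOCALS within the frame. A small sketch of the resulting offsets, assuming the $SIZE_T/$LRSAVE/$FRAME/$LOCALS assignments visible in the hunks (flavour names are illustrative):

for my $flavour ('linux32', 'linux64') {    # illustrative values only
    my $SIZE_T = ($flavour =~ /64/) ? 8 : 4;
    my $LRSAVE = ($flavour =~ /64/) ? 2 * $SIZE_T : $SIZE_T;
    my $FRAME  = 24 * $SIZE_T + 64;         # register saves + 64-byte block copy
    my $LOCALS = 6 * $SIZE_T;               # offset of the aligned block copy
    printf "%s: FRAME=%d LOCALS=%d, LR saved at %d(sp), %d above the caller's sp\n",
           $flavour, $FRAME, $LOCALS, $FRAME + $LRSAVE, $LRSAVE;
}
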
$PUSH r30,`$FRAME-$SIZE_T*2`($sp) $PUSH r31,`$FRAME-$SIZE_T*1`($sp) + $PUSH r0,`$FRAME+$LRSAVE`($sp) lwz $A,0($ctx) lwz $B,4($ctx) lwz $C,8($ctx) @@ -192,37 +195,14 @@ $code=<<___; Laligned: mtctr $num bl Lsha1_block_private -Ldone: - $POP r0,`$FRAME-$SIZE_T*18`($sp) - $POP r15,`$FRAME-$SIZE_T*17`($sp) - $POP r16,`$FRAME-$SIZE_T*16`($sp) - $POP r17,`$FRAME-$SIZE_T*15`($sp) - $POP r18,`$FRAME-$SIZE_T*14`($sp) - $POP r19,`$FRAME-$SIZE_T*13`($sp) - $POP r20,`$FRAME-$SIZE_T*12`($sp) - $POP r21,`$FRAME-$SIZE_T*11`($sp) - $POP r22,`$FRAME-$SIZE_T*10`($sp) - $POP r23,`$FRAME-$SIZE_T*9`($sp) - $POP r24,`$FRAME-$SIZE_T*8`($sp) - $POP r25,`$FRAME-$SIZE_T*7`($sp) - $POP r26,`$FRAME-$SIZE_T*6`($sp) - $POP r27,`$FRAME-$SIZE_T*5`($sp) - $POP r28,`$FRAME-$SIZE_T*4`($sp) - $POP r29,`$FRAME-$SIZE_T*3`($sp) - $POP r30,`$FRAME-$SIZE_T*2`($sp) - $POP r31,`$FRAME-$SIZE_T*1`($sp) - mtlr r0 - addi $sp,$sp,`$FRAME+64` - blr -___ + b Ldone -# PowerPC specification allows an implementation to be ill-behaved -# upon unaligned access which crosses page boundary. "Better safe -# than sorry" principle makes me treat it specially. But I don't -# look for particular offending word, but rather for 64-byte input -# block which crosses the boundary. Once found that block is aligned -# and hashed separately... -$code.=<<___; +; PowerPC specification allows an implementation to be ill-behaved +; upon unaligned access which crosses page boundary. "Better safe +; than sorry" principle makes me treat it specially. But I don't +; look for particular offending word, but rather for 64-byte input +; block which crosses the boundary. Once found that block is aligned +; and hashed separately... .align 4 Lunaligned: subfic $t1,$inp,4096 @@ -237,7 +217,7 @@ Lunaligned: Lcross_page: li $t1,16 mtctr $t1 - addi r20,$sp,$FRAME ; spot below the frame + addi r20,$sp,$LOCALS ; spot within the frame Lmemcpy: lbz r16,0($inp) lbz r17,1($inp) @@ -251,15 +231,40 @@ Lmemcpy: addi r20,r20,4 bdnz Lmemcpy - $PUSH $inp,`$FRAME-$SIZE_T*19`($sp) + $PUSH $inp,`$FRAME-$SIZE_T*18`($sp) li $t1,1 - addi $inp,$sp,$FRAME + addi $inp,$sp,$LOCALS mtctr $t1 bl Lsha1_block_private - $POP $inp,`$FRAME-$SIZE_T*19`($sp) + $POP $inp,`$FRAME-$SIZE_T*18`($sp) addic. $num,$num,-1 bne- Lunaligned - b Ldone + +Ldone: + $POP r0,`$FRAME+$LRSAVE`($sp) + $POP r15,`$FRAME-$SIZE_T*17`($sp) + $POP r16,`$FRAME-$SIZE_T*16`($sp) + $POP r17,`$FRAME-$SIZE_T*15`($sp) + $POP r18,`$FRAME-$SIZE_T*14`($sp) + $POP r19,`$FRAME-$SIZE_T*13`($sp) + $POP r20,`$FRAME-$SIZE_T*12`($sp) + $POP r21,`$FRAME-$SIZE_T*11`($sp) + $POP r22,`$FRAME-$SIZE_T*10`($sp) + $POP r23,`$FRAME-$SIZE_T*9`($sp) + $POP r24,`$FRAME-$SIZE_T*8`($sp) + $POP r25,`$FRAME-$SIZE_T*7`($sp) + $POP r26,`$FRAME-$SIZE_T*6`($sp) + $POP r27,`$FRAME-$SIZE_T*5`($sp) + $POP r28,`$FRAME-$SIZE_T*4`($sp) + $POP r29,`$FRAME-$SIZE_T*3`($sp) + $POP r30,`$FRAME-$SIZE_T*2`($sp) + $POP r31,`$FRAME-$SIZE_T*1`($sp) + mtlr r0 + addi $sp,$sp,$FRAME + blr + .long 0 + .byte 0,12,4,1,0x80,18,3,0 + .long 0 ___ # This is private block function, which uses tailored calling @@ -309,6 +314,8 @@ $code.=<<___; addi $inp,$inp,`16*4` bdnz- Lsha1_block_private blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 ___ $code.=<<___; .asciz "SHA1 block transform for PPC, CRYPTOGAMS by " diff --git a/app/openssl/crypto/sha/asm/sha1-s390x.pl b/app/openssl/crypto/sha/asm/sha1-s390x.pl index 4b178482..9193dda4 100644 --- a/app/openssl/crypto/sha/asm/sha1-s390x.pl +++ b/app/openssl/crypto/sha/asm/sha1-s390x.pl @@ -21,9 +21,28 @@ # instructions to favour dual-issue z10 pipeline. 
On z10 hardware is # "only" ~2.3x faster than software. +# November 2010. +# +# Adapt for -m31 build. If kernel supports what's called "highgprs" +# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit +# instructions and achieve "64-bit" performance even in 31-bit legacy +# application context. The feature is not specific to any particular +# processor, as long as it's "z-CPU". Latter implies that the code +# remains z/Architecture specific. + $kimdfunc=1; # magic function code for kimd instruction -$output=shift; +$flavour = shift; + +if ($flavour =~ /3[12]/) { + $SIZE_T=4; + $g=""; +} else { + $SIZE_T=8; + $g="g"; +} + +while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; $K_00_39="%r0"; $K=$K_00_39; @@ -42,13 +61,14 @@ $t1="%r11"; @X=("%r12","%r13","%r14"); $sp="%r15"; -$frame=160+16*4; +$stdframe=16*$SIZE_T+4*8; +$frame=$stdframe+16*4; sub Xupdate { my $i=shift; $code.=<<___ if ($i==15); - lg $prefetch,160($sp) ### Xupdate(16) warm-up + lg $prefetch,$stdframe($sp) ### Xupdate(16) warm-up lr $X[0],$X[2] ___ return if ($i&1); # Xupdate is vectorized and executed every 2nd cycle @@ -58,8 +78,8 @@ $code.=<<___ if ($i<16); ___ $code.=<<___ if ($i>=16); xgr $X[0],$prefetch ### Xupdate($i) - lg $prefetch,`160+4*(($i+2)%16)`($sp) - xg $X[0],`160+4*(($i+8)%16)`($sp) + lg $prefetch,`$stdframe+4*(($i+2)%16)`($sp) + xg $X[0],`$stdframe+4*(($i+8)%16)`($sp) xgr $X[0],$prefetch rll $X[0],$X[0],1 rllg $X[1],$X[0],32 @@ -68,7 +88,7 @@ $code.=<<___ if ($i>=16); lr $X[2],$X[1] # feedback ___ $code.=<<___ if ($i<=70); - stg $X[0],`160+4*($i%16)`($sp) + stg $X[0],`$stdframe+4*($i%16)`($sp) ___ unshift(@X,pop(@X)); } @@ -148,9 +168,9 @@ $code.=<<___ if ($kimdfunc); tmhl %r0,0x4000 # check for message-security assist jz .Lsoftware lghi %r0,0 - la %r1,16($sp) + la %r1,`2*$SIZE_T`($sp) .long 0xb93e0002 # kimd %r0,%r2 - lg %r0,16($sp) + lg %r0,`2*$SIZE_T`($sp) tmhh %r0,`0x8000>>$kimdfunc` jz .Lsoftware lghi %r0,$kimdfunc @@ -165,11 +185,11 @@ $code.=<<___ if ($kimdfunc); ___ $code.=<<___; lghi %r1,-$frame - stg $ctx,16($sp) - stmg %r6,%r15,48($sp) + st${g} $ctx,`2*$SIZE_T`($sp) + stm${g} %r6,%r15,`6*$SIZE_T`($sp) lgr %r0,$sp la $sp,0(%r1,$sp) - stg %r0,0($sp) + st${g} %r0,0($sp) larl $t0,Ktable llgf $A,0($ctx) @@ -199,7 +219,7 @@ ___ for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } $code.=<<___; - lg $ctx,`$frame+16`($sp) + l${g} $ctx,`$frame+2*$SIZE_T`($sp) la $inp,64($inp) al $A,0($ctx) al $B,4($ctx) @@ -211,13 +231,13 @@ $code.=<<___; st $C,8($ctx) st $D,12($ctx) st $E,16($ctx) - brct $len,.Lloop + brct${g} $len,.Lloop - lmg %r6,%r15,`$frame+48`($sp) + lm${g} %r6,%r15,`$frame+6*$SIZE_T`($sp) br %r14 .size sha1_block_data_order,.-sha1_block_data_order .string "SHA1 block transform for s390x, CRYPTOGAMS by " -.comm OPENSSL_s390xcap_P,8,8 +.comm OPENSSL_s390xcap_P,16,8 ___ $code =~ s/\`([^\`]*)\`/eval $1/gem; diff --git a/app/openssl/crypto/sha/asm/sha1-sparcv9a.pl b/app/openssl/crypto/sha/asm/sha1-sparcv9a.pl index 85e8d680..e65291bb 100644 --- a/app/openssl/crypto/sha/asm/sha1-sparcv9a.pl +++ b/app/openssl/crypto/sha/asm/sha1-sparcv9a.pl @@ -549,7 +549,7 @@ ___ # programmer detect if current CPU is VIS capable at run-time. 
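
The Xupdate helpers in this patch (the s390x version above, and the MIPS and PA-RISC bodies earlier) keep only a 16-word window of the message schedule and update it in place, usually one round ahead of where it is consumed (the "forward Xupdate" comments). A compact Perl reference of the recurrence, with rol32 and xupdate as hypothetical helper names:

sub rol32 { my ($x, $n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff }

sub xupdate {                               # reference only
    my ($W, $j) = @_;                       # $W: ref to the 16-word window, $j >= 16
    my $x = $W->[$j % 16] ^ $W->[($j + 2) % 16]
          ^ $W->[($j + 8) % 16] ^ $W->[($j + 13) % 16];
    return $W->[$j % 16] = rol32($x, 1);    # ROTATE(x[0]^x[2]^x[8]^x[13],1)
}
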
sub unvis { my ($mnemonic,$rs1,$rs2,$rd)=@_; -my $ref,$opf; +my ($ref,$opf); my %visopf = ( "fmul8ulx16" => 0x037, "faligndata" => 0x048, "fpadd32" => 0x052, diff --git a/app/openssl/crypto/sha/asm/sha1-x86_64.S b/app/openssl/crypto/sha/asm/sha1-x86_64.S new file mode 100644 index 00000000..3922e203 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha1-x86_64.S @@ -0,0 +1,2486 @@ +.text + + +.globl sha1_block_data_order +.type sha1_block_data_order,@function +.align 16 +sha1_block_data_order: + movl OPENSSL_ia32cap_P+0(%rip),%r9d + movl OPENSSL_ia32cap_P+4(%rip),%r8d + testl $512,%r8d + jz .Lialu + jmp _ssse3_shortcut + +.align 16 +.Lialu: + pushq %rbx + pushq %rbp + pushq %r12 + pushq %r13 + movq %rsp,%r11 + movq %rdi,%r8 + subq $72,%rsp + movq %rsi,%r9 + andq $-64,%rsp + movq %rdx,%r10 + movq %r11,64(%rsp) +.Lprologue: + + movl 0(%r8),%esi + movl 4(%r8),%edi + movl 8(%r8),%r11d + movl 12(%r8),%r12d + movl 16(%r8),%r13d + jmp .Lloop + +.align 16 +.Lloop: + movl 0(%r9),%edx + bswapl %edx + movl %edx,0(%rsp) + movl %r11d,%eax + movl 4(%r9),%ebp + movl %esi,%ecx + xorl %r12d,%eax + bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%r13,1),%r13d + andl %edi,%eax + movl %ebp,4(%rsp) + addl %ecx,%r13d + xorl %r12d,%eax + roll $30,%edi + addl %eax,%r13d + movl %edi,%eax + movl 8(%r9),%edx + movl %r13d,%ecx + xorl %r11d,%eax + bswapl %edx + roll $5,%ecx + leal 1518500249(%rbp,%r12,1),%r12d + andl %esi,%eax + movl %edx,8(%rsp) + addl %ecx,%r12d + xorl %r11d,%eax + roll $30,%esi + addl %eax,%r12d + movl %esi,%eax + movl 12(%r9),%ebp + movl %r12d,%ecx + xorl %edi,%eax + bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%r11,1),%r11d + andl %r13d,%eax + movl %ebp,12(%rsp) + addl %ecx,%r11d + xorl %edi,%eax + roll $30,%r13d + addl %eax,%r11d + movl %r13d,%eax + movl 16(%r9),%edx + movl %r11d,%ecx + xorl %esi,%eax + bswapl %edx + roll $5,%ecx + leal 1518500249(%rbp,%rdi,1),%edi + andl %r12d,%eax + movl %edx,16(%rsp) + addl %ecx,%edi + xorl %esi,%eax + roll $30,%r12d + addl %eax,%edi + movl %r12d,%eax + movl 20(%r9),%ebp + movl %edi,%ecx + xorl %r13d,%eax + bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%rsi,1),%esi + andl %r11d,%eax + movl %ebp,20(%rsp) + addl %ecx,%esi + xorl %r13d,%eax + roll $30,%r11d + addl %eax,%esi + movl %r11d,%eax + movl 24(%r9),%edx + movl %esi,%ecx + xorl %r12d,%eax + bswapl %edx + roll $5,%ecx + leal 1518500249(%rbp,%r13,1),%r13d + andl %edi,%eax + movl %edx,24(%rsp) + addl %ecx,%r13d + xorl %r12d,%eax + roll $30,%edi + addl %eax,%r13d + movl %edi,%eax + movl 28(%r9),%ebp + movl %r13d,%ecx + xorl %r11d,%eax + bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%r12,1),%r12d + andl %esi,%eax + movl %ebp,28(%rsp) + addl %ecx,%r12d + xorl %r11d,%eax + roll $30,%esi + addl %eax,%r12d + movl %esi,%eax + movl 32(%r9),%edx + movl %r12d,%ecx + xorl %edi,%eax + bswapl %edx + roll $5,%ecx + leal 1518500249(%rbp,%r11,1),%r11d + andl %r13d,%eax + movl %edx,32(%rsp) + addl %ecx,%r11d + xorl %edi,%eax + roll $30,%r13d + addl %eax,%r11d + movl %r13d,%eax + movl 36(%r9),%ebp + movl %r11d,%ecx + xorl %esi,%eax + bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%rdi,1),%edi + andl %r12d,%eax + movl %ebp,36(%rsp) + addl %ecx,%edi + xorl %esi,%eax + roll $30,%r12d + addl %eax,%edi + movl %r12d,%eax + movl 40(%r9),%edx + movl %edi,%ecx + xorl %r13d,%eax + bswapl %edx + roll $5,%ecx + leal 1518500249(%rbp,%rsi,1),%esi + andl %r11d,%eax + movl %edx,40(%rsp) + addl %ecx,%esi + xorl %r13d,%eax + roll $30,%r11d + addl %eax,%esi + movl %r11d,%eax + movl 44(%r9),%ebp + movl %esi,%ecx + xorl %r12d,%eax 
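
In the unrolled x86_64 rounds here, the per-round constant is folded into the address arithmetic of leal, so it shows up as a signed 32-bit decimal rather than the familiar hex value: 1518500249 is 0x5a827999, 1859775393 is 0x6ed9eba1, and -1894007588 is 0x8f1bbcdc reinterpreted as a signed displacement (K_60_79 is 0xca62c1d6, as also loaded by the MIPS code above). A quick Perl check of that mapping, assuming a 64-bit perl:

my %K = (                                   # the four SHA-1 round constants
    K_00_19 => 0x5a827999,
    K_20_39 => 0x6ed9eba1,
    K_40_59 => 0x8f1bbcdc,
    K_60_79 => 0xca62c1d6,
);
for my $name (sort keys %K) {
    my $v   = $K{$name};
    my $imm = $v < 0x80000000 ? $v : $v - 0x100000000;  # signed 32-bit displacement
    printf "%s = 0x%08x -> leal displacement %d\n", $name, $v, $imm;
}
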
+ bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%r13,1),%r13d + andl %edi,%eax + movl %ebp,44(%rsp) + addl %ecx,%r13d + xorl %r12d,%eax + roll $30,%edi + addl %eax,%r13d + movl %edi,%eax + movl 48(%r9),%edx + movl %r13d,%ecx + xorl %r11d,%eax + bswapl %edx + roll $5,%ecx + leal 1518500249(%rbp,%r12,1),%r12d + andl %esi,%eax + movl %edx,48(%rsp) + addl %ecx,%r12d + xorl %r11d,%eax + roll $30,%esi + addl %eax,%r12d + movl %esi,%eax + movl 52(%r9),%ebp + movl %r12d,%ecx + xorl %edi,%eax + bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%r11,1),%r11d + andl %r13d,%eax + movl %ebp,52(%rsp) + addl %ecx,%r11d + xorl %edi,%eax + roll $30,%r13d + addl %eax,%r11d + movl %r13d,%eax + movl 56(%r9),%edx + movl %r11d,%ecx + xorl %esi,%eax + bswapl %edx + roll $5,%ecx + leal 1518500249(%rbp,%rdi,1),%edi + andl %r12d,%eax + movl %edx,56(%rsp) + addl %ecx,%edi + xorl %esi,%eax + roll $30,%r12d + addl %eax,%edi + movl %r12d,%eax + movl 60(%r9),%ebp + movl %edi,%ecx + xorl %r13d,%eax + bswapl %ebp + roll $5,%ecx + leal 1518500249(%rdx,%rsi,1),%esi + andl %r11d,%eax + movl %ebp,60(%rsp) + addl %ecx,%esi + xorl %r13d,%eax + roll $30,%r11d + addl %eax,%esi + movl 0(%rsp),%edx + movl %r11d,%eax + movl %esi,%ecx + xorl 8(%rsp),%edx + xorl %r12d,%eax + roll $5,%ecx + xorl 32(%rsp),%edx + andl %edi,%eax + leal 1518500249(%rbp,%r13,1),%r13d + xorl 52(%rsp),%edx + xorl %r12d,%eax + roll $1,%edx + addl %ecx,%r13d + roll $30,%edi + movl %edx,0(%rsp) + addl %eax,%r13d + movl 4(%rsp),%ebp + movl %edi,%eax + movl %r13d,%ecx + xorl 12(%rsp),%ebp + xorl %r11d,%eax + roll $5,%ecx + xorl 36(%rsp),%ebp + andl %esi,%eax + leal 1518500249(%rdx,%r12,1),%r12d + xorl 56(%rsp),%ebp + xorl %r11d,%eax + roll $1,%ebp + addl %ecx,%r12d + roll $30,%esi + movl %ebp,4(%rsp) + addl %eax,%r12d + movl 8(%rsp),%edx + movl %esi,%eax + movl %r12d,%ecx + xorl 16(%rsp),%edx + xorl %edi,%eax + roll $5,%ecx + xorl 40(%rsp),%edx + andl %r13d,%eax + leal 1518500249(%rbp,%r11,1),%r11d + xorl 60(%rsp),%edx + xorl %edi,%eax + roll $1,%edx + addl %ecx,%r11d + roll $30,%r13d + movl %edx,8(%rsp) + addl %eax,%r11d + movl 12(%rsp),%ebp + movl %r13d,%eax + movl %r11d,%ecx + xorl 20(%rsp),%ebp + xorl %esi,%eax + roll $5,%ecx + xorl 44(%rsp),%ebp + andl %r12d,%eax + leal 1518500249(%rdx,%rdi,1),%edi + xorl 0(%rsp),%ebp + xorl %esi,%eax + roll $1,%ebp + addl %ecx,%edi + roll $30,%r12d + movl %ebp,12(%rsp) + addl %eax,%edi + movl 16(%rsp),%edx + movl %r12d,%eax + movl %edi,%ecx + xorl 24(%rsp),%edx + xorl %r13d,%eax + roll $5,%ecx + xorl 48(%rsp),%edx + andl %r11d,%eax + leal 1518500249(%rbp,%rsi,1),%esi + xorl 4(%rsp),%edx + xorl %r13d,%eax + roll $1,%edx + addl %ecx,%esi + roll $30,%r11d + movl %edx,16(%rsp) + addl %eax,%esi + movl 20(%rsp),%ebp + movl %r11d,%eax + movl %esi,%ecx + xorl 28(%rsp),%ebp + xorl %edi,%eax + roll $5,%ecx + leal 1859775393(%rdx,%r13,1),%r13d + xorl 52(%rsp),%ebp + xorl %r12d,%eax + addl %ecx,%r13d + xorl 8(%rsp),%ebp + roll $30,%edi + addl %eax,%r13d + roll $1,%ebp + movl %ebp,20(%rsp) + movl 24(%rsp),%edx + movl %edi,%eax + movl %r13d,%ecx + xorl 32(%rsp),%edx + xorl %esi,%eax + roll $5,%ecx + leal 1859775393(%rbp,%r12,1),%r12d + xorl 56(%rsp),%edx + xorl %r11d,%eax + addl %ecx,%r12d + xorl 12(%rsp),%edx + roll $30,%esi + addl %eax,%r12d + roll $1,%edx + movl %edx,24(%rsp) + movl 28(%rsp),%ebp + movl %esi,%eax + movl %r12d,%ecx + xorl 36(%rsp),%ebp + xorl %r13d,%eax + roll $5,%ecx + leal 1859775393(%rdx,%r11,1),%r11d + xorl 60(%rsp),%ebp + xorl %edi,%eax + addl %ecx,%r11d + xorl 16(%rsp),%ebp + roll $30,%r13d + addl %eax,%r11d + 
roll $1,%ebp + movl %ebp,28(%rsp) + movl 32(%rsp),%edx + movl %r13d,%eax + movl %r11d,%ecx + xorl 40(%rsp),%edx + xorl %r12d,%eax + roll $5,%ecx + leal 1859775393(%rbp,%rdi,1),%edi + xorl 0(%rsp),%edx + xorl %esi,%eax + addl %ecx,%edi + xorl 20(%rsp),%edx + roll $30,%r12d + addl %eax,%edi + roll $1,%edx + movl %edx,32(%rsp) + movl 36(%rsp),%ebp + movl %r12d,%eax + movl %edi,%ecx + xorl 44(%rsp),%ebp + xorl %r11d,%eax + roll $5,%ecx + leal 1859775393(%rdx,%rsi,1),%esi + xorl 4(%rsp),%ebp + xorl %r13d,%eax + addl %ecx,%esi + xorl 24(%rsp),%ebp + roll $30,%r11d + addl %eax,%esi + roll $1,%ebp + movl %ebp,36(%rsp) + movl 40(%rsp),%edx + movl %r11d,%eax + movl %esi,%ecx + xorl 48(%rsp),%edx + xorl %edi,%eax + roll $5,%ecx + leal 1859775393(%rbp,%r13,1),%r13d + xorl 8(%rsp),%edx + xorl %r12d,%eax + addl %ecx,%r13d + xorl 28(%rsp),%edx + roll $30,%edi + addl %eax,%r13d + roll $1,%edx + movl %edx,40(%rsp) + movl 44(%rsp),%ebp + movl %edi,%eax + movl %r13d,%ecx + xorl 52(%rsp),%ebp + xorl %esi,%eax + roll $5,%ecx + leal 1859775393(%rdx,%r12,1),%r12d + xorl 12(%rsp),%ebp + xorl %r11d,%eax + addl %ecx,%r12d + xorl 32(%rsp),%ebp + roll $30,%esi + addl %eax,%r12d + roll $1,%ebp + movl %ebp,44(%rsp) + movl 48(%rsp),%edx + movl %esi,%eax + movl %r12d,%ecx + xorl 56(%rsp),%edx + xorl %r13d,%eax + roll $5,%ecx + leal 1859775393(%rbp,%r11,1),%r11d + xorl 16(%rsp),%edx + xorl %edi,%eax + addl %ecx,%r11d + xorl 36(%rsp),%edx + roll $30,%r13d + addl %eax,%r11d + roll $1,%edx + movl %edx,48(%rsp) + movl 52(%rsp),%ebp + movl %r13d,%eax + movl %r11d,%ecx + xorl 60(%rsp),%ebp + xorl %r12d,%eax + roll $5,%ecx + leal 1859775393(%rdx,%rdi,1),%edi + xorl 20(%rsp),%ebp + xorl %esi,%eax + addl %ecx,%edi + xorl 40(%rsp),%ebp + roll $30,%r12d + addl %eax,%edi + roll $1,%ebp + movl %ebp,52(%rsp) + movl 56(%rsp),%edx + movl %r12d,%eax + movl %edi,%ecx + xorl 0(%rsp),%edx + xorl %r11d,%eax + roll $5,%ecx + leal 1859775393(%rbp,%rsi,1),%esi + xorl 24(%rsp),%edx + xorl %r13d,%eax + addl %ecx,%esi + xorl 44(%rsp),%edx + roll $30,%r11d + addl %eax,%esi + roll $1,%edx + movl %edx,56(%rsp) + movl 60(%rsp),%ebp + movl %r11d,%eax + movl %esi,%ecx + xorl 4(%rsp),%ebp + xorl %edi,%eax + roll $5,%ecx + leal 1859775393(%rdx,%r13,1),%r13d + xorl 28(%rsp),%ebp + xorl %r12d,%eax + addl %ecx,%r13d + xorl 48(%rsp),%ebp + roll $30,%edi + addl %eax,%r13d + roll $1,%ebp + movl %ebp,60(%rsp) + movl 0(%rsp),%edx + movl %edi,%eax + movl %r13d,%ecx + xorl 8(%rsp),%edx + xorl %esi,%eax + roll $5,%ecx + leal 1859775393(%rbp,%r12,1),%r12d + xorl 32(%rsp),%edx + xorl %r11d,%eax + addl %ecx,%r12d + xorl 52(%rsp),%edx + roll $30,%esi + addl %eax,%r12d + roll $1,%edx + movl %edx,0(%rsp) + movl 4(%rsp),%ebp + movl %esi,%eax + movl %r12d,%ecx + xorl 12(%rsp),%ebp + xorl %r13d,%eax + roll $5,%ecx + leal 1859775393(%rdx,%r11,1),%r11d + xorl 36(%rsp),%ebp + xorl %edi,%eax + addl %ecx,%r11d + xorl 56(%rsp),%ebp + roll $30,%r13d + addl %eax,%r11d + roll $1,%ebp + movl %ebp,4(%rsp) + movl 8(%rsp),%edx + movl %r13d,%eax + movl %r11d,%ecx + xorl 16(%rsp),%edx + xorl %r12d,%eax + roll $5,%ecx + leal 1859775393(%rbp,%rdi,1),%edi + xorl 40(%rsp),%edx + xorl %esi,%eax + addl %ecx,%edi + xorl 60(%rsp),%edx + roll $30,%r12d + addl %eax,%edi + roll $1,%edx + movl %edx,8(%rsp) + movl 12(%rsp),%ebp + movl %r12d,%eax + movl %edi,%ecx + xorl 20(%rsp),%ebp + xorl %r11d,%eax + roll $5,%ecx + leal 1859775393(%rdx,%rsi,1),%esi + xorl 44(%rsp),%ebp + xorl %r13d,%eax + addl %ecx,%esi + xorl 0(%rsp),%ebp + roll $30,%r11d + addl %eax,%esi + roll $1,%ebp + movl %ebp,12(%rsp) + movl 
16(%rsp),%edx + movl %r11d,%eax + movl %esi,%ecx + xorl 24(%rsp),%edx + xorl %edi,%eax + roll $5,%ecx + leal 1859775393(%rbp,%r13,1),%r13d + xorl 48(%rsp),%edx + xorl %r12d,%eax + addl %ecx,%r13d + xorl 4(%rsp),%edx + roll $30,%edi + addl %eax,%r13d + roll $1,%edx + movl %edx,16(%rsp) + movl 20(%rsp),%ebp + movl %edi,%eax + movl %r13d,%ecx + xorl 28(%rsp),%ebp + xorl %esi,%eax + roll $5,%ecx + leal 1859775393(%rdx,%r12,1),%r12d + xorl 52(%rsp),%ebp + xorl %r11d,%eax + addl %ecx,%r12d + xorl 8(%rsp),%ebp + roll $30,%esi + addl %eax,%r12d + roll $1,%ebp + movl %ebp,20(%rsp) + movl 24(%rsp),%edx + movl %esi,%eax + movl %r12d,%ecx + xorl 32(%rsp),%edx + xorl %r13d,%eax + roll $5,%ecx + leal 1859775393(%rbp,%r11,1),%r11d + xorl 56(%rsp),%edx + xorl %edi,%eax + addl %ecx,%r11d + xorl 12(%rsp),%edx + roll $30,%r13d + addl %eax,%r11d + roll $1,%edx + movl %edx,24(%rsp) + movl 28(%rsp),%ebp + movl %r13d,%eax + movl %r11d,%ecx + xorl 36(%rsp),%ebp + xorl %r12d,%eax + roll $5,%ecx + leal 1859775393(%rdx,%rdi,1),%edi + xorl 60(%rsp),%ebp + xorl %esi,%eax + addl %ecx,%edi + xorl 16(%rsp),%ebp + roll $30,%r12d + addl %eax,%edi + roll $1,%ebp + movl %ebp,28(%rsp) + movl 32(%rsp),%edx + movl %r12d,%eax + movl %edi,%ecx + xorl 40(%rsp),%edx + xorl %r11d,%eax + roll $5,%ecx + leal 1859775393(%rbp,%rsi,1),%esi + xorl 0(%rsp),%edx + xorl %r13d,%eax + addl %ecx,%esi + xorl 20(%rsp),%edx + roll $30,%r11d + addl %eax,%esi + roll $1,%edx + movl %edx,32(%rsp) + movl 36(%rsp),%ebp + movl %r11d,%eax + movl %r11d,%ebx + xorl 44(%rsp),%ebp + andl %r12d,%eax + movl %esi,%ecx + xorl 4(%rsp),%ebp + xorl %r12d,%ebx + leal -1894007588(%rdx,%r13,1),%r13d + roll $5,%ecx + xorl 24(%rsp),%ebp + addl %eax,%r13d + andl %edi,%ebx + roll $1,%ebp + addl %ebx,%r13d + roll $30,%edi + movl %ebp,36(%rsp) + addl %ecx,%r13d + movl 40(%rsp),%edx + movl %edi,%eax + movl %edi,%ebx + xorl 48(%rsp),%edx + andl %r11d,%eax + movl %r13d,%ecx + xorl 8(%rsp),%edx + xorl %r11d,%ebx + leal -1894007588(%rbp,%r12,1),%r12d + roll $5,%ecx + xorl 28(%rsp),%edx + addl %eax,%r12d + andl %esi,%ebx + roll $1,%edx + addl %ebx,%r12d + roll $30,%esi + movl %edx,40(%rsp) + addl %ecx,%r12d + movl 44(%rsp),%ebp + movl %esi,%eax + movl %esi,%ebx + xorl 52(%rsp),%ebp + andl %edi,%eax + movl %r12d,%ecx + xorl 12(%rsp),%ebp + xorl %edi,%ebx + leal -1894007588(%rdx,%r11,1),%r11d + roll $5,%ecx + xorl 32(%rsp),%ebp + addl %eax,%r11d + andl %r13d,%ebx + roll $1,%ebp + addl %ebx,%r11d + roll $30,%r13d + movl %ebp,44(%rsp) + addl %ecx,%r11d + movl 48(%rsp),%edx + movl %r13d,%eax + movl %r13d,%ebx + xorl 56(%rsp),%edx + andl %esi,%eax + movl %r11d,%ecx + xorl 16(%rsp),%edx + xorl %esi,%ebx + leal -1894007588(%rbp,%rdi,1),%edi + roll $5,%ecx + xorl 36(%rsp),%edx + addl %eax,%edi + andl %r12d,%ebx + roll $1,%edx + addl %ebx,%edi + roll $30,%r12d + movl %edx,48(%rsp) + addl %ecx,%edi + movl 52(%rsp),%ebp + movl %r12d,%eax + movl %r12d,%ebx + xorl 60(%rsp),%ebp + andl %r13d,%eax + movl %edi,%ecx + xorl 20(%rsp),%ebp + xorl %r13d,%ebx + leal -1894007588(%rdx,%rsi,1),%esi + roll $5,%ecx + xorl 40(%rsp),%ebp + addl %eax,%esi + andl %r11d,%ebx + roll $1,%ebp + addl %ebx,%esi + roll $30,%r11d + movl %ebp,52(%rsp) + addl %ecx,%esi + movl 56(%rsp),%edx + movl %r11d,%eax + movl %r11d,%ebx + xorl 0(%rsp),%edx + andl %r12d,%eax + movl %esi,%ecx + xorl 24(%rsp),%edx + xorl %r12d,%ebx + leal -1894007588(%rbp,%r13,1),%r13d + roll $5,%ecx + xorl 44(%rsp),%edx + addl %eax,%r13d + andl %edi,%ebx + roll $1,%edx + addl %ebx,%r13d + roll $30,%edi + movl %edx,56(%rsp) + addl %ecx,%r13d + movl 
60(%rsp),%ebp + movl %edi,%eax + movl %edi,%ebx + xorl 4(%rsp),%ebp + andl %r11d,%eax + movl %r13d,%ecx + xorl 28(%rsp),%ebp + xorl %r11d,%ebx + leal -1894007588(%rdx,%r12,1),%r12d + roll $5,%ecx + xorl 48(%rsp),%ebp + addl %eax,%r12d + andl %esi,%ebx + roll $1,%ebp + addl %ebx,%r12d + roll $30,%esi + movl %ebp,60(%rsp) + addl %ecx,%r12d + movl 0(%rsp),%edx + movl %esi,%eax + movl %esi,%ebx + xorl 8(%rsp),%edx + andl %edi,%eax + movl %r12d,%ecx + xorl 32(%rsp),%edx + xorl %edi,%ebx + leal -1894007588(%rbp,%r11,1),%r11d + roll $5,%ecx + xorl 52(%rsp),%edx + addl %eax,%r11d + andl %r13d,%ebx + roll $1,%edx + addl %ebx,%r11d + roll $30,%r13d + movl %edx,0(%rsp) + addl %ecx,%r11d + movl 4(%rsp),%ebp + movl %r13d,%eax + movl %r13d,%ebx + xorl 12(%rsp),%ebp + andl %esi,%eax + movl %r11d,%ecx + xorl 36(%rsp),%ebp + xorl %esi,%ebx + leal -1894007588(%rdx,%rdi,1),%edi + roll $5,%ecx + xorl 56(%rsp),%ebp + addl %eax,%edi + andl %r12d,%ebx + roll $1,%ebp + addl %ebx,%edi + roll $30,%r12d + movl %ebp,4(%rsp) + addl %ecx,%edi + movl 8(%rsp),%edx + movl %r12d,%eax + movl %r12d,%ebx + xorl 16(%rsp),%edx + andl %r13d,%eax + movl %edi,%ecx + xorl 40(%rsp),%edx + xorl %r13d,%ebx + leal -1894007588(%rbp,%rsi,1),%esi + roll $5,%ecx + xorl 60(%rsp),%edx + addl %eax,%esi + andl %r11d,%ebx + roll $1,%edx + addl %ebx,%esi + roll $30,%r11d + movl %edx,8(%rsp) + addl %ecx,%esi + movl 12(%rsp),%ebp + movl %r11d,%eax + movl %r11d,%ebx + xorl 20(%rsp),%ebp + andl %r12d,%eax + movl %esi,%ecx + xorl 44(%rsp),%ebp + xorl %r12d,%ebx + leal -1894007588(%rdx,%r13,1),%r13d + roll $5,%ecx + xorl 0(%rsp),%ebp + addl %eax,%r13d + andl %edi,%ebx + roll $1,%ebp + addl %ebx,%r13d + roll $30,%edi + movl %ebp,12(%rsp) + addl %ecx,%r13d + movl 16(%rsp),%edx + movl %edi,%eax + movl %edi,%ebx + xorl 24(%rsp),%edx + andl %r11d,%eax + movl %r13d,%ecx + xorl 48(%rsp),%edx + xorl %r11d,%ebx + leal -1894007588(%rbp,%r12,1),%r12d + roll $5,%ecx + xorl 4(%rsp),%edx + addl %eax,%r12d + andl %esi,%ebx + roll $1,%edx + addl %ebx,%r12d + roll $30,%esi + movl %edx,16(%rsp) + addl %ecx,%r12d + movl 20(%rsp),%ebp + movl %esi,%eax + movl %esi,%ebx + xorl 28(%rsp),%ebp + andl %edi,%eax + movl %r12d,%ecx + xorl 52(%rsp),%ebp + xorl %edi,%ebx + leal -1894007588(%rdx,%r11,1),%r11d + roll $5,%ecx + xorl 8(%rsp),%ebp + addl %eax,%r11d + andl %r13d,%ebx + roll $1,%ebp + addl %ebx,%r11d + roll $30,%r13d + movl %ebp,20(%rsp) + addl %ecx,%r11d + movl 24(%rsp),%edx + movl %r13d,%eax + movl %r13d,%ebx + xorl 32(%rsp),%edx + andl %esi,%eax + movl %r11d,%ecx + xorl 56(%rsp),%edx + xorl %esi,%ebx + leal -1894007588(%rbp,%rdi,1),%edi + roll $5,%ecx + xorl 12(%rsp),%edx + addl %eax,%edi + andl %r12d,%ebx + roll $1,%edx + addl %ebx,%edi + roll $30,%r12d + movl %edx,24(%rsp) + addl %ecx,%edi + movl 28(%rsp),%ebp + movl %r12d,%eax + movl %r12d,%ebx + xorl 36(%rsp),%ebp + andl %r13d,%eax + movl %edi,%ecx + xorl 60(%rsp),%ebp + xorl %r13d,%ebx + leal -1894007588(%rdx,%rsi,1),%esi + roll $5,%ecx + xorl 16(%rsp),%ebp + addl %eax,%esi + andl %r11d,%ebx + roll $1,%ebp + addl %ebx,%esi + roll $30,%r11d + movl %ebp,28(%rsp) + addl %ecx,%esi + movl 32(%rsp),%edx + movl %r11d,%eax + movl %r11d,%ebx + xorl 40(%rsp),%edx + andl %r12d,%eax + movl %esi,%ecx + xorl 0(%rsp),%edx + xorl %r12d,%ebx + leal -1894007588(%rbp,%r13,1),%r13d + roll $5,%ecx + xorl 20(%rsp),%edx + addl %eax,%r13d + andl %edi,%ebx + roll $1,%edx + addl %ebx,%r13d + roll $30,%edi + movl %edx,32(%rsp) + addl %ecx,%r13d + movl 36(%rsp),%ebp + movl %edi,%eax + movl %edi,%ebx + xorl 44(%rsp),%ebp + andl %r11d,%eax + 
movl %r13d,%ecx + xorl 4(%rsp),%ebp + xorl %r11d,%ebx + leal -1894007588(%rdx,%r12,1),%r12d + roll $5,%ecx + xorl 24(%rsp),%ebp + addl %eax,%r12d + andl %esi,%ebx + roll $1,%ebp + addl %ebx,%r12d + roll $30,%esi + movl %ebp,36(%rsp) + addl %ecx,%r12d + movl 40(%rsp),%edx + movl %esi,%eax + movl %esi,%ebx + xorl 48(%rsp),%edx + andl %edi,%eax + movl %r12d,%ecx + xorl 8(%rsp),%edx + xorl %edi,%ebx + leal -1894007588(%rbp,%r11,1),%r11d + roll $5,%ecx + xorl 28(%rsp),%edx + addl %eax,%r11d + andl %r13d,%ebx + roll $1,%edx + addl %ebx,%r11d + roll $30,%r13d + movl %edx,40(%rsp) + addl %ecx,%r11d + movl 44(%rsp),%ebp + movl %r13d,%eax + movl %r13d,%ebx + xorl 52(%rsp),%ebp + andl %esi,%eax + movl %r11d,%ecx + xorl 12(%rsp),%ebp + xorl %esi,%ebx + leal -1894007588(%rdx,%rdi,1),%edi + roll $5,%ecx + xorl 32(%rsp),%ebp + addl %eax,%edi + andl %r12d,%ebx + roll $1,%ebp + addl %ebx,%edi + roll $30,%r12d + movl %ebp,44(%rsp) + addl %ecx,%edi + movl 48(%rsp),%edx + movl %r12d,%eax + movl %r12d,%ebx + xorl 56(%rsp),%edx + andl %r13d,%eax + movl %edi,%ecx + xorl 16(%rsp),%edx + xorl %r13d,%ebx + leal -1894007588(%rbp,%rsi,1),%esi + roll $5,%ecx + xorl 36(%rsp),%edx + addl %eax,%esi + andl %r11d,%ebx + roll $1,%edx + addl %ebx,%esi + roll $30,%r11d + movl %edx,48(%rsp) + addl %ecx,%esi + movl 52(%rsp),%ebp + movl %r11d,%eax + movl %esi,%ecx + xorl 60(%rsp),%ebp + xorl %edi,%eax + roll $5,%ecx + leal -899497514(%rdx,%r13,1),%r13d + xorl 20(%rsp),%ebp + xorl %r12d,%eax + addl %ecx,%r13d + xorl 40(%rsp),%ebp + roll $30,%edi + addl %eax,%r13d + roll $1,%ebp + movl %ebp,52(%rsp) + movl 56(%rsp),%edx + movl %edi,%eax + movl %r13d,%ecx + xorl 0(%rsp),%edx + xorl %esi,%eax + roll $5,%ecx + leal -899497514(%rbp,%r12,1),%r12d + xorl 24(%rsp),%edx + xorl %r11d,%eax + addl %ecx,%r12d + xorl 44(%rsp),%edx + roll $30,%esi + addl %eax,%r12d + roll $1,%edx + movl %edx,56(%rsp) + movl 60(%rsp),%ebp + movl %esi,%eax + movl %r12d,%ecx + xorl 4(%rsp),%ebp + xorl %r13d,%eax + roll $5,%ecx + leal -899497514(%rdx,%r11,1),%r11d + xorl 28(%rsp),%ebp + xorl %edi,%eax + addl %ecx,%r11d + xorl 48(%rsp),%ebp + roll $30,%r13d + addl %eax,%r11d + roll $1,%ebp + movl %ebp,60(%rsp) + movl 0(%rsp),%edx + movl %r13d,%eax + movl %r11d,%ecx + xorl 8(%rsp),%edx + xorl %r12d,%eax + roll $5,%ecx + leal -899497514(%rbp,%rdi,1),%edi + xorl 32(%rsp),%edx + xorl %esi,%eax + addl %ecx,%edi + xorl 52(%rsp),%edx + roll $30,%r12d + addl %eax,%edi + roll $1,%edx + movl %edx,0(%rsp) + movl 4(%rsp),%ebp + movl %r12d,%eax + movl %edi,%ecx + xorl 12(%rsp),%ebp + xorl %r11d,%eax + roll $5,%ecx + leal -899497514(%rdx,%rsi,1),%esi + xorl 36(%rsp),%ebp + xorl %r13d,%eax + addl %ecx,%esi + xorl 56(%rsp),%ebp + roll $30,%r11d + addl %eax,%esi + roll $1,%ebp + movl %ebp,4(%rsp) + movl 8(%rsp),%edx + movl %r11d,%eax + movl %esi,%ecx + xorl 16(%rsp),%edx + xorl %edi,%eax + roll $5,%ecx + leal -899497514(%rbp,%r13,1),%r13d + xorl 40(%rsp),%edx + xorl %r12d,%eax + addl %ecx,%r13d + xorl 60(%rsp),%edx + roll $30,%edi + addl %eax,%r13d + roll $1,%edx + movl %edx,8(%rsp) + movl 12(%rsp),%ebp + movl %edi,%eax + movl %r13d,%ecx + xorl 20(%rsp),%ebp + xorl %esi,%eax + roll $5,%ecx + leal -899497514(%rdx,%r12,1),%r12d + xorl 44(%rsp),%ebp + xorl %r11d,%eax + addl %ecx,%r12d + xorl 0(%rsp),%ebp + roll $30,%esi + addl %eax,%r12d + roll $1,%ebp + movl %ebp,12(%rsp) + movl 16(%rsp),%edx + movl %esi,%eax + movl %r12d,%ecx + xorl 24(%rsp),%edx + xorl %r13d,%eax + roll $5,%ecx + leal -899497514(%rbp,%r11,1),%r11d + xorl 48(%rsp),%edx + xorl %edi,%eax + addl %ecx,%r11d + xorl 
4(%rsp),%edx + roll $30,%r13d + addl %eax,%r11d + roll $1,%edx + movl %edx,16(%rsp) + movl 20(%rsp),%ebp + movl %r13d,%eax + movl %r11d,%ecx + xorl 28(%rsp),%ebp + xorl %r12d,%eax + roll $5,%ecx + leal -899497514(%rdx,%rdi,1),%edi + xorl 52(%rsp),%ebp + xorl %esi,%eax + addl %ecx,%edi + xorl 8(%rsp),%ebp + roll $30,%r12d + addl %eax,%edi + roll $1,%ebp + movl %ebp,20(%rsp) + movl 24(%rsp),%edx + movl %r12d,%eax + movl %edi,%ecx + xorl 32(%rsp),%edx + xorl %r11d,%eax + roll $5,%ecx + leal -899497514(%rbp,%rsi,1),%esi + xorl 56(%rsp),%edx + xorl %r13d,%eax + addl %ecx,%esi + xorl 12(%rsp),%edx + roll $30,%r11d + addl %eax,%esi + roll $1,%edx + movl %edx,24(%rsp) + movl 28(%rsp),%ebp + movl %r11d,%eax + movl %esi,%ecx + xorl 36(%rsp),%ebp + xorl %edi,%eax + roll $5,%ecx + leal -899497514(%rdx,%r13,1),%r13d + xorl 60(%rsp),%ebp + xorl %r12d,%eax + addl %ecx,%r13d + xorl 16(%rsp),%ebp + roll $30,%edi + addl %eax,%r13d + roll $1,%ebp + movl %ebp,28(%rsp) + movl 32(%rsp),%edx + movl %edi,%eax + movl %r13d,%ecx + xorl 40(%rsp),%edx + xorl %esi,%eax + roll $5,%ecx + leal -899497514(%rbp,%r12,1),%r12d + xorl 0(%rsp),%edx + xorl %r11d,%eax + addl %ecx,%r12d + xorl 20(%rsp),%edx + roll $30,%esi + addl %eax,%r12d + roll $1,%edx + movl %edx,32(%rsp) + movl 36(%rsp),%ebp + movl %esi,%eax + movl %r12d,%ecx + xorl 44(%rsp),%ebp + xorl %r13d,%eax + roll $5,%ecx + leal -899497514(%rdx,%r11,1),%r11d + xorl 4(%rsp),%ebp + xorl %edi,%eax + addl %ecx,%r11d + xorl 24(%rsp),%ebp + roll $30,%r13d + addl %eax,%r11d + roll $1,%ebp + movl %ebp,36(%rsp) + movl 40(%rsp),%edx + movl %r13d,%eax + movl %r11d,%ecx + xorl 48(%rsp),%edx + xorl %r12d,%eax + roll $5,%ecx + leal -899497514(%rbp,%rdi,1),%edi + xorl 8(%rsp),%edx + xorl %esi,%eax + addl %ecx,%edi + xorl 28(%rsp),%edx + roll $30,%r12d + addl %eax,%edi + roll $1,%edx + movl %edx,40(%rsp) + movl 44(%rsp),%ebp + movl %r12d,%eax + movl %edi,%ecx + xorl 52(%rsp),%ebp + xorl %r11d,%eax + roll $5,%ecx + leal -899497514(%rdx,%rsi,1),%esi + xorl 12(%rsp),%ebp + xorl %r13d,%eax + addl %ecx,%esi + xorl 32(%rsp),%ebp + roll $30,%r11d + addl %eax,%esi + roll $1,%ebp + movl %ebp,44(%rsp) + movl 48(%rsp),%edx + movl %r11d,%eax + movl %esi,%ecx + xorl 56(%rsp),%edx + xorl %edi,%eax + roll $5,%ecx + leal -899497514(%rbp,%r13,1),%r13d + xorl 16(%rsp),%edx + xorl %r12d,%eax + addl %ecx,%r13d + xorl 36(%rsp),%edx + roll $30,%edi + addl %eax,%r13d + roll $1,%edx + movl %edx,48(%rsp) + movl 52(%rsp),%ebp + movl %edi,%eax + movl %r13d,%ecx + xorl 60(%rsp),%ebp + xorl %esi,%eax + roll $5,%ecx + leal -899497514(%rdx,%r12,1),%r12d + xorl 20(%rsp),%ebp + xorl %r11d,%eax + addl %ecx,%r12d + xorl 40(%rsp),%ebp + roll $30,%esi + addl %eax,%r12d + roll $1,%ebp + movl 56(%rsp),%edx + movl %esi,%eax + movl %r12d,%ecx + xorl 0(%rsp),%edx + xorl %r13d,%eax + roll $5,%ecx + leal -899497514(%rbp,%r11,1),%r11d + xorl 24(%rsp),%edx + xorl %edi,%eax + addl %ecx,%r11d + xorl 44(%rsp),%edx + roll $30,%r13d + addl %eax,%r11d + roll $1,%edx + movl 60(%rsp),%ebp + movl %r13d,%eax + movl %r11d,%ecx + xorl 4(%rsp),%ebp + xorl %r12d,%eax + roll $5,%ecx + leal -899497514(%rdx,%rdi,1),%edi + xorl 28(%rsp),%ebp + xorl %esi,%eax + addl %ecx,%edi + xorl 48(%rsp),%ebp + roll $30,%r12d + addl %eax,%edi + roll $1,%ebp + movl %r12d,%eax + movl %edi,%ecx + xorl %r11d,%eax + leal -899497514(%rbp,%rsi,1),%esi + roll $5,%ecx + xorl %r13d,%eax + addl %ecx,%esi + roll $30,%r11d + addl %eax,%esi + addl 0(%r8),%esi + addl 4(%r8),%edi + addl 8(%r8),%r11d + addl 12(%r8),%r12d + addl 16(%r8),%r13d + movl %esi,0(%r8) + movl 
%edi,4(%r8) + movl %r11d,8(%r8) + movl %r12d,12(%r8) + movl %r13d,16(%r8) + + subq $1,%r10 + leaq 64(%r9),%r9 + jnz .Lloop + + movq 64(%rsp),%rsi + movq (%rsi),%r13 + movq 8(%rsi),%r12 + movq 16(%rsi),%rbp + movq 24(%rsi),%rbx + leaq 32(%rsi),%rsp +.Lepilogue: + .byte 0xf3,0xc3 +.size sha1_block_data_order,.-sha1_block_data_order +.type sha1_block_data_order_ssse3,@function +.align 16 +sha1_block_data_order_ssse3: +_ssse3_shortcut: + pushq %rbx + pushq %rbp + pushq %r12 + leaq -64(%rsp),%rsp + movq %rdi,%r8 + movq %rsi,%r9 + movq %rdx,%r10 + + shlq $6,%r10 + addq %r9,%r10 + leaq K_XX_XX(%rip),%r11 + + movl 0(%r8),%eax + movl 4(%r8),%ebx + movl 8(%r8),%ecx + movl 12(%r8),%edx + movl %ebx,%esi + movl 16(%r8),%ebp + + movdqa 64(%r11),%xmm6 + movdqa 0(%r11),%xmm9 + movdqu 0(%r9),%xmm0 + movdqu 16(%r9),%xmm1 + movdqu 32(%r9),%xmm2 + movdqu 48(%r9),%xmm3 +.byte 102,15,56,0,198 + addq $64,%r9 +.byte 102,15,56,0,206 +.byte 102,15,56,0,214 +.byte 102,15,56,0,222 + paddd %xmm9,%xmm0 + paddd %xmm9,%xmm1 + paddd %xmm9,%xmm2 + movdqa %xmm0,0(%rsp) + psubd %xmm9,%xmm0 + movdqa %xmm1,16(%rsp) + psubd %xmm9,%xmm1 + movdqa %xmm2,32(%rsp) + psubd %xmm9,%xmm2 + jmp .Loop_ssse3 +.align 16 +.Loop_ssse3: + movdqa %xmm1,%xmm4 + addl 0(%rsp),%ebp + xorl %edx,%ecx + movdqa %xmm3,%xmm8 +.byte 102,15,58,15,224,8 + movl %eax,%edi + roll $5,%eax + paddd %xmm3,%xmm9 + andl %ecx,%esi + xorl %edx,%ecx + psrldq $4,%xmm8 + xorl %edx,%esi + addl %eax,%ebp + pxor %xmm0,%xmm4 + rorl $2,%ebx + addl %esi,%ebp + pxor %xmm2,%xmm8 + addl 4(%rsp),%edx + xorl %ecx,%ebx + movl %ebp,%esi + roll $5,%ebp + pxor %xmm8,%xmm4 + andl %ebx,%edi + xorl %ecx,%ebx + movdqa %xmm9,48(%rsp) + xorl %ecx,%edi + addl %ebp,%edx + movdqa %xmm4,%xmm10 + movdqa %xmm4,%xmm8 + rorl $7,%eax + addl %edi,%edx + addl 8(%rsp),%ecx + xorl %ebx,%eax + pslldq $12,%xmm10 + paddd %xmm4,%xmm4 + movl %edx,%edi + roll $5,%edx + andl %eax,%esi + xorl %ebx,%eax + psrld $31,%xmm8 + xorl %ebx,%esi + addl %edx,%ecx + movdqa %xmm10,%xmm9 + rorl $7,%ebp + addl %esi,%ecx + psrld $30,%xmm10 + por %xmm8,%xmm4 + addl 12(%rsp),%ebx + xorl %eax,%ebp + movl %ecx,%esi + roll $5,%ecx + pslld $2,%xmm9 + pxor %xmm10,%xmm4 + andl %ebp,%edi + xorl %eax,%ebp + movdqa 0(%r11),%xmm10 + xorl %eax,%edi + addl %ecx,%ebx + pxor %xmm9,%xmm4 + rorl $7,%edx + addl %edi,%ebx + movdqa %xmm2,%xmm5 + addl 16(%rsp),%eax + xorl %ebp,%edx + movdqa %xmm4,%xmm9 +.byte 102,15,58,15,233,8 + movl %ebx,%edi + roll $5,%ebx + paddd %xmm4,%xmm10 + andl %edx,%esi + xorl %ebp,%edx + psrldq $4,%xmm9 + xorl %ebp,%esi + addl %ebx,%eax + pxor %xmm1,%xmm5 + rorl $7,%ecx + addl %esi,%eax + pxor %xmm3,%xmm9 + addl 20(%rsp),%ebp + xorl %edx,%ecx + movl %eax,%esi + roll $5,%eax + pxor %xmm9,%xmm5 + andl %ecx,%edi + xorl %edx,%ecx + movdqa %xmm10,0(%rsp) + xorl %edx,%edi + addl %eax,%ebp + movdqa %xmm5,%xmm8 + movdqa %xmm5,%xmm9 + rorl $7,%ebx + addl %edi,%ebp + addl 24(%rsp),%edx + xorl %ecx,%ebx + pslldq $12,%xmm8 + paddd %xmm5,%xmm5 + movl %ebp,%edi + roll $5,%ebp + andl %ebx,%esi + xorl %ecx,%ebx + psrld $31,%xmm9 + xorl %ecx,%esi + addl %ebp,%edx + movdqa %xmm8,%xmm10 + rorl $7,%eax + addl %esi,%edx + psrld $30,%xmm8 + por %xmm9,%xmm5 + addl 28(%rsp),%ecx + xorl %ebx,%eax + movl %edx,%esi + roll $5,%edx + pslld $2,%xmm10 + pxor %xmm8,%xmm5 + andl %eax,%edi + xorl %ebx,%eax + movdqa 16(%r11),%xmm8 + xorl %ebx,%edi + addl %edx,%ecx + pxor %xmm10,%xmm5 + rorl $7,%ebp + addl %edi,%ecx + movdqa %xmm3,%xmm6 + addl 32(%rsp),%ebx + xorl %eax,%ebp + movdqa %xmm5,%xmm10 +.byte 102,15,58,15,242,8 + movl %ecx,%edi + roll $5,%ecx + 
paddd %xmm5,%xmm8 + andl %ebp,%esi + xorl %eax,%ebp + psrldq $4,%xmm10 + xorl %eax,%esi + addl %ecx,%ebx + pxor %xmm2,%xmm6 + rorl $7,%edx + addl %esi,%ebx + pxor %xmm4,%xmm10 + addl 36(%rsp),%eax + xorl %ebp,%edx + movl %ebx,%esi + roll $5,%ebx + pxor %xmm10,%xmm6 + andl %edx,%edi + xorl %ebp,%edx + movdqa %xmm8,16(%rsp) + xorl %ebp,%edi + addl %ebx,%eax + movdqa %xmm6,%xmm9 + movdqa %xmm6,%xmm10 + rorl $7,%ecx + addl %edi,%eax + addl 40(%rsp),%ebp + xorl %edx,%ecx + pslldq $12,%xmm9 + paddd %xmm6,%xmm6 + movl %eax,%edi + roll $5,%eax + andl %ecx,%esi + xorl %edx,%ecx + psrld $31,%xmm10 + xorl %edx,%esi + addl %eax,%ebp + movdqa %xmm9,%xmm8 + rorl $7,%ebx + addl %esi,%ebp + psrld $30,%xmm9 + por %xmm10,%xmm6 + addl 44(%rsp),%edx + xorl %ecx,%ebx + movl %ebp,%esi + roll $5,%ebp + pslld $2,%xmm8 + pxor %xmm9,%xmm6 + andl %ebx,%edi + xorl %ecx,%ebx + movdqa 16(%r11),%xmm9 + xorl %ecx,%edi + addl %ebp,%edx + pxor %xmm8,%xmm6 + rorl $7,%eax + addl %edi,%edx + movdqa %xmm4,%xmm7 + addl 48(%rsp),%ecx + xorl %ebx,%eax + movdqa %xmm6,%xmm8 +.byte 102,15,58,15,251,8 + movl %edx,%edi + roll $5,%edx + paddd %xmm6,%xmm9 + andl %eax,%esi + xorl %ebx,%eax + psrldq $4,%xmm8 + xorl %ebx,%esi + addl %edx,%ecx + pxor %xmm3,%xmm7 + rorl $7,%ebp + addl %esi,%ecx + pxor %xmm5,%xmm8 + addl 52(%rsp),%ebx + xorl %eax,%ebp + movl %ecx,%esi + roll $5,%ecx + pxor %xmm8,%xmm7 + andl %ebp,%edi + xorl %eax,%ebp + movdqa %xmm9,32(%rsp) + xorl %eax,%edi + addl %ecx,%ebx + movdqa %xmm7,%xmm10 + movdqa %xmm7,%xmm8 + rorl $7,%edx + addl %edi,%ebx + addl 56(%rsp),%eax + xorl %ebp,%edx + pslldq $12,%xmm10 + paddd %xmm7,%xmm7 + movl %ebx,%edi + roll $5,%ebx + andl %edx,%esi + xorl %ebp,%edx + psrld $31,%xmm8 + xorl %ebp,%esi + addl %ebx,%eax + movdqa %xmm10,%xmm9 + rorl $7,%ecx + addl %esi,%eax + psrld $30,%xmm10 + por %xmm8,%xmm7 + addl 60(%rsp),%ebp + xorl %edx,%ecx + movl %eax,%esi + roll $5,%eax + pslld $2,%xmm9 + pxor %xmm10,%xmm7 + andl %ecx,%edi + xorl %edx,%ecx + movdqa 16(%r11),%xmm10 + xorl %edx,%edi + addl %eax,%ebp + pxor %xmm9,%xmm7 + rorl $7,%ebx + addl %edi,%ebp + movdqa %xmm7,%xmm9 + addl 0(%rsp),%edx + pxor %xmm4,%xmm0 +.byte 102,68,15,58,15,206,8 + xorl %ecx,%ebx + movl %ebp,%edi + roll $5,%ebp + pxor %xmm1,%xmm0 + andl %ebx,%esi + xorl %ecx,%ebx + movdqa %xmm10,%xmm8 + paddd %xmm7,%xmm10 + xorl %ecx,%esi + addl %ebp,%edx + pxor %xmm9,%xmm0 + rorl $7,%eax + addl %esi,%edx + addl 4(%rsp),%ecx + xorl %ebx,%eax + movdqa %xmm0,%xmm9 + movdqa %xmm10,48(%rsp) + movl %edx,%esi + roll $5,%edx + andl %eax,%edi + xorl %ebx,%eax + pslld $2,%xmm0 + xorl %ebx,%edi + addl %edx,%ecx + psrld $30,%xmm9 + rorl $7,%ebp + addl %edi,%ecx + addl 8(%rsp),%ebx + xorl %eax,%ebp + movl %ecx,%edi + roll $5,%ecx + por %xmm9,%xmm0 + andl %ebp,%esi + xorl %eax,%ebp + movdqa %xmm0,%xmm10 + xorl %eax,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 12(%rsp),%eax + xorl %ebp,%edx + movl %ebx,%esi + roll $5,%ebx + andl %edx,%edi + xorl %ebp,%edx + xorl %ebp,%edi + addl %ebx,%eax + rorl $7,%ecx + addl %edi,%eax + addl 16(%rsp),%ebp + pxor %xmm5,%xmm1 +.byte 102,68,15,58,15,215,8 + xorl %edx,%esi + movl %eax,%edi + roll $5,%eax + pxor %xmm2,%xmm1 + xorl %ecx,%esi + addl %eax,%ebp + movdqa %xmm8,%xmm9 + paddd %xmm0,%xmm8 + rorl $7,%ebx + addl %esi,%ebp + pxor %xmm10,%xmm1 + addl 20(%rsp),%edx + xorl %ecx,%edi + movl %ebp,%esi + roll $5,%ebp + movdqa %xmm1,%xmm10 + movdqa %xmm8,0(%rsp) + xorl %ebx,%edi + addl %ebp,%edx + rorl $7,%eax + addl %edi,%edx + pslld $2,%xmm1 + addl 24(%rsp),%ecx + xorl %ebx,%esi + psrld $30,%xmm10 + movl 
%edx,%edi + roll $5,%edx + xorl %eax,%esi + addl %edx,%ecx + rorl $7,%ebp + addl %esi,%ecx + por %xmm10,%xmm1 + addl 28(%rsp),%ebx + xorl %eax,%edi + movdqa %xmm1,%xmm8 + movl %ecx,%esi + roll $5,%ecx + xorl %ebp,%edi + addl %ecx,%ebx + rorl $7,%edx + addl %edi,%ebx + addl 32(%rsp),%eax + pxor %xmm6,%xmm2 +.byte 102,68,15,58,15,192,8 + xorl %ebp,%esi + movl %ebx,%edi + roll $5,%ebx + pxor %xmm3,%xmm2 + xorl %edx,%esi + addl %ebx,%eax + movdqa 32(%r11),%xmm10 + paddd %xmm1,%xmm9 + rorl $7,%ecx + addl %esi,%eax + pxor %xmm8,%xmm2 + addl 36(%rsp),%ebp + xorl %edx,%edi + movl %eax,%esi + roll $5,%eax + movdqa %xmm2,%xmm8 + movdqa %xmm9,16(%rsp) + xorl %ecx,%edi + addl %eax,%ebp + rorl $7,%ebx + addl %edi,%ebp + pslld $2,%xmm2 + addl 40(%rsp),%edx + xorl %ecx,%esi + psrld $30,%xmm8 + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%esi + addl %ebp,%edx + rorl $7,%eax + addl %esi,%edx + por %xmm8,%xmm2 + addl 44(%rsp),%ecx + xorl %ebx,%edi + movdqa %xmm2,%xmm9 + movl %edx,%esi + roll $5,%edx + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%ebp + addl %edi,%ecx + addl 48(%rsp),%ebx + pxor %xmm7,%xmm3 +.byte 102,68,15,58,15,201,8 + xorl %eax,%esi + movl %ecx,%edi + roll $5,%ecx + pxor %xmm4,%xmm3 + xorl %ebp,%esi + addl %ecx,%ebx + movdqa %xmm10,%xmm8 + paddd %xmm2,%xmm10 + rorl $7,%edx + addl %esi,%ebx + pxor %xmm9,%xmm3 + addl 52(%rsp),%eax + xorl %ebp,%edi + movl %ebx,%esi + roll $5,%ebx + movdqa %xmm3,%xmm9 + movdqa %xmm10,32(%rsp) + xorl %edx,%edi + addl %ebx,%eax + rorl $7,%ecx + addl %edi,%eax + pslld $2,%xmm3 + addl 56(%rsp),%ebp + xorl %edx,%esi + psrld $30,%xmm9 + movl %eax,%edi + roll $5,%eax + xorl %ecx,%esi + addl %eax,%ebp + rorl $7,%ebx + addl %esi,%ebp + por %xmm9,%xmm3 + addl 60(%rsp),%edx + xorl %ecx,%edi + movdqa %xmm3,%xmm10 + movl %ebp,%esi + roll $5,%ebp + xorl %ebx,%edi + addl %ebp,%edx + rorl $7,%eax + addl %edi,%edx + addl 0(%rsp),%ecx + pxor %xmm0,%xmm4 +.byte 102,68,15,58,15,210,8 + xorl %ebx,%esi + movl %edx,%edi + roll $5,%edx + pxor %xmm5,%xmm4 + xorl %eax,%esi + addl %edx,%ecx + movdqa %xmm8,%xmm9 + paddd %xmm3,%xmm8 + rorl $7,%ebp + addl %esi,%ecx + pxor %xmm10,%xmm4 + addl 4(%rsp),%ebx + xorl %eax,%edi + movl %ecx,%esi + roll $5,%ecx + movdqa %xmm4,%xmm10 + movdqa %xmm8,48(%rsp) + xorl %ebp,%edi + addl %ecx,%ebx + rorl $7,%edx + addl %edi,%ebx + pslld $2,%xmm4 + addl 8(%rsp),%eax + xorl %ebp,%esi + psrld $30,%xmm10 + movl %ebx,%edi + roll $5,%ebx + xorl %edx,%esi + addl %ebx,%eax + rorl $7,%ecx + addl %esi,%eax + por %xmm10,%xmm4 + addl 12(%rsp),%ebp + xorl %edx,%edi + movdqa %xmm4,%xmm8 + movl %eax,%esi + roll $5,%eax + xorl %ecx,%edi + addl %eax,%ebp + rorl $7,%ebx + addl %edi,%ebp + addl 16(%rsp),%edx + pxor %xmm1,%xmm5 +.byte 102,68,15,58,15,195,8 + xorl %ecx,%esi + movl %ebp,%edi + roll $5,%ebp + pxor %xmm6,%xmm5 + xorl %ebx,%esi + addl %ebp,%edx + movdqa %xmm9,%xmm10 + paddd %xmm4,%xmm9 + rorl $7,%eax + addl %esi,%edx + pxor %xmm8,%xmm5 + addl 20(%rsp),%ecx + xorl %ebx,%edi + movl %edx,%esi + roll $5,%edx + movdqa %xmm5,%xmm8 + movdqa %xmm9,0(%rsp) + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%ebp + addl %edi,%ecx + pslld $2,%xmm5 + addl 24(%rsp),%ebx + xorl %eax,%esi + psrld $30,%xmm8 + movl %ecx,%edi + roll $5,%ecx + xorl %ebp,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + por %xmm8,%xmm5 + addl 28(%rsp),%eax + xorl %ebp,%edi + movdqa %xmm5,%xmm9 + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%edi + addl %ebx,%eax + rorl $7,%ecx + addl %edi,%eax + movl %ecx,%edi + pxor %xmm2,%xmm6 +.byte 102,68,15,58,15,204,8 + xorl %edx,%ecx + addl 32(%rsp),%ebp + andl 
%edx,%edi + pxor %xmm7,%xmm6 + andl %ecx,%esi + rorl $7,%ebx + movdqa %xmm10,%xmm8 + paddd %xmm5,%xmm10 + addl %edi,%ebp + movl %eax,%edi + pxor %xmm9,%xmm6 + roll $5,%eax + addl %esi,%ebp + xorl %edx,%ecx + addl %eax,%ebp + movdqa %xmm6,%xmm9 + movdqa %xmm10,16(%rsp) + movl %ebx,%esi + xorl %ecx,%ebx + addl 36(%rsp),%edx + andl %ecx,%esi + pslld $2,%xmm6 + andl %ebx,%edi + rorl $7,%eax + psrld $30,%xmm9 + addl %esi,%edx + movl %ebp,%esi + roll $5,%ebp + addl %edi,%edx + xorl %ecx,%ebx + addl %ebp,%edx + por %xmm9,%xmm6 + movl %eax,%edi + xorl %ebx,%eax + movdqa %xmm6,%xmm10 + addl 40(%rsp),%ecx + andl %ebx,%edi + andl %eax,%esi + rorl $7,%ebp + addl %edi,%ecx + movl %edx,%edi + roll $5,%edx + addl %esi,%ecx + xorl %ebx,%eax + addl %edx,%ecx + movl %ebp,%esi + xorl %eax,%ebp + addl 44(%rsp),%ebx + andl %eax,%esi + andl %ebp,%edi + rorl $7,%edx + addl %esi,%ebx + movl %ecx,%esi + roll $5,%ecx + addl %edi,%ebx + xorl %eax,%ebp + addl %ecx,%ebx + movl %edx,%edi + pxor %xmm3,%xmm7 +.byte 102,68,15,58,15,213,8 + xorl %ebp,%edx + addl 48(%rsp),%eax + andl %ebp,%edi + pxor %xmm0,%xmm7 + andl %edx,%esi + rorl $7,%ecx + movdqa 48(%r11),%xmm9 + paddd %xmm6,%xmm8 + addl %edi,%eax + movl %ebx,%edi + pxor %xmm10,%xmm7 + roll $5,%ebx + addl %esi,%eax + xorl %ebp,%edx + addl %ebx,%eax + movdqa %xmm7,%xmm10 + movdqa %xmm8,32(%rsp) + movl %ecx,%esi + xorl %edx,%ecx + addl 52(%rsp),%ebp + andl %edx,%esi + pslld $2,%xmm7 + andl %ecx,%edi + rorl $7,%ebx + psrld $30,%xmm10 + addl %esi,%ebp + movl %eax,%esi + roll $5,%eax + addl %edi,%ebp + xorl %edx,%ecx + addl %eax,%ebp + por %xmm10,%xmm7 + movl %ebx,%edi + xorl %ecx,%ebx + movdqa %xmm7,%xmm8 + addl 56(%rsp),%edx + andl %ecx,%edi + andl %ebx,%esi + rorl $7,%eax + addl %edi,%edx + movl %ebp,%edi + roll $5,%ebp + addl %esi,%edx + xorl %ecx,%ebx + addl %ebp,%edx + movl %eax,%esi + xorl %ebx,%eax + addl 60(%rsp),%ecx + andl %ebx,%esi + andl %eax,%edi + rorl $7,%ebp + addl %esi,%ecx + movl %edx,%esi + roll $5,%edx + addl %edi,%ecx + xorl %ebx,%eax + addl %edx,%ecx + movl %ebp,%edi + pxor %xmm4,%xmm0 +.byte 102,68,15,58,15,198,8 + xorl %eax,%ebp + addl 0(%rsp),%ebx + andl %eax,%edi + pxor %xmm1,%xmm0 + andl %ebp,%esi + rorl $7,%edx + movdqa %xmm9,%xmm10 + paddd %xmm7,%xmm9 + addl %edi,%ebx + movl %ecx,%edi + pxor %xmm8,%xmm0 + roll $5,%ecx + addl %esi,%ebx + xorl %eax,%ebp + addl %ecx,%ebx + movdqa %xmm0,%xmm8 + movdqa %xmm9,48(%rsp) + movl %edx,%esi + xorl %ebp,%edx + addl 4(%rsp),%eax + andl %ebp,%esi + pslld $2,%xmm0 + andl %edx,%edi + rorl $7,%ecx + psrld $30,%xmm8 + addl %esi,%eax + movl %ebx,%esi + roll $5,%ebx + addl %edi,%eax + xorl %ebp,%edx + addl %ebx,%eax + por %xmm8,%xmm0 + movl %ecx,%edi + xorl %edx,%ecx + movdqa %xmm0,%xmm9 + addl 8(%rsp),%ebp + andl %edx,%edi + andl %ecx,%esi + rorl $7,%ebx + addl %edi,%ebp + movl %eax,%edi + roll $5,%eax + addl %esi,%ebp + xorl %edx,%ecx + addl %eax,%ebp + movl %ebx,%esi + xorl %ecx,%ebx + addl 12(%rsp),%edx + andl %ecx,%esi + andl %ebx,%edi + rorl $7,%eax + addl %esi,%edx + movl %ebp,%esi + roll $5,%ebp + addl %edi,%edx + xorl %ecx,%ebx + addl %ebp,%edx + movl %eax,%edi + pxor %xmm5,%xmm1 +.byte 102,68,15,58,15,207,8 + xorl %ebx,%eax + addl 16(%rsp),%ecx + andl %ebx,%edi + pxor %xmm2,%xmm1 + andl %eax,%esi + rorl $7,%ebp + movdqa %xmm10,%xmm8 + paddd %xmm0,%xmm10 + addl %edi,%ecx + movl %edx,%edi + pxor %xmm9,%xmm1 + roll $5,%edx + addl %esi,%ecx + xorl %ebx,%eax + addl %edx,%ecx + movdqa %xmm1,%xmm9 + movdqa %xmm10,0(%rsp) + movl %ebp,%esi + xorl %eax,%ebp + addl 20(%rsp),%ebx + andl %eax,%esi + pslld $2,%xmm1 + 
andl %ebp,%edi + rorl $7,%edx + psrld $30,%xmm9 + addl %esi,%ebx + movl %ecx,%esi + roll $5,%ecx + addl %edi,%ebx + xorl %eax,%ebp + addl %ecx,%ebx + por %xmm9,%xmm1 + movl %edx,%edi + xorl %ebp,%edx + movdqa %xmm1,%xmm10 + addl 24(%rsp),%eax + andl %ebp,%edi + andl %edx,%esi + rorl $7,%ecx + addl %edi,%eax + movl %ebx,%edi + roll $5,%ebx + addl %esi,%eax + xorl %ebp,%edx + addl %ebx,%eax + movl %ecx,%esi + xorl %edx,%ecx + addl 28(%rsp),%ebp + andl %edx,%esi + andl %ecx,%edi + rorl $7,%ebx + addl %esi,%ebp + movl %eax,%esi + roll $5,%eax + addl %edi,%ebp + xorl %edx,%ecx + addl %eax,%ebp + movl %ebx,%edi + pxor %xmm6,%xmm2 +.byte 102,68,15,58,15,208,8 + xorl %ecx,%ebx + addl 32(%rsp),%edx + andl %ecx,%edi + pxor %xmm3,%xmm2 + andl %ebx,%esi + rorl $7,%eax + movdqa %xmm8,%xmm9 + paddd %xmm1,%xmm8 + addl %edi,%edx + movl %ebp,%edi + pxor %xmm10,%xmm2 + roll $5,%ebp + addl %esi,%edx + xorl %ecx,%ebx + addl %ebp,%edx + movdqa %xmm2,%xmm10 + movdqa %xmm8,16(%rsp) + movl %eax,%esi + xorl %ebx,%eax + addl 36(%rsp),%ecx + andl %ebx,%esi + pslld $2,%xmm2 + andl %eax,%edi + rorl $7,%ebp + psrld $30,%xmm10 + addl %esi,%ecx + movl %edx,%esi + roll $5,%edx + addl %edi,%ecx + xorl %ebx,%eax + addl %edx,%ecx + por %xmm10,%xmm2 + movl %ebp,%edi + xorl %eax,%ebp + movdqa %xmm2,%xmm8 + addl 40(%rsp),%ebx + andl %eax,%edi + andl %ebp,%esi + rorl $7,%edx + addl %edi,%ebx + movl %ecx,%edi + roll $5,%ecx + addl %esi,%ebx + xorl %eax,%ebp + addl %ecx,%ebx + movl %edx,%esi + xorl %ebp,%edx + addl 44(%rsp),%eax + andl %ebp,%esi + andl %edx,%edi + rorl $7,%ecx + addl %esi,%eax + movl %ebx,%esi + roll $5,%ebx + addl %edi,%eax + xorl %ebp,%edx + addl %ebx,%eax + addl 48(%rsp),%ebp + pxor %xmm7,%xmm3 +.byte 102,68,15,58,15,193,8 + xorl %edx,%esi + movl %eax,%edi + roll $5,%eax + pxor %xmm4,%xmm3 + xorl %ecx,%esi + addl %eax,%ebp + movdqa %xmm9,%xmm10 + paddd %xmm2,%xmm9 + rorl $7,%ebx + addl %esi,%ebp + pxor %xmm8,%xmm3 + addl 52(%rsp),%edx + xorl %ecx,%edi + movl %ebp,%esi + roll $5,%ebp + movdqa %xmm3,%xmm8 + movdqa %xmm9,32(%rsp) + xorl %ebx,%edi + addl %ebp,%edx + rorl $7,%eax + addl %edi,%edx + pslld $2,%xmm3 + addl 56(%rsp),%ecx + xorl %ebx,%esi + psrld $30,%xmm8 + movl %edx,%edi + roll $5,%edx + xorl %eax,%esi + addl %edx,%ecx + rorl $7,%ebp + addl %esi,%ecx + por %xmm8,%xmm3 + addl 60(%rsp),%ebx + xorl %eax,%edi + movl %ecx,%esi + roll $5,%ecx + xorl %ebp,%edi + addl %ecx,%ebx + rorl $7,%edx + addl %edi,%ebx + addl 0(%rsp),%eax + paddd %xmm3,%xmm10 + xorl %ebp,%esi + movl %ebx,%edi + roll $5,%ebx + xorl %edx,%esi + movdqa %xmm10,48(%rsp) + addl %ebx,%eax + rorl $7,%ecx + addl %esi,%eax + addl 4(%rsp),%ebp + xorl %edx,%edi + movl %eax,%esi + roll $5,%eax + xorl %ecx,%edi + addl %eax,%ebp + rorl $7,%ebx + addl %edi,%ebp + addl 8(%rsp),%edx + xorl %ecx,%esi + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%esi + addl %ebp,%edx + rorl $7,%eax + addl %esi,%edx + addl 12(%rsp),%ecx + xorl %ebx,%edi + movl %edx,%esi + roll $5,%edx + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%ebp + addl %edi,%ecx + cmpq %r10,%r9 + je .Ldone_ssse3 + movdqa 64(%r11),%xmm6 + movdqa 0(%r11),%xmm9 + movdqu 0(%r9),%xmm0 + movdqu 16(%r9),%xmm1 + movdqu 32(%r9),%xmm2 + movdqu 48(%r9),%xmm3 +.byte 102,15,56,0,198 + addq $64,%r9 + addl 16(%rsp),%ebx + xorl %eax,%esi +.byte 102,15,56,0,206 + movl %ecx,%edi + roll $5,%ecx + paddd %xmm9,%xmm0 + xorl %ebp,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + movdqa %xmm0,0(%rsp) + addl 20(%rsp),%eax + xorl %ebp,%edi + psubd %xmm9,%xmm0 + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%edi + addl %ebx,%eax 
+ rorl $7,%ecx + addl %edi,%eax + addl 24(%rsp),%ebp + xorl %edx,%esi + movl %eax,%edi + roll $5,%eax + xorl %ecx,%esi + addl %eax,%ebp + rorl $7,%ebx + addl %esi,%ebp + addl 28(%rsp),%edx + xorl %ecx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ebx,%edi + addl %ebp,%edx + rorl $7,%eax + addl %edi,%edx + addl 32(%rsp),%ecx + xorl %ebx,%esi +.byte 102,15,56,0,214 + movl %edx,%edi + roll $5,%edx + paddd %xmm9,%xmm1 + xorl %eax,%esi + addl %edx,%ecx + rorl $7,%ebp + addl %esi,%ecx + movdqa %xmm1,16(%rsp) + addl 36(%rsp),%ebx + xorl %eax,%edi + psubd %xmm9,%xmm1 + movl %ecx,%esi + roll $5,%ecx + xorl %ebp,%edi + addl %ecx,%ebx + rorl $7,%edx + addl %edi,%ebx + addl 40(%rsp),%eax + xorl %ebp,%esi + movl %ebx,%edi + roll $5,%ebx + xorl %edx,%esi + addl %ebx,%eax + rorl $7,%ecx + addl %esi,%eax + addl 44(%rsp),%ebp + xorl %edx,%edi + movl %eax,%esi + roll $5,%eax + xorl %ecx,%edi + addl %eax,%ebp + rorl $7,%ebx + addl %edi,%ebp + addl 48(%rsp),%edx + xorl %ecx,%esi +.byte 102,15,56,0,222 + movl %ebp,%edi + roll $5,%ebp + paddd %xmm9,%xmm2 + xorl %ebx,%esi + addl %ebp,%edx + rorl $7,%eax + addl %esi,%edx + movdqa %xmm2,32(%rsp) + addl 52(%rsp),%ecx + xorl %ebx,%edi + psubd %xmm9,%xmm2 + movl %edx,%esi + roll $5,%edx + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%ebp + addl %edi,%ecx + addl 56(%rsp),%ebx + xorl %eax,%esi + movl %ecx,%edi + roll $5,%ecx + xorl %ebp,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 60(%rsp),%eax + xorl %ebp,%edi + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%edi + addl %ebx,%eax + rorl $7,%ecx + addl %edi,%eax + addl 0(%r8),%eax + addl 4(%r8),%esi + addl 8(%r8),%ecx + addl 12(%r8),%edx + movl %eax,0(%r8) + addl 16(%r8),%ebp + movl %esi,4(%r8) + movl %esi,%ebx + movl %ecx,8(%r8) + movl %edx,12(%r8) + movl %ebp,16(%r8) + jmp .Loop_ssse3 + +.align 16 +.Ldone_ssse3: + addl 16(%rsp),%ebx + xorl %eax,%esi + movl %ecx,%edi + roll $5,%ecx + xorl %ebp,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 20(%rsp),%eax + xorl %ebp,%edi + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%edi + addl %ebx,%eax + rorl $7,%ecx + addl %edi,%eax + addl 24(%rsp),%ebp + xorl %edx,%esi + movl %eax,%edi + roll $5,%eax + xorl %ecx,%esi + addl %eax,%ebp + rorl $7,%ebx + addl %esi,%ebp + addl 28(%rsp),%edx + xorl %ecx,%edi + movl %ebp,%esi + roll $5,%ebp + xorl %ebx,%edi + addl %ebp,%edx + rorl $7,%eax + addl %edi,%edx + addl 32(%rsp),%ecx + xorl %ebx,%esi + movl %edx,%edi + roll $5,%edx + xorl %eax,%esi + addl %edx,%ecx + rorl $7,%ebp + addl %esi,%ecx + addl 36(%rsp),%ebx + xorl %eax,%edi + movl %ecx,%esi + roll $5,%ecx + xorl %ebp,%edi + addl %ecx,%ebx + rorl $7,%edx + addl %edi,%ebx + addl 40(%rsp),%eax + xorl %ebp,%esi + movl %ebx,%edi + roll $5,%ebx + xorl %edx,%esi + addl %ebx,%eax + rorl $7,%ecx + addl %esi,%eax + addl 44(%rsp),%ebp + xorl %edx,%edi + movl %eax,%esi + roll $5,%eax + xorl %ecx,%edi + addl %eax,%ebp + rorl $7,%ebx + addl %edi,%ebp + addl 48(%rsp),%edx + xorl %ecx,%esi + movl %ebp,%edi + roll $5,%ebp + xorl %ebx,%esi + addl %ebp,%edx + rorl $7,%eax + addl %esi,%edx + addl 52(%rsp),%ecx + xorl %ebx,%edi + movl %edx,%esi + roll $5,%edx + xorl %eax,%edi + addl %edx,%ecx + rorl $7,%ebp + addl %edi,%ecx + addl 56(%rsp),%ebx + xorl %eax,%esi + movl %ecx,%edi + roll $5,%ecx + xorl %ebp,%esi + addl %ecx,%ebx + rorl $7,%edx + addl %esi,%ebx + addl 60(%rsp),%eax + xorl %ebp,%edi + movl %ebx,%esi + roll $5,%ebx + xorl %edx,%edi + addl %ebx,%eax + rorl $7,%ecx + addl %edi,%eax + addl 0(%r8),%eax + addl 4(%r8),%esi + addl 8(%r8),%ecx + movl %eax,0(%r8) + addl 12(%r8),%edx + 
movl %esi,4(%r8) + addl 16(%r8),%ebp + movl %ecx,8(%r8) + movl %edx,12(%r8) + movl %ebp,16(%r8) + leaq 64(%rsp),%rsi + movq 0(%rsi),%r12 + movq 8(%rsi),%rbp + movq 16(%rsi),%rbx + leaq 24(%rsi),%rsp +.Lepilogue_ssse3: + .byte 0xf3,0xc3 +.size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3 +.align 64 +K_XX_XX: +.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 +.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 +.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc +.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 +.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f +.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 +.align 64 diff --git a/app/openssl/crypto/sha/asm/sha1-x86_64.pl b/app/openssl/crypto/sha/asm/sha1-x86_64.pl index 4edc5ea9..f15c7ec3 100755 --- a/app/openssl/crypto/sha/asm/sha1-x86_64.pl +++ b/app/openssl/crypto/sha/asm/sha1-x86_64.pl @@ -16,7 +16,7 @@ # There was suggestion to mechanically translate 32-bit code, but I # dismissed it, reasoning that x86_64 offers enough register bank # capacity to fully utilize SHA-1 parallelism. Therefore this fresh -# implementation:-) However! While 64-bit code does performs better +# implementation:-) However! While 64-bit code does perform better # on Opteron, I failed to beat 32-bit assembler on EM64T core. Well, # x86_64 does offer larger *addressable* bank, but out-of-order core # reaches for even more registers through dynamic aliasing, and EM64T @@ -29,6 +29,38 @@ # Xeon P4 +65% +0% 9.9 # Core2 +60% +10% 7.0 +# August 2009. +# +# The code was revised to minimize code size and to maximize +# "distance" between instructions producing input to 'lea' +# instruction and the 'lea' instruction itself, which is essential +# for Intel Atom core. + +# October 2010. +# +# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it +# is to offload message schedule denoted by Wt in NIST specification, +# or Xupdate in OpenSSL source, to SIMD unit. See sha1-586.pl module +# for background and implementation details. The only difference from +# 32-bit code is that 64-bit code doesn't have to spill @X[] elements +# to free temporary registers. + +# April 2011. +# +# Add AVX code path. See sha1-586.pl for further information. + +###################################################################### +# Current performance is summarized in following table. Numbers are +# CPU clock cycles spent to process single byte (less is better). 
+# +# x86_64 SSSE3 AVX +# P4 9.8 - +# Opteron 6.6 - +# Core2 6.7 6.1/+10% - +# Atom 11.0 9.7/+13% - +# Westmere 7.1 5.6/+27% - +# Sandy Bridge 7.9 6.3/+25% 5.2/+51% + $flavour = shift; $output = shift; if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } @@ -40,7 +72,18 @@ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or die "can't locate x86_64-xlate.pl"; -open STDOUT,"| $^X $xlate $flavour $output"; +$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/ && + $1>=2.19); +$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ && + $1>=2.09); +$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && + `ml64 2>&1` =~ /Version ([0-9]+)\./ && + $1>=10); + +open OUT,"| \"$^X\" $xlate $flavour $output"; +*STDOUT=*OUT; $ctx="%rdi"; # 1st arg $inp="%rsi"; # 2nd arg @@ -51,196 +94,994 @@ $ctx="%r8"; $inp="%r9"; $num="%r10"; -$xi="%eax"; -$t0="%ebx"; -$t1="%ecx"; -$A="%edx"; -$B="%esi"; -$C="%edi"; -$D="%ebp"; -$E="%r11d"; -$T="%r12d"; - -@V=($A,$B,$C,$D,$E,$T); +$t0="%eax"; +$t1="%ebx"; +$t2="%ecx"; +@xi=("%edx","%ebp"); +$A="%esi"; +$B="%edi"; +$C="%r11d"; +$D="%r12d"; +$E="%r13d"; -sub PROLOGUE { -my $func=shift; -$code.=<<___; -.globl $func -.type $func,\@function,3 -.align 16 -$func: - push %rbx - push %rbp - push %r12 - mov %rsp,%r11 - mov %rdi,$ctx # reassigned argument - sub \$`8+16*4`,%rsp - mov %rsi,$inp # reassigned argument - and \$-64,%rsp - mov %rdx,$num # reassigned argument - mov %r11,`16*4`(%rsp) -.Lprologue: - - mov 0($ctx),$A - mov 4($ctx),$B - mov 8($ctx),$C - mov 12($ctx),$D - mov 16($ctx),$E -___ -} - -sub EPILOGUE { -my $func=shift; -$code.=<<___; - mov `16*4`(%rsp),%rsi - mov (%rsi),%r12 - mov 8(%rsi),%rbp - mov 16(%rsi),%rbx - lea 24(%rsi),%rsp -.Lepilogue: - ret -.size $func,.-$func -___ -} +@V=($A,$B,$C,$D,$E); sub BODY_00_19 { -my ($i,$a,$b,$c,$d,$e,$f,$host)=@_; +my ($i,$a,$b,$c,$d,$e)=@_; my $j=$i+1; $code.=<<___ if ($i==0); - mov `4*$i`($inp),$xi - `"bswap $xi" if(!defined($host))` - mov $xi,`4*$i`(%rsp) + mov `4*$i`($inp),$xi[0] + bswap $xi[0] + mov $xi[0],`4*$i`(%rsp) ___ $code.=<<___ if ($i<15); - lea 0x5a827999($xi,$e),$f mov $c,$t0 - mov `4*$j`($inp),$xi - mov $a,$e + mov `4*$j`($inp),$xi[1] + mov $a,$t2 xor $d,$t0 - `"bswap $xi" if(!defined($host))` - rol \$5,$e + bswap $xi[1] + rol \$5,$t2 + lea 0x5a827999($xi[0],$e),$e and $b,$t0 - mov $xi,`4*$j`(%rsp) - add $e,$f + mov $xi[1],`4*$j`(%rsp) + add $t2,$e xor $d,$t0 rol \$30,$b - add $t0,$f + add $t0,$e ___ $code.=<<___ if ($i>=15); - lea 0x5a827999($xi,$e),$f - mov `4*($j%16)`(%rsp),$xi + mov `4*($j%16)`(%rsp),$xi[1] mov $c,$t0 - mov $a,$e - xor `4*(($j+2)%16)`(%rsp),$xi + mov $a,$t2 + xor `4*(($j+2)%16)`(%rsp),$xi[1] xor $d,$t0 - rol \$5,$e - xor `4*(($j+8)%16)`(%rsp),$xi + rol \$5,$t2 + xor `4*(($j+8)%16)`(%rsp),$xi[1] and $b,$t0 - add $e,$f - xor `4*(($j+13)%16)`(%rsp),$xi + lea 0x5a827999($xi[0],$e),$e + xor `4*(($j+13)%16)`(%rsp),$xi[1] xor $d,$t0 + rol \$1,$xi[1] + add $t2,$e rol \$30,$b - add $t0,$f - rol \$1,$xi - mov $xi,`4*($j%16)`(%rsp) + mov $xi[1],`4*($j%16)`(%rsp) + add $t0,$e ___ +unshift(@xi,pop(@xi)); } sub BODY_20_39 { -my ($i,$a,$b,$c,$d,$e,$f)=@_; +my ($i,$a,$b,$c,$d,$e)=@_; my $j=$i+1; my $K=($i<40)?0x6ed9eba1:0xca62c1d6; $code.=<<___ if ($i<79); - lea $K($xi,$e),$f - mov `4*($j%16)`(%rsp),$xi + mov `4*($j%16)`(%rsp),$xi[1] mov $c,$t0 - mov $a,$e - xor 
`4*(($j+2)%16)`(%rsp),$xi + mov $a,$t2 + xor `4*(($j+2)%16)`(%rsp),$xi[1] xor $b,$t0 - rol \$5,$e - xor `4*(($j+8)%16)`(%rsp),$xi + rol \$5,$t2 + lea $K($xi[0],$e),$e + xor `4*(($j+8)%16)`(%rsp),$xi[1] xor $d,$t0 - add $e,$f - xor `4*(($j+13)%16)`(%rsp),$xi + add $t2,$e + xor `4*(($j+13)%16)`(%rsp),$xi[1] rol \$30,$b - add $t0,$f - rol \$1,$xi + add $t0,$e + rol \$1,$xi[1] ___ $code.=<<___ if ($i<76); - mov $xi,`4*($j%16)`(%rsp) + mov $xi[1],`4*($j%16)`(%rsp) ___ $code.=<<___ if ($i==79); - lea $K($xi,$e),$f mov $c,$t0 - mov $a,$e + mov $a,$t2 xor $b,$t0 - rol \$5,$e + lea $K($xi[0],$e),$e + rol \$5,$t2 xor $d,$t0 - add $e,$f + add $t2,$e rol \$30,$b - add $t0,$f + add $t0,$e ___ +unshift(@xi,pop(@xi)); } sub BODY_40_59 { -my ($i,$a,$b,$c,$d,$e,$f)=@_; +my ($i,$a,$b,$c,$d,$e)=@_; my $j=$i+1; $code.=<<___; - lea 0x8f1bbcdc($xi,$e),$f - mov `4*($j%16)`(%rsp),$xi - mov $b,$t0 - mov $b,$t1 - xor `4*(($j+2)%16)`(%rsp),$xi - mov $a,$e - and $c,$t0 - xor `4*(($j+8)%16)`(%rsp),$xi - or $c,$t1 - rol \$5,$e - xor `4*(($j+13)%16)`(%rsp),$xi - and $d,$t1 - add $e,$f - rol \$1,$xi - or $t1,$t0 + mov `4*($j%16)`(%rsp),$xi[1] + mov $c,$t0 + mov $c,$t1 + xor `4*(($j+2)%16)`(%rsp),$xi[1] + and $d,$t0 + mov $a,$t2 + xor `4*(($j+8)%16)`(%rsp),$xi[1] + xor $d,$t1 + lea 0x8f1bbcdc($xi[0],$e),$e + rol \$5,$t2 + xor `4*(($j+13)%16)`(%rsp),$xi[1] + add $t0,$e + and $b,$t1 + rol \$1,$xi[1] + add $t1,$e rol \$30,$b - mov $xi,`4*($j%16)`(%rsp) - add $t0,$f + mov $xi[1],`4*($j%16)`(%rsp) + add $t2,$e ___ +unshift(@xi,pop(@xi)); } -$code=".text\n"; +$code.=<<___; +.text +.extern OPENSSL_ia32cap_P -&PROLOGUE("sha1_block_data_order"); -$code.=".align 4\n.Lloop:\n"; +.globl sha1_block_data_order +.type sha1_block_data_order,\@function,3 +.align 16 +sha1_block_data_order: + mov OPENSSL_ia32cap_P+0(%rip),%r9d + mov OPENSSL_ia32cap_P+4(%rip),%r8d + test \$`1<<9`,%r8d # check SSSE3 bit + jz .Lialu +___ +$code.=<<___ if ($avx); + and \$`1<<28`,%r8d # mask AVX bit + and \$`1<<30`,%r9d # mask "Intel CPU" bit + or %r9d,%r8d + cmp \$`1<<28|1<<30`,%r8d + je _avx_shortcut +___ +$code.=<<___; + jmp _ssse3_shortcut + +.align 16 +.Lialu: + push %rbx + push %rbp + push %r12 + push %r13 + mov %rsp,%r11 + mov %rdi,$ctx # reassigned argument + sub \$`8+16*4`,%rsp + mov %rsi,$inp # reassigned argument + and \$-64,%rsp + mov %rdx,$num # reassigned argument + mov %r11,`16*4`(%rsp) +.Lprologue: + + mov 0($ctx),$A + mov 4($ctx),$B + mov 8($ctx),$C + mov 12($ctx),$D + mov 16($ctx),$E + jmp .Lloop + +.align 16 +.Lloop: +___ for($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); } for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } $code.=<<___; - add 0($ctx),$E - add 4($ctx),$T - add 8($ctx),$A - add 12($ctx),$B - add 16($ctx),$C - mov $E,0($ctx) - mov $T,4($ctx) - mov $A,8($ctx) - mov $B,12($ctx) - mov $C,16($ctx) - - xchg $E,$A # mov $E,$A - xchg $T,$B # mov $T,$B - xchg $E,$C # mov $A,$C - xchg $T,$D # mov $B,$D - # mov $C,$E - lea `16*4`($inp),$inp + add 0($ctx),$A + add 4($ctx),$B + add 8($ctx),$C + add 12($ctx),$D + add 16($ctx),$E + mov $A,0($ctx) + mov $B,4($ctx) + mov $C,8($ctx) + mov $D,12($ctx) + mov $E,16($ctx) + sub \$1,$num + lea `16*4`($inp),$inp jnz .Lloop + + mov `16*4`(%rsp),%rsi + mov (%rsi),%r13 + mov 8(%rsi),%r12 + mov 16(%rsi),%rbp + mov 24(%rsi),%rbx + lea 32(%rsi),%rsp +.Lepilogue: + ret +.size sha1_block_data_order,.-sha1_block_data_order ___ -&EPILOGUE("sha1_block_data_order"); 
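# For orientation while reading the rewritten BODY_00_19/20_39/40_59 above: a
# plain-Perl sketch (illustrative only, not emitted code; sha1_f and rol are
# hypothetical helpers) of the per-round logic those macros interleave:
#
#	sub rol { my ($x,$n)=@_; (($x<<$n)|($x>>(32-$n))) & 0xffffffff }
#	sub sha1_f {
#	    my ($i,$b,$c,$d)=@_;
#	    return $d ^ ($b & ($c ^ $d))        if ($i<20);	# Ch,     K=0x5a827999
#	    return $b ^ $c ^ $d                 if ($i<40);	# Parity, K=0x6ed9eba1
#	    return ($c & $d) | ($b & ($c ^ $d)) if ($i<60);	# Maj,    K=0x8f1bbcdc
#	    return $b ^ $c ^ $d;				# Parity, K=0xca62c1d6
#	}
#	# each round: $e += rol($a,5) + sha1_f($i,$b,$c,$d) + $K + $W[$i];
#	#             $b  = rol($b,30); then ($a,$b,$c,$d,$e) rotate by one.
#
# The assembler bodies compute these same functions; the reshuffling relative
# to the old code mostly serves the 'lea' scheduling described in the
# August 2009 note above.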
+{{{ +my $Xi=4; +my @X=map("%xmm$_",(4..7,0..3)); +my @Tx=map("%xmm$_",(8..10)); +my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization +my @T=("%esi","%edi"); +my $j=0; +my $K_XX_XX="%r11"; + +my $_rol=sub { &rol(@_) }; +my $_ror=sub { &ror(@_) }; + +$code.=<<___; +.type sha1_block_data_order_ssse3,\@function,3 +.align 16 +sha1_block_data_order_ssse3: +_ssse3_shortcut: + push %rbx + push %rbp + push %r12 + lea `-64-($win64?5*16:0)`(%rsp),%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,64+0(%rsp) + movaps %xmm7,64+16(%rsp) + movaps %xmm8,64+32(%rsp) + movaps %xmm9,64+48(%rsp) + movaps %xmm10,64+64(%rsp) +.Lprologue_ssse3: +___ +$code.=<<___; + mov %rdi,$ctx # reassigned argument + mov %rsi,$inp # reassigned argument + mov %rdx,$num # reassigned argument + + shl \$6,$num + add $inp,$num + lea K_XX_XX(%rip),$K_XX_XX + + mov 0($ctx),$A # load context + mov 4($ctx),$B + mov 8($ctx),$C + mov 12($ctx),$D + mov $B,@T[0] # magic seed + mov 16($ctx),$E + + movdqa 64($K_XX_XX),@X[2] # pbswap mask + movdqa 0($K_XX_XX),@Tx[1] # K_00_19 + movdqu 0($inp),@X[-4&7] # load input to %xmm[0-3] + movdqu 16($inp),@X[-3&7] + movdqu 32($inp),@X[-2&7] + movdqu 48($inp),@X[-1&7] + pshufb @X[2],@X[-4&7] # byte swap + add \$64,$inp + pshufb @X[2],@X[-3&7] + pshufb @X[2],@X[-2&7] + pshufb @X[2],@X[-1&7] + paddd @Tx[1],@X[-4&7] # add K_00_19 + paddd @Tx[1],@X[-3&7] + paddd @Tx[1],@X[-2&7] + movdqa @X[-4&7],0(%rsp) # X[]+K xfer to IALU + psubd @Tx[1],@X[-4&7] # restore X[] + movdqa @X[-3&7],16(%rsp) + psubd @Tx[1],@X[-3&7] + movdqa @X[-2&7],32(%rsp) + psubd @Tx[1],@X[-2&7] + jmp .Loop_ssse3 +___ + +sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; + my $arg = pop; + $arg = "\$$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; +} + +sub Xupdate_ssse3_16_31() # recall that $Xi starts wtih 4 +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 40 instructions + my ($a,$b,$c,$d,$e); + + &movdqa (@X[0],@X[-3&7]); + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (@Tx[0],@X[-1&7]); + &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]" + eval(shift(@insns)); + eval(shift(@insns)); + + &paddd (@Tx[1],@X[-1&7]); + eval(shift(@insns)); + eval(shift(@insns)); + &psrldq (@Tx[0],4); # "X[-3]", 3 dwords + eval(shift(@insns)); + eval(shift(@insns)); + &pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]" + eval(shift(@insns)); + eval(shift(@insns)); + + &pxor (@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + + &movdqa (@Tx[2],@X[0]); + &movdqa (@Tx[0],@X[0]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &pslldq (@Tx[2],12); # "X[0]"<<96, extract one dword + &paddd (@X[0],@X[0]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &psrld (@Tx[0],31); + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (@Tx[1],@Tx[2]); + eval(shift(@insns)); + eval(shift(@insns)); + + &psrld (@Tx[2],30); + &por (@X[0],@Tx[0]); # "X[0]"<<<=1 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &pslld (@Tx[1],2); + &pxor (@X[0],@Tx[2]); + eval(shift(@insns)); + 
eval(shift(@insns)); + &movdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX + eval(shift(@insns)); + eval(shift(@insns)); + + &pxor (@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2 + + foreach (@insns) { eval; } # remaining instructions [if any] + + $Xi++; push(@X,shift(@X)); # "rotate" X[] + push(@Tx,shift(@Tx)); +} + +sub Xupdate_ssse3_32_79() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions + my ($a,$b,$c,$d,$e); + + &movdqa (@Tx[0],@X[-1&7]) if ($Xi==8); + eval(shift(@insns)); # body_20_39 + &pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" + &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]" + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]" + eval(shift(@insns)); + eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/); + if ($Xi%5) { + &movdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX... + } else { # ... or load next one + &movdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)"); + } + &paddd (@Tx[1],@X[-1&7]); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-6]" + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &movdqa (@Tx[0],@X[0]); + &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &pslld (@X[0],2); + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + &psrld (@Tx[0],30); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &por (@X[0],@Tx[0]); # "X[0]"<<<=2 + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + &movdqa (@Tx[1],@X[0]) if ($Xi<19); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + + foreach (@insns) { eval; } # remaining instructions + + $Xi++; push(@X,shift(@X)); # "rotate" X[] + push(@Tx,shift(@Tx)); +} + +sub Xuplast_ssse3_80() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + &paddd (@Tx[1],@X[-1&7]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU + + foreach (@insns) { eval; } # remaining instructions + + &cmp ($inp,$num); + &je (".Ldone_ssse3"); + + unshift(@Tx,pop(@Tx)); + + &movdqa (@X[2],"64($K_XX_XX)"); # pbswap mask + &movdqa (@Tx[1],"0($K_XX_XX)"); # K_00_19 + &movdqu (@X[-4&7],"0($inp)"); # load input + &movdqu (@X[-3&7],"16($inp)"); + &movdqu (@X[-2&7],"32($inp)"); + &movdqu (@X[-1&7],"48($inp)"); + &pshufb (@X[-4&7],@X[2]); # byte swap + &add ($inp,64); + + $Xi=0; +} + +sub Xloop_ssse3() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + eval(shift(@insns)); + &pshufb (@X[($Xi-3)&7],@X[2]); + eval(shift(@insns)); + eval(shift(@insns)); + &paddd (@X[($Xi-4)&7],@Tx[1]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &movdqa (eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + &psubd (@X[($Xi-4)&7],@Tx[1]); + + foreach (@insns) { eval; } + $Xi++; +} + +sub Xtail_ssse3() +{ use integer; + my $body = shift; 
+ my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + foreach (@insns) { eval; } +} + +sub body_00_19 () { + ( + '($a,$b,$c,$d,$e)=@V;'. + '&add ($e,eval(4*($j&15))."(%rsp)");', # X[]+K xfer + '&xor ($c,$d);', + '&mov (@T[1],$a);', # $b in next round + '&$_rol ($a,5);', + '&and (@T[0],$c);', # ($b&($c^$d)) + '&xor ($c,$d);', # restore $c + '&xor (@T[0],$d);', + '&add ($e,$a);', + '&$_ror ($b,$j?7:2);', # $b>>>2 + '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));' + ); +} + +sub body_20_39 () { + ( + '($a,$b,$c,$d,$e)=@V;'. + '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer + '&xor (@T[0],$d);', # ($b^$d) + '&mov (@T[1],$a);', # $b in next round + '&$_rol ($a,5);', + '&xor (@T[0],$c);', # ($b^$d^$c) + '&add ($e,$a);', + '&$_ror ($b,7);', # $b>>>2 + '&add ($e,@T[0]);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));' + ); +} + +sub body_40_59 () { + ( + '($a,$b,$c,$d,$e)=@V;'. + '&mov (@T[1],$c);', + '&xor ($c,$d);', + '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer + '&and (@T[1],$d);', + '&and (@T[0],$c);', # ($b&($c^$d)) + '&$_ror ($b,7);', # $b>>>2 + '&add ($e,@T[1]);', + '&mov (@T[1],$a);', # $b in next round + '&$_rol ($a,5);', + '&add ($e,@T[0]);', + '&xor ($c,$d);', # restore $c + '&add ($e,$a);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));' + ); +} $code.=<<___; -.asciz "SHA1 block transform for x86_64, CRYPTOGAMS by " .align 16 +.Loop_ssse3: +___ + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_16_31(\&body_00_19); + &Xupdate_ssse3_32_79(\&body_00_19); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_40_59); + &Xupdate_ssse3_32_79(\&body_20_39); + &Xuplast_ssse3_80(\&body_20_39); # can jump to "done" + + $saved_j=$j; @saved_V=@V; + + &Xloop_ssse3(\&body_20_39); + &Xloop_ssse3(\&body_20_39); + &Xloop_ssse3(\&body_20_39); + +$code.=<<___; + add 0($ctx),$A # update context + add 4($ctx),@T[0] + add 8($ctx),$C + add 12($ctx),$D + mov $A,0($ctx) + add 16($ctx),$E + mov @T[0],4($ctx) + mov @T[0],$B # magic seed + mov $C,8($ctx) + mov $D,12($ctx) + mov $E,16($ctx) + jmp .Loop_ssse3 + +.align 16 +.Ldone_ssse3: +___ + $j=$saved_j; @V=@saved_V; + + &Xtail_ssse3(\&body_20_39); + &Xtail_ssse3(\&body_20_39); + &Xtail_ssse3(\&body_20_39); + +$code.=<<___; + add 0($ctx),$A # update context + add 4($ctx),@T[0] + add 8($ctx),$C + mov $A,0($ctx) + add 12($ctx),$D + mov @T[0],4($ctx) + add 16($ctx),$E + mov $C,8($ctx) + mov $D,12($ctx) + mov $E,16($ctx) +___ +$code.=<<___ if ($win64); + movaps 64+0(%rsp),%xmm6 + movaps 64+16(%rsp),%xmm7 + movaps 64+32(%rsp),%xmm8 + movaps 64+48(%rsp),%xmm9 + movaps 64+64(%rsp),%xmm10 +___ +$code.=<<___; + lea `64+($win64?5*16:0)`(%rsp),%rsi + mov 0(%rsi),%r12 + mov 8(%rsi),%rbp + mov 16(%rsi),%rbx + lea 24(%rsi),%rsp +.Lepilogue_ssse3: + ret +.size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3 +___ + +if ($avx) { +my $Xi=4; +my @X=map("%xmm$_",(4..7,0..3)); +my @Tx=map("%xmm$_",(8..10)); +my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization +my @T=("%esi","%edi"); +my $j=0; +my $K_XX_XX="%r11"; + +my $_rol=sub { &shld(@_[0],@_) }; +my $_ror=sub { &shrd(@_[0],@_) }; + 
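# The Xupdate_*/Xloop_* subs above vectorize the SHA-1 message schedule,
#
#	W[t] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1),   16 <= t < 80,
#
# four W[] values per xmm register, and dovetail those SIMD instructions with
# the scalar round code by draining it one instruction at a time. Reduced to
# a skeleton (body() here stands in for any of body_00_19/20_39/40_59):
#
#	my @insns = (&body(),&body(),&body(),&body());	# queue 4 rounds' worth
#	&pxor	(@X[0],@X[-4&7]);			# one schedule step ...
#	eval(shift(@insns));				# ... one round instruction
#	eval(shift(@insns));
#	...
#	foreach (@insns) { eval; }			# flush the remainder
#
# The AVX path that follows reuses the same interleave; its three-operand
# vpxor/vpalignr/vpslld forms simply avoid most of the movdqa register copies
# the SSSE3 path needs, and rol/ror are expressed as shld/shrd via $_rol/$_ror.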
+$code.=<<___; +.type sha1_block_data_order_avx,\@function,3 +.align 16 +sha1_block_data_order_avx: +_avx_shortcut: + push %rbx + push %rbp + push %r12 + lea `-64-($win64?5*16:0)`(%rsp),%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,64+0(%rsp) + movaps %xmm7,64+16(%rsp) + movaps %xmm8,64+32(%rsp) + movaps %xmm9,64+48(%rsp) + movaps %xmm10,64+64(%rsp) +.Lprologue_avx: +___ +$code.=<<___; + mov %rdi,$ctx # reassigned argument + mov %rsi,$inp # reassigned argument + mov %rdx,$num # reassigned argument + vzeroupper + + shl \$6,$num + add $inp,$num + lea K_XX_XX(%rip),$K_XX_XX + + mov 0($ctx),$A # load context + mov 4($ctx),$B + mov 8($ctx),$C + mov 12($ctx),$D + mov $B,@T[0] # magic seed + mov 16($ctx),$E + + vmovdqa 64($K_XX_XX),@X[2] # pbswap mask + vmovdqa 0($K_XX_XX),@Tx[1] # K_00_19 + vmovdqu 0($inp),@X[-4&7] # load input to %xmm[0-3] + vmovdqu 16($inp),@X[-3&7] + vmovdqu 32($inp),@X[-2&7] + vmovdqu 48($inp),@X[-1&7] + vpshufb @X[2],@X[-4&7],@X[-4&7] # byte swap + add \$64,$inp + vpshufb @X[2],@X[-3&7],@X[-3&7] + vpshufb @X[2],@X[-2&7],@X[-2&7] + vpshufb @X[2],@X[-1&7],@X[-1&7] + vpaddd @Tx[1],@X[-4&7],@X[0] # add K_00_19 + vpaddd @Tx[1],@X[-3&7],@X[1] + vpaddd @Tx[1],@X[-2&7],@X[2] + vmovdqa @X[0],0(%rsp) # X[]+K xfer to IALU + vmovdqa @X[1],16(%rsp) + vmovdqa @X[2],32(%rsp) + jmp .Loop_avx +___ + +sub Xupdate_avx_16_31() # recall that $Xi starts wtih 4 +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 40 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + eval(shift(@insns)); + &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]" + eval(shift(@insns)); + eval(shift(@insns)); + + &vpaddd (@Tx[1],@Tx[1],@X[-1&7]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]" + eval(shift(@insns)); + eval(shift(@insns)); + + &vpxor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]" + eval(shift(@insns)); + eval(shift(@insns)); + &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + + &vpsrld (@Tx[0],@X[0],31); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpslldq(@Tx[2],@X[0],12); # "X[0]"<<96, extract one dword + &vpaddd (@X[0],@X[0],@X[0]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpsrld (@Tx[1],@Tx[2],30); + &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=1 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpslld (@Tx[2],@Tx[2],2); + &vpxor (@X[0],@X[0],@Tx[1]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &vpxor (@X[0],@X[0],@Tx[2]); # "X[0]"^=("X[0]">>96)<<<2 + eval(shift(@insns)); + eval(shift(@insns)); + &vmovdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX + eval(shift(@insns)); + eval(shift(@insns)); + + + foreach (@insns) { eval; } # remaining instructions [if any] + + $Xi++; push(@X,shift(@X)); # "rotate" X[] + push(@Tx,shift(@Tx)); +} + +sub Xupdate_avx_32_79() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions + my ($a,$b,$c,$d,$e); + + &vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8); # compose "X[-6]" + &vpxor (@X[0],@X[0],@X[-4&7]); # 
"X[0]"="X[-32]"^"X[-16]" + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]" + eval(shift(@insns)); + eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/); + if ($Xi%5) { + &vmovdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX... + } else { # ... or load next one + &vmovdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)"); + } + &vpaddd (@Tx[1],@Tx[1],@X[-1&7]); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-6]" + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + + &vpsrld (@Tx[0],@X[0],30); + &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &vpslld (@X[0],@X[0],2); + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # ror + eval(shift(@insns)); + + &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=2 + eval(shift(@insns)); # body_20_39 + eval(shift(@insns)); + &vmovdqa (@Tx[1],@X[0]) if ($Xi<19); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); # rol + eval(shift(@insns)); + + foreach (@insns) { eval; } # remaining instructions + + $Xi++; push(@X,shift(@X)); # "rotate" X[] + push(@Tx,shift(@Tx)); +} + +sub Xuplast_avx_80() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + &vpaddd (@Tx[1],@Tx[1],@X[-1&7]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + + &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU + + foreach (@insns) { eval; } # remaining instructions + + &cmp ($inp,$num); + &je (".Ldone_avx"); + + unshift(@Tx,pop(@Tx)); + + &vmovdqa(@X[2],"64($K_XX_XX)"); # pbswap mask + &vmovdqa(@Tx[1],"0($K_XX_XX)"); # K_00_19 + &vmovdqu(@X[-4&7],"0($inp)"); # load input + &vmovdqu(@X[-3&7],"16($inp)"); + &vmovdqu(@X[-2&7],"32($inp)"); + &vmovdqu(@X[-1&7],"48($inp)"); + &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap + &add ($inp,64); + + $Xi=0; +} + +sub Xloop_avx() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + eval(shift(@insns)); + eval(shift(@insns)); + &vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],@Tx[1]); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]); # X[]+K xfer to IALU + eval(shift(@insns)); + eval(shift(@insns)); + + foreach (@insns) { eval; } + $Xi++; +} + +sub Xtail_avx() +{ use integer; + my $body = shift; + my @insns = (&$body,&$body,&$body,&$body); # 32 instructions + my ($a,$b,$c,$d,$e); + + foreach (@insns) { eval; } +} + +$code.=<<___; +.align 16 +.Loop_avx: +___ + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_16_31(\&body_00_19); + &Xupdate_avx_32_79(\&body_00_19); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_20_39); + &Xupdate_avx_32_79(\&body_40_59); + 
&Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_40_59); + &Xupdate_avx_32_79(\&body_20_39); + &Xuplast_avx_80(\&body_20_39); # can jump to "done" + + $saved_j=$j; @saved_V=@V; + + &Xloop_avx(\&body_20_39); + &Xloop_avx(\&body_20_39); + &Xloop_avx(\&body_20_39); + +$code.=<<___; + add 0($ctx),$A # update context + add 4($ctx),@T[0] + add 8($ctx),$C + add 12($ctx),$D + mov $A,0($ctx) + add 16($ctx),$E + mov @T[0],4($ctx) + mov @T[0],$B # magic seed + mov $C,8($ctx) + mov $D,12($ctx) + mov $E,16($ctx) + jmp .Loop_avx + +.align 16 +.Ldone_avx: +___ + $j=$saved_j; @V=@saved_V; + + &Xtail_avx(\&body_20_39); + &Xtail_avx(\&body_20_39); + &Xtail_avx(\&body_20_39); + +$code.=<<___; + vzeroupper + + add 0($ctx),$A # update context + add 4($ctx),@T[0] + add 8($ctx),$C + mov $A,0($ctx) + add 12($ctx),$D + mov @T[0],4($ctx) + add 16($ctx),$E + mov $C,8($ctx) + mov $D,12($ctx) + mov $E,16($ctx) +___ +$code.=<<___ if ($win64); + movaps 64+0(%rsp),%xmm6 + movaps 64+16(%rsp),%xmm7 + movaps 64+32(%rsp),%xmm8 + movaps 64+48(%rsp),%xmm9 + movaps 64+64(%rsp),%xmm10 +___ +$code.=<<___; + lea `64+($win64?5*16:0)`(%rsp),%rsi + mov 0(%rsi),%r12 + mov 8(%rsi),%rbp + mov 16(%rsi),%rbx + lea 24(%rsi),%rsp +.Lepilogue_avx: + ret +.size sha1_block_data_order_avx,.-sha1_block_data_order_avx +___ +} +$code.=<<___; +.align 64 +K_XX_XX: +.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19 +.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39 +.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59 +.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79 +.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask +___ +}}} +$code.=<<___; +.asciz "SHA1 block transform for x86_64, CRYPTOGAMS by " +.align 64 ___ # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, @@ -272,25 +1113,75 @@ se_handler: lea .Lprologue(%rip),%r10 cmp %r10,%rbx # context->Rip<.Lprologue - jb .Lin_prologue + jb .Lcommon_seh_tail mov 152($context),%rax # pull context->Rsp lea .Lepilogue(%rip),%r10 cmp %r10,%rbx # context->Rip>=.Lepilogue - jae .Lin_prologue + jae .Lcommon_seh_tail mov `16*4`(%rax),%rax # pull saved stack pointer - lea 24(%rax),%rax + lea 32(%rax),%rax mov -8(%rax),%rbx mov -16(%rax),%rbp mov -24(%rax),%r12 + mov -32(%rax),%r13 mov %rbx,144($context) # restore context->Rbx mov %rbp,160($context) # restore context->Rbp mov %r12,216($context) # restore context->R12 + mov %r13,224($context) # restore context->R13 + + jmp .Lcommon_seh_tail +.size se_handler,.-se_handler -.Lin_prologue: +.type ssse3_handler,\@abi-omnipotent +.align 16 +ssse3_handler: + push %rsi + push %rdi + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + pushfq + sub \$64,%rsp + + mov 120($context),%rax # pull context->Rax + mov 248($context),%rbx # pull context->Rip + + mov 8($disp),%rsi # disp->ImageBase + mov 56($disp),%r11 # disp->HandlerData + + mov 0(%r11),%r10d # HandlerData[0] + lea (%rsi,%r10),%r10 # prologue label + cmp %r10,%rbx # context->RipRsp + + mov 4(%r11),%r10d # HandlerData[1] + lea (%rsi,%r10),%r10 # epilogue label + cmp %r10,%rbx # context->Rip>=epilogue label + jae .Lcommon_seh_tail + + lea 64(%rax),%rsi + lea 512($context),%rdi # &context.Xmm6 + mov \$10,%ecx + .long 0xa548f3fc # cld; rep movsq + lea `24+64+5*16`(%rax),%rax # adjust stack pointer + + mov -8(%rax),%rbx + mov -16(%rax),%rbp + mov -24(%rax),%r12 + mov %rbx,144($context) # restore context->Rbx + mov %rbp,160($context) # restore 
context->Rbp + mov %r12,216($context) # restore cotnext->R12 + +.Lcommon_seh_tail: mov 8(%rax),%rdi mov 16(%rax),%rsi mov %rax,152($context) # restore context->Rsp @@ -328,19 +1219,38 @@ se_handler: pop %rdi pop %rsi ret -.size se_handler,.-se_handler +.size ssse3_handler,.-ssse3_handler .section .pdata .align 4 .rva .LSEH_begin_sha1_block_data_order .rva .LSEH_end_sha1_block_data_order .rva .LSEH_info_sha1_block_data_order - + .rva .LSEH_begin_sha1_block_data_order_ssse3 + .rva .LSEH_end_sha1_block_data_order_ssse3 + .rva .LSEH_info_sha1_block_data_order_ssse3 +___ +$code.=<<___ if ($avx); + .rva .LSEH_begin_sha1_block_data_order_avx + .rva .LSEH_end_sha1_block_data_order_avx + .rva .LSEH_info_sha1_block_data_order_avx +___ +$code.=<<___; .section .xdata .align 8 .LSEH_info_sha1_block_data_order: .byte 9,0,0,0 .rva se_handler +.LSEH_info_sha1_block_data_order_ssse3: + .byte 9,0,0,0 + .rva ssse3_handler + .rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[] +___ +$code.=<<___ if ($avx); +.LSEH_info_sha1_block_data_order_avx: + .byte 9,0,0,0 + .rva ssse3_handler + .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[] ___ } diff --git a/app/openssl/crypto/sha/asm/sha256-586.S b/app/openssl/crypto/sha/asm/sha256-586.S new file mode 100644 index 00000000..77a89514 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha256-586.S @@ -0,0 +1,258 @@ +.file "sha512-586.s" +.text +.globl sha256_block_data_order +.type sha256_block_data_order,@function +.align 16 +sha256_block_data_order: +.L_sha256_block_data_order_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + movl 20(%esp),%esi + movl 24(%esp),%edi + movl 28(%esp),%eax + movl %esp,%ebx + call .L000pic_point +.L000pic_point: + popl %ebp + leal .L001K256-.L000pic_point(%ebp),%ebp + subl $16,%esp + andl $-64,%esp + shll $6,%eax + addl %edi,%eax + movl %esi,(%esp) + movl %edi,4(%esp) + movl %eax,8(%esp) + movl %ebx,12(%esp) +.align 16 +.L002loop: + movl (%edi),%eax + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 16(%edi),%eax + movl 20(%edi),%ebx + movl 24(%edi),%ecx + movl 28(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 32(%edi),%eax + movl 36(%edi),%ebx + movl 40(%edi),%ecx + movl 44(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 48(%edi),%eax + movl 52(%edi),%ebx + movl 56(%edi),%ecx + movl 60(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + addl $64,%edi + subl $32,%esp + movl %edi,100(%esp) + movl (%esi),%eax + movl 4(%esi),%ebx + movl 8(%esi),%ecx + movl 12(%esi),%edi + movl %ebx,4(%esp) + movl %ecx,8(%esp) + movl %edi,12(%esp) + movl 16(%esi),%edx + movl 20(%esi),%ebx + movl 24(%esi),%ecx + movl 28(%esi),%edi + movl %ebx,20(%esp) + movl %ecx,24(%esp) + movl %edi,28(%esp) +.align 16 +.L00300_15: + movl 92(%esp),%ebx + movl %edx,%ecx + rorl $14,%ecx + movl 20(%esp),%esi + xorl %edx,%ecx + rorl $5,%ecx + xorl %edx,%ecx + rorl $6,%ecx + movl 24(%esp),%edi + addl %ecx,%ebx + xorl %edi,%esi + movl %edx,16(%esp) + movl %eax,%ecx + andl %edx,%esi + movl 12(%esp),%edx + xorl %edi,%esi + movl %eax,%edi + addl %esi,%ebx + rorl $9,%ecx + addl 28(%esp),%ebx + xorl %eax,%ecx + rorl $11,%ecx + movl 4(%esp),%esi + xorl %eax,%ecx + rorl $2,%ecx + addl %ebx,%edx + movl 8(%esp),%edi + addl %ecx,%ebx + movl %eax,(%esp) 
+ movl %eax,%ecx + subl $4,%esp + orl %esi,%eax + andl %esi,%ecx + andl %edi,%eax + movl (%ebp),%esi + orl %ecx,%eax + addl $4,%ebp + addl %ebx,%eax + addl %esi,%edx + addl %esi,%eax + cmpl $3248222580,%esi + jne .L00300_15 + movl 152(%esp),%ebx +.align 16 +.L00416_63: + movl %ebx,%esi + movl 100(%esp),%ecx + rorl $11,%esi + movl %ecx,%edi + xorl %ebx,%esi + rorl $7,%esi + shrl $3,%ebx + rorl $2,%edi + xorl %esi,%ebx + xorl %ecx,%edi + rorl $17,%edi + shrl $10,%ecx + addl 156(%esp),%ebx + xorl %ecx,%edi + addl 120(%esp),%ebx + movl %edx,%ecx + addl %edi,%ebx + rorl $14,%ecx + movl 20(%esp),%esi + xorl %edx,%ecx + rorl $5,%ecx + movl %ebx,92(%esp) + xorl %edx,%ecx + rorl $6,%ecx + movl 24(%esp),%edi + addl %ecx,%ebx + xorl %edi,%esi + movl %edx,16(%esp) + movl %eax,%ecx + andl %edx,%esi + movl 12(%esp),%edx + xorl %edi,%esi + movl %eax,%edi + addl %esi,%ebx + rorl $9,%ecx + addl 28(%esp),%ebx + xorl %eax,%ecx + rorl $11,%ecx + movl 4(%esp),%esi + xorl %eax,%ecx + rorl $2,%ecx + addl %ebx,%edx + movl 8(%esp),%edi + addl %ecx,%ebx + movl %eax,(%esp) + movl %eax,%ecx + subl $4,%esp + orl %esi,%eax + andl %esi,%ecx + andl %edi,%eax + movl (%ebp),%esi + orl %ecx,%eax + addl $4,%ebp + addl %ebx,%eax + movl 152(%esp),%ebx + addl %esi,%edx + addl %esi,%eax + cmpl $3329325298,%esi + jne .L00416_63 + movl 352(%esp),%esi + movl 4(%esp),%ebx + movl 8(%esp),%ecx + movl 12(%esp),%edi + addl (%esi),%eax + addl 4(%esi),%ebx + addl 8(%esi),%ecx + addl 12(%esi),%edi + movl %eax,(%esi) + movl %ebx,4(%esi) + movl %ecx,8(%esi) + movl %edi,12(%esi) + movl 20(%esp),%eax + movl 24(%esp),%ebx + movl 28(%esp),%ecx + movl 356(%esp),%edi + addl 16(%esi),%edx + addl 20(%esi),%eax + addl 24(%esi),%ebx + addl 28(%esi),%ecx + movl %edx,16(%esi) + movl %eax,20(%esi) + movl %ebx,24(%esi) + movl %ecx,28(%esi) + addl $352,%esp + subl $256,%ebp + cmpl 8(%esp),%edi + jb .L002loop + movl 12(%esp),%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 64 +.L001K256: +.long 1116352408,1899447441,3049323471,3921009573 +.long 961987163,1508970993,2453635748,2870763221 +.long 3624381080,310598401,607225278,1426881987 +.long 1925078388,2162078206,2614888103,3248222580 +.long 3835390401,4022224774,264347078,604807628 +.long 770255983,1249150122,1555081692,1996064986 +.long 2554220882,2821834349,2952996808,3210313671 +.long 3336571891,3584528711,113926993,338241895 +.long 666307205,773529912,1294757372,1396182291 +.long 1695183700,1986661051,2177026350,2456956037 +.long 2730485921,2820302411,3259730800,3345764771 +.long 3516065817,3600352804,4094571909,275423344 +.long 430227734,506948616,659060556,883997877 +.long 958139571,1322822218,1537002063,1747873779 +.long 1955562222,2024104815,2227730452,2361852424 +.long 2428436474,2756734187,3204031479,3329325298 +.size sha256_block_data_order,.-.L_sha256_block_data_order_begin +.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 +.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 +.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 +.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 +.byte 62,0 diff --git a/app/openssl/crypto/sha/asm/sha256-586.pl b/app/openssl/crypto/sha/asm/sha256-586.pl index ecc8b69c..52a7c7f8 100644 --- a/app/openssl/crypto/sha/asm/sha256-586.pl +++ b/app/openssl/crypto/sha/asm/sha256-586.pl @@ -14,14 +14,14 @@ # Pentium PIII P4 AMD K8 Core2 # gcc 46 36 41 27 26 # icc 57 33 38 25 23 -# x86 asm 40 30 35 20 20 -# x86_64 asm(*) - - 21 15.8 16.5 +# x86 asm 40 30 33 20 18 +# x86_64 asm(*) - - 21 16 16 # # (*) x86_64 
assembler performance is presented for reference # purposes. # # Performance improvement over compiler generated code varies from -# 10% to 40% [see above]. Not very impressive on some µ-archs, but +# 10% to 40% [see above]. Not very impressive on some µ-archs, but # it's 5 times smaller and optimizies amount of writes. $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; @@ -48,20 +48,19 @@ sub BODY_00_15() { my $in_16_63=shift; &mov ("ecx",$E); - &add ($T,&DWP(4*(8+15+16-9),"esp")) if ($in_16_63); # T += X[-7] - &ror ("ecx",6); - &mov ("edi",$E); - &ror ("edi",11); + &add ($T,"edi") if ($in_16_63); # T += sigma1(X[-2]) + &ror ("ecx",25-11); &mov ("esi",$Foff); - &xor ("ecx","edi"); - &ror ("edi",25-11); + &xor ("ecx",$E); + &ror ("ecx",11-6); &mov (&DWP(4*(8+15),"esp"),$T) if ($in_16_63); # save X[0] - &xor ("ecx","edi"); # Sigma1(e) + &xor ("ecx",$E); + &ror ("ecx",6); # Sigma1(e) &mov ("edi",$Goff); &add ($T,"ecx"); # T += Sigma1(e) - &mov ($Eoff,$E); # modulo-scheduled &xor ("esi","edi"); + &mov ($Eoff,$E); # modulo-scheduled &mov ("ecx",$A); &and ("esi",$E); &mov ($E,$Doff); # e becomes d, which is e in next iteration @@ -69,14 +68,14 @@ sub BODY_00_15() { &mov ("edi",$A); &add ($T,"esi"); # T += Ch(e,f,g) - &ror ("ecx",2); + &ror ("ecx",22-13); &add ($T,$Hoff); # T += h - &ror ("edi",13); + &xor ("ecx",$A); + &ror ("ecx",13-2); &mov ("esi",$Boff); - &xor ("ecx","edi"); - &ror ("edi",22-13); + &xor ("ecx",$A); + &ror ("ecx",2); # Sigma0(a) &add ($E,$T); # d += T - &xor ("ecx","edi"); # Sigma0(a) &mov ("edi",$Coff); &add ($T,"ecx"); # T += Sigma0(a) @@ -168,23 +167,22 @@ sub BODY_00_15() { &set_label("16_63",16); &mov ("esi",$T); &mov ("ecx",&DWP(4*(8+15+16-14),"esp")); - &shr ($T,3); - &ror ("esi",7); - &xor ($T,"esi"); &ror ("esi",18-7); &mov ("edi","ecx"); - &xor ($T,"esi"); # T = sigma0(X[-15]) + &xor ("esi",$T); + &ror ("esi",7); + &shr ($T,3); - &shr ("ecx",10); - &mov ("esi",&DWP(4*(8+15+16),"esp")); - &ror ("edi",17); - &xor ("ecx","edi"); &ror ("edi",19-17); - &add ($T,"esi"); # T += X[-16] - &xor ("edi","ecx") # sigma1(X[-2]) + &xor ($T,"esi"); # T = sigma0(X[-15]) + &xor ("edi","ecx"); + &ror ("edi",17); + &shr ("ecx",10); + &add ($T,&DWP(4*(8+15+16),"esp")); # T += X[-16] + &xor ("edi","ecx"); # sigma1(X[-2]) - &add ($T,"edi"); # T += sigma1(X[-2]) - # &add ($T,&DWP(4*(8+15+16-9),"esp")); # T += X[-7], moved to BODY_00_15(1) + &add ($T,&DWP(4*(8+15+16-9),"esp")); # T += X[-7] + # &add ($T,"edi"); # T += sigma1(X[-2]) # &mov (&DWP(4*(8+15),"esp"),$T); # save X[0] &BODY_00_15(1); diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.S b/app/openssl/crypto/sha/asm/sha256-armv4.S new file mode 120000 index 00000000..d4f53c1d --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha256-armv4.S @@ -0,0 +1 @@ +sha256-armv4.s \ No newline at end of file diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.pl b/app/openssl/crypto/sha/asm/sha256-armv4.pl index 492cb62b..9c84e8d9 100644 --- a/app/openssl/crypto/sha/asm/sha256-armv4.pl +++ b/app/openssl/crypto/sha/asm/sha256-armv4.pl @@ -18,11 +18,16 @@ # Rescheduling for dual-issue pipeline resulted in 22% improvement on # Cortex A8 core and ~20 cycles per processed byte. +# February 2011. +# +# Profiler-assisted and platform-specific optimization resulted in 16% +# improvement on Cortex A8 core and ~17 cycles per processed byte. 
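The rewritten BODY_00_15 above computes Sigma1(e) and Sigma0(a) in a single running register by rotating through the differences of the rotate counts (25-11, 11-6, then 6 for Sigma1; 22-13, 13-2, then 2 for Sigma0) and xoring the input back in at each step, instead of materialising three separate rotations. The standalone Perl check below is not part of the patch, and names like ror32, sigma_direct and sigma_chained are illustrative only; it verifies that the chained form equals the textbook three-rotation form.

#!/usr/bin/env perl
# Standalone check (not part of the patch): the chained rotate/xor sequence
# used in the rewritten BODY_00_15 equals ror(x,r0)^ror(x,r1)^ror(x,r2).
use strict; use warnings;

sub ror32 { my ($x,$n) = @_; return (($x >> $n) | ($x << (32 - $n))) & 0xffffffff; }

sub sigma_direct  { my ($x,@r) = @_; return ror32($x,$r[0]) ^ ror32($x,$r[1]) ^ ror32($x,$r[2]); }

sub sigma_chained {                 # ror by (r2-r1), xor x, ror by (r1-r0), xor x, ror by r0
    my ($x,@r) = @_;
    my $t = ror32($x, $r[2]-$r[1]);
    $t = ror32($t ^ $x, $r[1]-$r[0]);
    return ror32($t ^ $x, $r[0]);
}

for my $x (0x510e527f, 0x6a09e667, 0xdeadbeef) {
    for my $r ([6,11,25], [2,13,22]) {          # Sigma1 and Sigma0 rotate counts
        die "mismatch" unless sigma_direct($x,@$r) == sigma_chained($x,@$r);
    }
}
print "Sigma identities hold\n";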
+ while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; $ctx="r0"; $t0="r0"; -$inp="r1"; +$inp="r1"; $t3="r1"; $len="r2"; $t1="r2"; $T1="r3"; $A="r4"; @@ -46,6 +51,9 @@ sub BODY_00_15 { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_; $code.=<<___ if ($i<16); +#if __ARM_ARCH__>=7 + ldr $T1,[$inp],#4 +#else ldrb $T1,[$inp,#3] @ $i ldrb $t2,[$inp,#2] ldrb $t1,[$inp,#1] @@ -53,16 +61,24 @@ $code.=<<___ if ($i<16); orr $T1,$T1,$t2,lsl#8 orr $T1,$T1,$t1,lsl#16 orr $T1,$T1,$t0,lsl#24 - `"str $inp,[sp,#17*4]" if ($i==15)` +#endif ___ $code.=<<___; - ldr $t2,[$Ktbl],#4 @ *K256++ mov $t0,$e,ror#$Sigma1[0] - str $T1,[sp,#`$i%16`*4] + ldr $t2,[$Ktbl],#4 @ *K256++ eor $t0,$t0,$e,ror#$Sigma1[1] eor $t1,$f,$g +#if $i>=16 + add $T1,$T1,$t3 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev $T1,$T1 +#endif +#if $i==15 + str $inp,[sp,#17*4] @ leave room for $t3 +#endif eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e) and $t1,$t1,$e + str $T1,[sp,#`$i%16`*4] add $T1,$T1,$t0 eor $t1,$t1,$g @ Ch(e,f,g) add $T1,$T1,$h @@ -71,6 +87,9 @@ $code.=<<___; eor $h,$h,$a,ror#$Sigma0[1] add $T1,$T1,$t2 eor $h,$h,$a,ror#$Sigma0[2] @ Sigma0(a) +#if $i>=15 + ldr $t3,[sp,#`($i+2)%16`*4] @ from BODY_16_xx +#endif orr $t0,$a,$b and $t1,$a,$b and $t0,$t0,$c @@ -85,24 +104,26 @@ sub BODY_16_XX { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_; $code.=<<___; - ldr $t1,[sp,#`($i+1)%16`*4] @ $i + @ ldr $t3,[sp,#`($i+1)%16`*4] @ $i ldr $t2,[sp,#`($i+14)%16`*4] + mov $t0,$t3,ror#$sigma0[0] ldr $T1,[sp,#`($i+0)%16`*4] - mov $t0,$t1,ror#$sigma0[0] - ldr $inp,[sp,#`($i+9)%16`*4] - eor $t0,$t0,$t1,ror#$sigma0[1] - eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1]) - mov $t1,$t2,ror#$sigma1[0] + eor $t0,$t0,$t3,ror#$sigma0[1] + ldr $t1,[sp,#`($i+9)%16`*4] + eor $t0,$t0,$t3,lsr#$sigma0[2] @ sigma0(X[i+1]) + mov $t3,$t2,ror#$sigma1[0] add $T1,$T1,$t0 - eor $t1,$t1,$t2,ror#$sigma1[1] - add $T1,$T1,$inp - eor $t1,$t1,$t2,lsr#$sigma1[2] @ sigma1(X[i+14]) + eor $t3,$t3,$t2,ror#$sigma1[1] add $T1,$T1,$t1 + eor $t3,$t3,$t2,lsr#$sigma1[2] @ sigma1(X[i+14]) + @ add $T1,$T1,$t3 ___ &BODY_00_15(@_); } $code=<<___; +#include "arm_arch.h" + .text .code 32 @@ -132,7 +153,7 @@ K256: sha256_block_data_order: sub r3,pc,#8 @ sha256_block_data_order add $len,$inp,$len,lsl#6 @ len to point at the end of inp - stmdb sp!,{$ctx,$inp,$len,r4-r12,lr} + stmdb sp!,{$ctx,$inp,$len,r4-r11,lr} ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H} sub $Ktbl,r3,#256 @ K256 sub sp,sp,#16*4 @ alloca(X[16]) @@ -171,10 +192,14 @@ $code.=<<___; bne .Loop add sp,sp,#`16+3`*4 @ destroy frame - ldmia sp!,{r4-r12,lr} +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r11,pc} +#else + ldmia sp!,{r4-r11,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) +#endif .size sha256_block_data_order,.-sha256_block_data_order .asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by " .align 2 diff --git a/app/openssl/crypto/sha/asm/sha256-armv4.s b/app/openssl/crypto/sha/asm/sha256-armv4.s index ee903dc4..9c20a63c 100644 --- a/app/openssl/crypto/sha/asm/sha256-armv4.s +++ b/app/openssl/crypto/sha/asm/sha256-armv4.s @@ -1,3 +1,5 @@ +#include "arm_arch.h" + .text .code 32 @@ -27,11 +29,14 @@ K256: sha256_block_data_order: sub r3,pc,#8 @ sha256_block_data_order add r2,r1,r2,lsl#6 @ len to point at the end of inp - stmdb sp!,{r0,r1,r2,r4-r12,lr} + stmdb sp!,{r0,r1,r2,r4-r11,lr} ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} sub r14,r3,#256 @ K256 sub sp,sp,#16*4 @ alloca(X[16]) .Loop: +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 0 ldrb r12,[r1,#2] ldrb 
r2,[r1,#1] @@ -39,14 +44,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r8,ror#6 - str r3,[sp,#0*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r8,ror#11 eor r2,r9,r10 +#if 0>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 0==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 + str r3,[sp,#0*4] add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) add r3,r3,r11 @@ -55,6 +68,9 @@ sha256_block_data_order: eor r11,r11,r4,ror#13 add r3,r3,r12 eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 0>=15 + ldr r1,[sp,#2*4] @ from BODY_16_xx +#endif orr r0,r4,r5 and r2,r4,r5 and r0,r0,r6 @@ -62,6 +78,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r7,r7,r3 add r11,r11,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 1 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -69,14 +88,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r7,ror#6 - str r3,[sp,#1*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r7,ror#11 eor r2,r8,r9 +#if 1>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 1==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 + str r3,[sp,#1*4] add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) add r3,r3,r10 @@ -85,6 +112,9 @@ sha256_block_data_order: eor r10,r10,r11,ror#13 add r3,r3,r12 eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 1>=15 + ldr r1,[sp,#3*4] @ from BODY_16_xx +#endif orr r0,r11,r4 and r2,r11,r4 and r0,r0,r5 @@ -92,6 +122,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r6,r6,r3 add r10,r10,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 2 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -99,14 +132,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r6,ror#6 - str r3,[sp,#2*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r6,ror#11 eor r2,r7,r8 +#if 2>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 2==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 + str r3,[sp,#2*4] add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) add r3,r3,r9 @@ -115,6 +156,9 @@ sha256_block_data_order: eor r9,r9,r10,ror#13 add r3,r3,r12 eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 2>=15 + ldr r1,[sp,#4*4] @ from BODY_16_xx +#endif orr r0,r10,r11 and r2,r10,r11 and r0,r0,r4 @@ -122,6 +166,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r5,r5,r3 add r9,r9,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 3 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -129,14 +176,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r5,ror#6 - str r3,[sp,#3*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r5,ror#11 eor r2,r6,r7 +#if 3>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 3==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 + str r3,[sp,#3*4] add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) add r3,r3,r8 @@ -145,6 +200,9 @@ sha256_block_data_order: eor r8,r8,r9,ror#13 add r3,r3,r12 eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 3>=15 + ldr r1,[sp,#5*4] @ from BODY_16_xx +#endif orr r0,r9,r10 and r2,r9,r10 and r0,r0,r11 @@ -152,6 
+210,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r4,r4,r3 add r8,r8,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 4 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -159,14 +220,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r4,ror#6 - str r3,[sp,#4*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r4,ror#11 eor r2,r5,r6 +#if 4>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 4==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 + str r3,[sp,#4*4] add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) add r3,r3,r7 @@ -175,6 +244,9 @@ sha256_block_data_order: eor r7,r7,r8,ror#13 add r3,r3,r12 eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 4>=15 + ldr r1,[sp,#6*4] @ from BODY_16_xx +#endif orr r0,r8,r9 and r2,r8,r9 and r0,r0,r10 @@ -182,6 +254,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r11,r11,r3 add r7,r7,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 5 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -189,14 +264,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r11,ror#6 - str r3,[sp,#5*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r11,ror#11 eor r2,r4,r5 +#if 5>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 5==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 + str r3,[sp,#5*4] add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) add r3,r3,r6 @@ -205,6 +288,9 @@ sha256_block_data_order: eor r6,r6,r7,ror#13 add r3,r3,r12 eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 5>=15 + ldr r1,[sp,#7*4] @ from BODY_16_xx +#endif orr r0,r7,r8 and r2,r7,r8 and r0,r0,r9 @@ -212,6 +298,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r10,r10,r3 add r6,r6,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 6 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -219,14 +308,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r10,ror#6 - str r3,[sp,#6*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r10,ror#11 eor r2,r11,r4 +#if 6>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 6==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 + str r3,[sp,#6*4] add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) add r3,r3,r5 @@ -235,6 +332,9 @@ sha256_block_data_order: eor r5,r5,r6,ror#13 add r3,r3,r12 eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 6>=15 + ldr r1,[sp,#8*4] @ from BODY_16_xx +#endif orr r0,r6,r7 and r2,r6,r7 and r0,r0,r8 @@ -242,6 +342,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r9,r9,r3 add r5,r5,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 7 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -249,14 +352,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r9,ror#6 - str r3,[sp,#7*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r9,ror#11 eor r2,r10,r11 +#if 7>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 7==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 + str r3,[sp,#7*4] add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) add r3,r3,r4 @@ -265,6 +376,9 @@ sha256_block_data_order: 
eor r4,r4,r5,ror#13 add r3,r3,r12 eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 7>=15 + ldr r1,[sp,#9*4] @ from BODY_16_xx +#endif orr r0,r5,r6 and r2,r5,r6 and r0,r0,r7 @@ -272,6 +386,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r8,r8,r3 add r4,r4,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 8 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -279,14 +396,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r8,ror#6 - str r3,[sp,#8*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r8,ror#11 eor r2,r9,r10 +#if 8>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 8==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 + str r3,[sp,#8*4] add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) add r3,r3,r11 @@ -295,6 +420,9 @@ sha256_block_data_order: eor r11,r11,r4,ror#13 add r3,r3,r12 eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 8>=15 + ldr r1,[sp,#10*4] @ from BODY_16_xx +#endif orr r0,r4,r5 and r2,r4,r5 and r0,r0,r6 @@ -302,6 +430,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r7,r7,r3 add r11,r11,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 9 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -309,14 +440,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r7,ror#6 - str r3,[sp,#9*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r7,ror#11 eor r2,r8,r9 +#if 9>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 9==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 + str r3,[sp,#9*4] add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) add r3,r3,r10 @@ -325,6 +464,9 @@ sha256_block_data_order: eor r10,r10,r11,ror#13 add r3,r3,r12 eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 9>=15 + ldr r1,[sp,#11*4] @ from BODY_16_xx +#endif orr r0,r11,r4 and r2,r11,r4 and r0,r0,r5 @@ -332,6 +474,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r6,r6,r3 add r10,r10,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 10 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -339,14 +484,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r6,ror#6 - str r3,[sp,#10*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r6,ror#11 eor r2,r7,r8 +#if 10>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 10==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 + str r3,[sp,#10*4] add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) add r3,r3,r9 @@ -355,6 +508,9 @@ sha256_block_data_order: eor r9,r9,r10,ror#13 add r3,r3,r12 eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 10>=15 + ldr r1,[sp,#12*4] @ from BODY_16_xx +#endif orr r0,r10,r11 and r2,r10,r11 and r0,r0,r4 @@ -362,6 +518,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r5,r5,r3 add r9,r9,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 11 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -369,14 +528,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r5,ror#6 - str r3,[sp,#11*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r5,ror#11 eor r2,r6,r7 +#if 11>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 11==15 + str 
r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 + str r3,[sp,#11*4] add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) add r3,r3,r8 @@ -385,6 +552,9 @@ sha256_block_data_order: eor r8,r8,r9,ror#13 add r3,r3,r12 eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 11>=15 + ldr r1,[sp,#13*4] @ from BODY_16_xx +#endif orr r0,r9,r10 and r2,r9,r10 and r0,r0,r11 @@ -392,6 +562,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r4,r4,r3 add r8,r8,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 12 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -399,14 +572,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r4,ror#6 - str r3,[sp,#12*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r4,ror#11 eor r2,r5,r6 +#if 12>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 12==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 + str r3,[sp,#12*4] add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) add r3,r3,r7 @@ -415,6 +596,9 @@ sha256_block_data_order: eor r7,r7,r8,ror#13 add r3,r3,r12 eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 12>=15 + ldr r1,[sp,#14*4] @ from BODY_16_xx +#endif orr r0,r8,r9 and r2,r8,r9 and r0,r0,r10 @@ -422,6 +606,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r11,r11,r3 add r7,r7,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 13 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -429,14 +616,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r11,ror#6 - str r3,[sp,#13*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r11,ror#11 eor r2,r4,r5 +#if 13>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 13==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 + str r3,[sp,#13*4] add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) add r3,r3,r6 @@ -445,6 +640,9 @@ sha256_block_data_order: eor r6,r6,r7,ror#13 add r3,r3,r12 eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 13>=15 + ldr r1,[sp,#15*4] @ from BODY_16_xx +#endif orr r0,r7,r8 and r2,r7,r8 and r0,r0,r9 @@ -452,6 +650,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r10,r10,r3 add r6,r6,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 14 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -459,14 +660,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - - ldr r12,[r14],#4 @ *K256++ +#endif mov r0,r10,ror#6 - str r3,[sp,#14*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r10,ror#11 eor r2,r11,r4 +#if 14>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 14==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 + str r3,[sp,#14*4] add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) add r3,r3,r5 @@ -475,6 +684,9 @@ sha256_block_data_order: eor r5,r5,r6,ror#13 add r3,r3,r12 eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 14>=15 + ldr r1,[sp,#0*4] @ from BODY_16_xx +#endif orr r0,r6,r7 and r2,r6,r7 and r0,r0,r8 @@ -482,6 +694,9 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r9,r9,r3 add r5,r5,r0 +#if __ARM_ARCH__>=7 + ldr r3,[r1],#4 +#else ldrb r3,[r1,#3] @ 15 ldrb r12,[r1,#2] ldrb r2,[r1,#1] @@ -489,14 +704,22 @@ sha256_block_data_order: orr r3,r3,r12,lsl#8 orr r3,r3,r2,lsl#16 orr r3,r3,r0,lsl#24 - str r1,[sp,#17*4] - ldr r12,[r14],#4 @ *K256++ +#endif mov 
r0,r9,ror#6 - str r3,[sp,#15*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r9,ror#11 eor r2,r10,r11 +#if 15>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 15==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 + str r3,[sp,#15*4] add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) add r3,r3,r4 @@ -505,6 +728,9 @@ sha256_block_data_order: eor r4,r4,r5,ror#13 add r3,r3,r12 eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 15>=15 + ldr r1,[sp,#1*4] @ from BODY_16_xx +#endif orr r0,r5,r6 and r2,r5,r6 and r0,r0,r7 @@ -513,26 +739,34 @@ sha256_block_data_order: add r8,r8,r3 add r4,r4,r0 .Lrounds_16_xx: - ldr r2,[sp,#1*4] @ 16 + @ ldr r1,[sp,#1*4] @ 16 ldr r12,[sp,#14*4] + mov r0,r1,ror#7 ldr r3,[sp,#0*4] - mov r0,r2,ror#7 - ldr r1,[sp,#9*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#9*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r8,ror#6 - str r3,[sp,#0*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r8,ror#11 eor r2,r9,r10 +#if 16>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 16==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 + str r3,[sp,#0*4] add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) add r3,r3,r11 @@ -541,6 +775,9 @@ sha256_block_data_order: eor r11,r11,r4,ror#13 add r3,r3,r12 eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 16>=15 + ldr r1,[sp,#2*4] @ from BODY_16_xx +#endif orr r0,r4,r5 and r2,r4,r5 and r0,r0,r6 @@ -548,26 +785,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r7,r7,r3 add r11,r11,r0 - ldr r2,[sp,#2*4] @ 17 + @ ldr r1,[sp,#2*4] @ 17 ldr r12,[sp,#15*4] + mov r0,r1,ror#7 ldr r3,[sp,#1*4] - mov r0,r2,ror#7 - ldr r1,[sp,#10*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#10*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r7,ror#6 - str r3,[sp,#1*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r7,ror#11 eor r2,r8,r9 +#if 17>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 17==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 + str r3,[sp,#1*4] add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) add r3,r3,r10 @@ -576,6 +821,9 @@ sha256_block_data_order: eor r10,r10,r11,ror#13 add r3,r3,r12 eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 17>=15 + ldr r1,[sp,#3*4] @ from BODY_16_xx +#endif orr r0,r11,r4 and r2,r11,r4 and r0,r0,r5 @@ -583,26 +831,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r6,r6,r3 add r10,r10,r0 - ldr r2,[sp,#3*4] @ 18 + @ ldr r1,[sp,#3*4] @ 18 ldr r12,[sp,#0*4] + mov r0,r1,ror#7 ldr r3,[sp,#2*4] - mov r0,r2,ror#7 - ldr r1,[sp,#11*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#11*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ 
sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r6,ror#6 - str r3,[sp,#2*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r6,ror#11 eor r2,r7,r8 +#if 18>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 18==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 + str r3,[sp,#2*4] add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) add r3,r3,r9 @@ -611,6 +867,9 @@ sha256_block_data_order: eor r9,r9,r10,ror#13 add r3,r3,r12 eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 18>=15 + ldr r1,[sp,#4*4] @ from BODY_16_xx +#endif orr r0,r10,r11 and r2,r10,r11 and r0,r0,r4 @@ -618,26 +877,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r5,r5,r3 add r9,r9,r0 - ldr r2,[sp,#4*4] @ 19 + @ ldr r1,[sp,#4*4] @ 19 ldr r12,[sp,#1*4] + mov r0,r1,ror#7 ldr r3,[sp,#3*4] - mov r0,r2,ror#7 - ldr r1,[sp,#12*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#12*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r5,ror#6 - str r3,[sp,#3*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r5,ror#11 eor r2,r6,r7 +#if 19>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 19==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 + str r3,[sp,#3*4] add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) add r3,r3,r8 @@ -646,6 +913,9 @@ sha256_block_data_order: eor r8,r8,r9,ror#13 add r3,r3,r12 eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 19>=15 + ldr r1,[sp,#5*4] @ from BODY_16_xx +#endif orr r0,r9,r10 and r2,r9,r10 and r0,r0,r11 @@ -653,26 +923,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r4,r4,r3 add r8,r8,r0 - ldr r2,[sp,#5*4] @ 20 + @ ldr r1,[sp,#5*4] @ 20 ldr r12,[sp,#2*4] + mov r0,r1,ror#7 ldr r3,[sp,#4*4] - mov r0,r2,ror#7 - ldr r1,[sp,#13*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#13*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r4,ror#6 - str r3,[sp,#4*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r4,ror#11 eor r2,r5,r6 +#if 20>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 20==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 + str r3,[sp,#4*4] add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) add r3,r3,r7 @@ -681,6 +959,9 @@ sha256_block_data_order: eor r7,r7,r8,ror#13 add r3,r3,r12 eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 20>=15 + ldr r1,[sp,#6*4] @ from BODY_16_xx +#endif orr r0,r8,r9 and r2,r8,r9 and r0,r0,r10 @@ -688,26 +969,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r11,r11,r3 add r7,r7,r0 - ldr r2,[sp,#6*4] @ 21 + @ ldr r1,[sp,#6*4] @ 21 ldr r12,[sp,#3*4] + mov r0,r1,ror#7 ldr r3,[sp,#5*4] - mov r0,r2,ror#7 - ldr r1,[sp,#14*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#14*4] + 
eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r11,ror#6 - str r3,[sp,#5*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r11,ror#11 eor r2,r4,r5 +#if 21>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 21==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 + str r3,[sp,#5*4] add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) add r3,r3,r6 @@ -716,6 +1005,9 @@ sha256_block_data_order: eor r6,r6,r7,ror#13 add r3,r3,r12 eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 21>=15 + ldr r1,[sp,#7*4] @ from BODY_16_xx +#endif orr r0,r7,r8 and r2,r7,r8 and r0,r0,r9 @@ -723,26 +1015,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r10,r10,r3 add r6,r6,r0 - ldr r2,[sp,#7*4] @ 22 + @ ldr r1,[sp,#7*4] @ 22 ldr r12,[sp,#4*4] + mov r0,r1,ror#7 ldr r3,[sp,#6*4] - mov r0,r2,ror#7 - ldr r1,[sp,#15*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#15*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r10,ror#6 - str r3,[sp,#6*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r10,ror#11 eor r2,r11,r4 +#if 22>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 22==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 + str r3,[sp,#6*4] add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) add r3,r3,r5 @@ -751,6 +1051,9 @@ sha256_block_data_order: eor r5,r5,r6,ror#13 add r3,r3,r12 eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 22>=15 + ldr r1,[sp,#8*4] @ from BODY_16_xx +#endif orr r0,r6,r7 and r2,r6,r7 and r0,r0,r8 @@ -758,26 +1061,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r9,r9,r3 add r5,r5,r0 - ldr r2,[sp,#8*4] @ 23 + @ ldr r1,[sp,#8*4] @ 23 ldr r12,[sp,#5*4] + mov r0,r1,ror#7 ldr r3,[sp,#7*4] - mov r0,r2,ror#7 - ldr r1,[sp,#0*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#0*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r9,ror#6 - str r3,[sp,#7*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r9,ror#11 eor r2,r10,r11 +#if 23>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 23==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 + str r3,[sp,#7*4] add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) add r3,r3,r4 @@ -786,6 +1097,9 @@ sha256_block_data_order: eor r4,r4,r5,ror#13 add r3,r3,r12 eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 23>=15 + ldr r1,[sp,#9*4] @ from BODY_16_xx +#endif orr r0,r5,r6 and r2,r5,r6 and r0,r0,r7 @@ -793,26 +1107,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r8,r8,r3 add r4,r4,r0 - ldr r2,[sp,#9*4] @ 24 + @ ldr r1,[sp,#9*4] @ 24 ldr r12,[sp,#6*4] + mov r0,r1,ror#7 ldr r3,[sp,#8*4] - mov r0,r2,ror#7 - ldr 
r1,[sp,#1*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#1*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r8,ror#6 - str r3,[sp,#8*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r8,ror#11 eor r2,r9,r10 +#if 24>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 24==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r8,ror#25 @ Sigma1(e) and r2,r2,r8 + str r3,[sp,#8*4] add r3,r3,r0 eor r2,r2,r10 @ Ch(e,f,g) add r3,r3,r11 @@ -821,6 +1143,9 @@ sha256_block_data_order: eor r11,r11,r4,ror#13 add r3,r3,r12 eor r11,r11,r4,ror#22 @ Sigma0(a) +#if 24>=15 + ldr r1,[sp,#10*4] @ from BODY_16_xx +#endif orr r0,r4,r5 and r2,r4,r5 and r0,r0,r6 @@ -828,26 +1153,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r7,r7,r3 add r11,r11,r0 - ldr r2,[sp,#10*4] @ 25 + @ ldr r1,[sp,#10*4] @ 25 ldr r12,[sp,#7*4] + mov r0,r1,ror#7 ldr r3,[sp,#9*4] - mov r0,r2,ror#7 - ldr r1,[sp,#2*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#2*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r7,ror#6 - str r3,[sp,#9*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r7,ror#11 eor r2,r8,r9 +#if 25>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 25==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r7,ror#25 @ Sigma1(e) and r2,r2,r7 + str r3,[sp,#9*4] add r3,r3,r0 eor r2,r2,r9 @ Ch(e,f,g) add r3,r3,r10 @@ -856,6 +1189,9 @@ sha256_block_data_order: eor r10,r10,r11,ror#13 add r3,r3,r12 eor r10,r10,r11,ror#22 @ Sigma0(a) +#if 25>=15 + ldr r1,[sp,#11*4] @ from BODY_16_xx +#endif orr r0,r11,r4 and r2,r11,r4 and r0,r0,r5 @@ -863,26 +1199,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r6,r6,r3 add r10,r10,r0 - ldr r2,[sp,#11*4] @ 26 + @ ldr r1,[sp,#11*4] @ 26 ldr r12,[sp,#8*4] + mov r0,r1,ror#7 ldr r3,[sp,#10*4] - mov r0,r2,ror#7 - ldr r1,[sp,#3*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#3*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r6,ror#6 - str r3,[sp,#10*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r6,ror#11 eor r2,r7,r8 +#if 26>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 26==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r6,ror#25 @ Sigma1(e) and r2,r2,r6 + str r3,[sp,#10*4] add r3,r3,r0 eor r2,r2,r8 @ Ch(e,f,g) add r3,r3,r9 @@ -891,6 +1235,9 @@ sha256_block_data_order: eor r9,r9,r10,ror#13 add r3,r3,r12 eor r9,r9,r10,ror#22 @ Sigma0(a) +#if 26>=15 + ldr r1,[sp,#12*4] @ from BODY_16_xx +#endif orr r0,r10,r11 and r2,r10,r11 and r0,r0,r4 @@ -898,26 +1245,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) 
add r5,r5,r3 add r9,r9,r0 - ldr r2,[sp,#12*4] @ 27 + @ ldr r1,[sp,#12*4] @ 27 ldr r12,[sp,#9*4] + mov r0,r1,ror#7 ldr r3,[sp,#11*4] - mov r0,r2,ror#7 - ldr r1,[sp,#4*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#4*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r5,ror#6 - str r3,[sp,#11*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r5,ror#11 eor r2,r6,r7 +#if 27>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 27==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r5,ror#25 @ Sigma1(e) and r2,r2,r5 + str r3,[sp,#11*4] add r3,r3,r0 eor r2,r2,r7 @ Ch(e,f,g) add r3,r3,r8 @@ -926,6 +1281,9 @@ sha256_block_data_order: eor r8,r8,r9,ror#13 add r3,r3,r12 eor r8,r8,r9,ror#22 @ Sigma0(a) +#if 27>=15 + ldr r1,[sp,#13*4] @ from BODY_16_xx +#endif orr r0,r9,r10 and r2,r9,r10 and r0,r0,r11 @@ -933,26 +1291,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r4,r4,r3 add r8,r8,r0 - ldr r2,[sp,#13*4] @ 28 + @ ldr r1,[sp,#13*4] @ 28 ldr r12,[sp,#10*4] + mov r0,r1,ror#7 ldr r3,[sp,#12*4] - mov r0,r2,ror#7 - ldr r1,[sp,#5*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#5*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r4,ror#6 - str r3,[sp,#12*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r4,ror#11 eor r2,r5,r6 +#if 28>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 28==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r4,ror#25 @ Sigma1(e) and r2,r2,r4 + str r3,[sp,#12*4] add r3,r3,r0 eor r2,r2,r6 @ Ch(e,f,g) add r3,r3,r7 @@ -961,6 +1327,9 @@ sha256_block_data_order: eor r7,r7,r8,ror#13 add r3,r3,r12 eor r7,r7,r8,ror#22 @ Sigma0(a) +#if 28>=15 + ldr r1,[sp,#14*4] @ from BODY_16_xx +#endif orr r0,r8,r9 and r2,r8,r9 and r0,r0,r10 @@ -968,26 +1337,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r11,r11,r3 add r7,r7,r0 - ldr r2,[sp,#14*4] @ 29 + @ ldr r1,[sp,#14*4] @ 29 ldr r12,[sp,#11*4] + mov r0,r1,ror#7 ldr r3,[sp,#13*4] - mov r0,r2,ror#7 - ldr r1,[sp,#6*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#6*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r11,ror#6 - str r3,[sp,#13*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r11,ror#11 eor r2,r4,r5 +#if 29>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 29==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r11,ror#25 @ Sigma1(e) and r2,r2,r11 + str r3,[sp,#13*4] add r3,r3,r0 eor r2,r2,r5 @ Ch(e,f,g) add r3,r3,r6 @@ -996,6 +1373,9 @@ sha256_block_data_order: eor r6,r6,r7,ror#13 add r3,r3,r12 eor r6,r6,r7,ror#22 @ Sigma0(a) +#if 29>=15 + ldr 
r1,[sp,#15*4] @ from BODY_16_xx +#endif orr r0,r7,r8 and r2,r7,r8 and r0,r0,r9 @@ -1003,26 +1383,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r10,r10,r3 add r6,r6,r0 - ldr r2,[sp,#15*4] @ 30 + @ ldr r1,[sp,#15*4] @ 30 ldr r12,[sp,#12*4] + mov r0,r1,ror#7 ldr r3,[sp,#14*4] - mov r0,r2,ror#7 - ldr r1,[sp,#7*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#7*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r10,ror#6 - str r3,[sp,#14*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r10,ror#11 eor r2,r11,r4 +#if 30>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 30==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r10,ror#25 @ Sigma1(e) and r2,r2,r10 + str r3,[sp,#14*4] add r3,r3,r0 eor r2,r2,r4 @ Ch(e,f,g) add r3,r3,r5 @@ -1031,6 +1419,9 @@ sha256_block_data_order: eor r5,r5,r6,ror#13 add r3,r3,r12 eor r5,r5,r6,ror#22 @ Sigma0(a) +#if 30>=15 + ldr r1,[sp,#0*4] @ from BODY_16_xx +#endif orr r0,r6,r7 and r2,r6,r7 and r0,r0,r8 @@ -1038,26 +1429,34 @@ sha256_block_data_order: orr r0,r0,r2 @ Maj(a,b,c) add r9,r9,r3 add r5,r5,r0 - ldr r2,[sp,#0*4] @ 31 + @ ldr r1,[sp,#0*4] @ 31 ldr r12,[sp,#13*4] + mov r0,r1,ror#7 ldr r3,[sp,#15*4] - mov r0,r2,ror#7 - ldr r1,[sp,#8*4] - eor r0,r0,r2,ror#18 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - mov r2,r12,ror#17 + eor r0,r0,r1,ror#18 + ldr r2,[sp,#8*4] + eor r0,r0,r1,lsr#3 @ sigma0(X[i+1]) + mov r1,r12,ror#17 add r3,r3,r0 - eor r2,r2,r12,ror#19 - add r3,r3,r1 - eor r2,r2,r12,lsr#10 @ sigma1(X[i+14]) + eor r1,r1,r12,ror#19 add r3,r3,r2 - ldr r12,[r14],#4 @ *K256++ + eor r1,r1,r12,lsr#10 @ sigma1(X[i+14]) + @ add r3,r3,r1 mov r0,r9,ror#6 - str r3,[sp,#15*4] + ldr r12,[r14],#4 @ *K256++ eor r0,r0,r9,ror#11 eor r2,r10,r11 +#if 31>=16 + add r3,r3,r1 @ from BODY_16_xx +#elif __ARM_ARCH__>=7 && defined(__ARMEL__) + rev r3,r3 +#endif +#if 31==15 + str r1,[sp,#17*4] @ leave room for r1 +#endif eor r0,r0,r9,ror#25 @ Sigma1(e) and r2,r2,r9 + str r3,[sp,#15*4] add r3,r3,r0 eor r2,r2,r11 @ Ch(e,f,g) add r3,r3,r4 @@ -1066,6 +1465,9 @@ sha256_block_data_order: eor r4,r4,r5,ror#13 add r3,r3,r12 eor r4,r4,r5,ror#22 @ Sigma0(a) +#if 31>=15 + ldr r1,[sp,#1*4] @ from BODY_16_xx +#endif orr r0,r5,r6 and r2,r5,r6 and r0,r0,r7 @@ -1102,10 +1504,14 @@ sha256_block_data_order: bne .Loop add sp,sp,#19*4 @ destroy frame - ldmia sp!,{r4-r12,lr} +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r11,pc} +#else + ldmia sp!,{r4-r11,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) +#endif .size sha256_block_data_order,.-sha256_block_data_order .asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by " .align 2 diff --git a/app/openssl/crypto/sha/asm/sha256-mips.S b/app/openssl/crypto/sha/asm/sha256-mips.S new file mode 100644 index 00000000..2bd728e9 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha256-mips.S @@ -0,0 +1,1998 @@ +#ifdef OPENSSL_FIPSCANISTER +# include +#endif + +.text +.set noat +#if !defined(__vxworks) || defined(__pic__) +.option pic2 +#endif + +.align 5 +.globl sha256_block_data_order +.ent sha256_block_data_order +sha256_block_data_order: + .frame $29,128,$31 + .mask 3237937152,-4 + .set noreorder + .cpload $25 + sub $29,128 + sw $31,128-1*4($29) + sw 
$30,128-2*4($29) + sw $23,128-3*4($29) + sw $22,128-4*4($29) + sw $21,128-5*4($29) + sw $20,128-6*4($29) + sw $19,128-7*4($29) + sw $18,128-8*4($29) + sw $17,128-9*4($29) + sw $16,128-10*4($29) + sll $23,$6,6 + .set reorder + la $6,K256 # PIC-ified 'load address' + + lw $1,0*4($4) # load context + lw $2,1*4($4) + lw $3,2*4($4) + lw $7,3*4($4) + lw $24,4*4($4) + lw $25,5*4($4) + lw $30,6*4($4) + lw $31,7*4($4) + + add $23,$5 # pointer to the end of input + sw $23,16*4($29) + b .Loop + +.align 5 +.Loop: + lwl $8,3($5) + lwr $8,0($5) + lwl $9,7($5) + lwr $9,4($5) + srl $13,$8,24 # byte swap(0) + srl $14,$8,8 + andi $15,$8,0xFF00 + sll $8,$8,24 + andi $14,0xFF00 + sll $15,$15,8 + or $8,$13 + or $14,$15 + or $8,$14 + addu $12,$8,$31 # 0 + srl $31,$24,6 + xor $15,$25,$30 + sll $14,$24,7 + and $15,$24 + srl $13,$24,11 + xor $31,$14 + sll $14,$24,21 + xor $31,$13 + srl $13,$24,25 + xor $31,$14 + sll $14,$24,26 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + srl $31,$1,2 + addu $12,$15 + lw $15,0($6) # K[0] + sll $14,$1,10 + addu $12,$13 + srl $13,$1,13 + xor $31,$14 + sll $14,$1,19 + xor $31,$13 + srl $13,$1,22 + xor $31,$14 + sll $14,$1,30 + xor $31,$13 + sw $8,0($29) # offload to ring buffer + xor $31,$14 # Sigma0(a) + + or $13,$1,$2 + and $14,$1,$2 + and $13,$3 + or $14,$13 # Maj(a,b,c) + addu $12,$15 # +=K[0] + addu $31,$14 + + addu $7,$12 + addu $31,$12 + lwl $10,11($5) + lwr $10,8($5) + srl $14,$9,24 # byte swap(1) + srl $15,$9,8 + andi $16,$9,0xFF00 + sll $9,$9,24 + andi $15,0xFF00 + sll $16,$16,8 + or $9,$14 + or $15,$16 + or $9,$15 + addu $13,$9,$30 # 1 + srl $30,$7,6 + xor $16,$24,$25 + sll $15,$7,7 + and $16,$7 + srl $14,$7,11 + xor $30,$15 + sll $15,$7,21 + xor $30,$14 + srl $14,$7,25 + xor $30,$15 + sll $15,$7,26 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + srl $30,$31,2 + addu $13,$16 + lw $16,4($6) # K[1] + sll $15,$31,10 + addu $13,$14 + srl $14,$31,13 + xor $30,$15 + sll $15,$31,19 + xor $30,$14 + srl $14,$31,22 + xor $30,$15 + sll $15,$31,30 + xor $30,$14 + sw $9,4($29) # offload to ring buffer + xor $30,$15 # Sigma0(a) + + or $14,$31,$1 + and $15,$31,$1 + and $14,$2 + or $15,$14 # Maj(a,b,c) + addu $13,$16 # +=K[1] + addu $30,$15 + + addu $3,$13 + addu $30,$13 + lwl $11,15($5) + lwr $11,12($5) + srl $15,$10,24 # byte swap(2) + srl $16,$10,8 + andi $17,$10,0xFF00 + sll $10,$10,24 + andi $16,0xFF00 + sll $17,$17,8 + or $10,$15 + or $16,$17 + or $10,$16 + addu $14,$10,$25 # 2 + srl $25,$3,6 + xor $17,$7,$24 + sll $16,$3,7 + and $17,$3 + srl $15,$3,11 + xor $25,$16 + sll $16,$3,21 + xor $25,$15 + srl $15,$3,25 + xor $25,$16 + sll $16,$3,26 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + srl $25,$30,2 + addu $14,$17 + lw $17,8($6) # K[2] + sll $16,$30,10 + addu $14,$15 + srl $15,$30,13 + xor $25,$16 + sll $16,$30,19 + xor $25,$15 + srl $15,$30,22 + xor $25,$16 + sll $16,$30,30 + xor $25,$15 + sw $10,8($29) # offload to ring buffer + xor $25,$16 # Sigma0(a) + + or $15,$30,$31 + and $16,$30,$31 + and $15,$1 + or $16,$15 # Maj(a,b,c) + addu $14,$17 # +=K[2] + addu $25,$16 + + addu $2,$14 + addu $25,$14 + lwl $12,19($5) + lwr $12,16($5) + srl $16,$11,24 # byte swap(3) + srl $17,$11,8 + andi $18,$11,0xFF00 + sll $11,$11,24 + andi $17,0xFF00 + sll $18,$18,8 + or $11,$16 + or $17,$18 + or $11,$17 + addu $15,$11,$24 # 3 + srl $24,$2,6 + xor $18,$3,$7 + sll $17,$2,7 + and $18,$2 + srl $16,$2,11 + xor $24,$17 + sll $17,$2,21 + xor $24,$16 + srl $16,$2,25 + xor $24,$17 + sll $17,$2,26 + xor $24,$16 + xor $18,$7 # 
Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + srl $24,$25,2 + addu $15,$18 + lw $18,12($6) # K[3] + sll $17,$25,10 + addu $15,$16 + srl $16,$25,13 + xor $24,$17 + sll $17,$25,19 + xor $24,$16 + srl $16,$25,22 + xor $24,$17 + sll $17,$25,30 + xor $24,$16 + sw $11,12($29) # offload to ring buffer + xor $24,$17 # Sigma0(a) + + or $16,$25,$30 + and $17,$25,$30 + and $16,$31 + or $17,$16 # Maj(a,b,c) + addu $15,$18 # +=K[3] + addu $24,$17 + + addu $1,$15 + addu $24,$15 + lwl $13,23($5) + lwr $13,20($5) + srl $17,$12,24 # byte swap(4) + srl $18,$12,8 + andi $19,$12,0xFF00 + sll $12,$12,24 + andi $18,0xFF00 + sll $19,$19,8 + or $12,$17 + or $18,$19 + or $12,$18 + addu $16,$12,$7 # 4 + srl $7,$1,6 + xor $19,$2,$3 + sll $18,$1,7 + and $19,$1 + srl $17,$1,11 + xor $7,$18 + sll $18,$1,21 + xor $7,$17 + srl $17,$1,25 + xor $7,$18 + sll $18,$1,26 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + srl $7,$24,2 + addu $16,$19 + lw $19,16($6) # K[4] + sll $18,$24,10 + addu $16,$17 + srl $17,$24,13 + xor $7,$18 + sll $18,$24,19 + xor $7,$17 + srl $17,$24,22 + xor $7,$18 + sll $18,$24,30 + xor $7,$17 + sw $12,16($29) # offload to ring buffer + xor $7,$18 # Sigma0(a) + + or $17,$24,$25 + and $18,$24,$25 + and $17,$30 + or $18,$17 # Maj(a,b,c) + addu $16,$19 # +=K[4] + addu $7,$18 + + addu $31,$16 + addu $7,$16 + lwl $14,27($5) + lwr $14,24($5) + srl $18,$13,24 # byte swap(5) + srl $19,$13,8 + andi $20,$13,0xFF00 + sll $13,$13,24 + andi $19,0xFF00 + sll $20,$20,8 + or $13,$18 + or $19,$20 + or $13,$19 + addu $17,$13,$3 # 5 + srl $3,$31,6 + xor $20,$1,$2 + sll $19,$31,7 + and $20,$31 + srl $18,$31,11 + xor $3,$19 + sll $19,$31,21 + xor $3,$18 + srl $18,$31,25 + xor $3,$19 + sll $19,$31,26 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + srl $3,$7,2 + addu $17,$20 + lw $20,20($6) # K[5] + sll $19,$7,10 + addu $17,$18 + srl $18,$7,13 + xor $3,$19 + sll $19,$7,19 + xor $3,$18 + srl $18,$7,22 + xor $3,$19 + sll $19,$7,30 + xor $3,$18 + sw $13,20($29) # offload to ring buffer + xor $3,$19 # Sigma0(a) + + or $18,$7,$24 + and $19,$7,$24 + and $18,$25 + or $19,$18 # Maj(a,b,c) + addu $17,$20 # +=K[5] + addu $3,$19 + + addu $30,$17 + addu $3,$17 + lwl $15,31($5) + lwr $15,28($5) + srl $19,$14,24 # byte swap(6) + srl $20,$14,8 + andi $21,$14,0xFF00 + sll $14,$14,24 + andi $20,0xFF00 + sll $21,$21,8 + or $14,$19 + or $20,$21 + or $14,$20 + addu $18,$14,$2 # 6 + srl $2,$30,6 + xor $21,$31,$1 + sll $20,$30,7 + and $21,$30 + srl $19,$30,11 + xor $2,$20 + sll $20,$30,21 + xor $2,$19 + srl $19,$30,25 + xor $2,$20 + sll $20,$30,26 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + srl $2,$3,2 + addu $18,$21 + lw $21,24($6) # K[6] + sll $20,$3,10 + addu $18,$19 + srl $19,$3,13 + xor $2,$20 + sll $20,$3,19 + xor $2,$19 + srl $19,$3,22 + xor $2,$20 + sll $20,$3,30 + xor $2,$19 + sw $14,24($29) # offload to ring buffer + xor $2,$20 # Sigma0(a) + + or $19,$3,$7 + and $20,$3,$7 + and $19,$24 + or $20,$19 # Maj(a,b,c) + addu $18,$21 # +=K[6] + addu $2,$20 + + addu $25,$18 + addu $2,$18 + lwl $16,35($5) + lwr $16,32($5) + srl $20,$15,24 # byte swap(7) + srl $21,$15,8 + andi $22,$15,0xFF00 + sll $15,$15,24 + andi $21,0xFF00 + sll $22,$22,8 + or $15,$20 + or $21,$22 + or $15,$21 + addu $19,$15,$1 # 7 + srl $1,$25,6 + xor $22,$30,$31 + sll $21,$25,7 + and $22,$25 + srl $20,$25,11 + xor $1,$21 + sll $21,$25,21 + xor $1,$20 + srl $20,$25,25 + xor $1,$21 + sll $21,$25,26 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + srl $1,$2,2 + addu $19,$22 + lw 
$22,28($6) # K[7] + sll $21,$2,10 + addu $19,$20 + srl $20,$2,13 + xor $1,$21 + sll $21,$2,19 + xor $1,$20 + srl $20,$2,22 + xor $1,$21 + sll $21,$2,30 + xor $1,$20 + sw $15,28($29) # offload to ring buffer + xor $1,$21 # Sigma0(a) + + or $20,$2,$3 + and $21,$2,$3 + and $20,$7 + or $21,$20 # Maj(a,b,c) + addu $19,$22 # +=K[7] + addu $1,$21 + + addu $24,$19 + addu $1,$19 + lwl $17,39($5) + lwr $17,36($5) + srl $21,$16,24 # byte swap(8) + srl $22,$16,8 + andi $23,$16,0xFF00 + sll $16,$16,24 + andi $22,0xFF00 + sll $23,$23,8 + or $16,$21 + or $22,$23 + or $16,$22 + addu $20,$16,$31 # 8 + srl $31,$24,6 + xor $23,$25,$30 + sll $22,$24,7 + and $23,$24 + srl $21,$24,11 + xor $31,$22 + sll $22,$24,21 + xor $31,$21 + srl $21,$24,25 + xor $31,$22 + sll $22,$24,26 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + srl $31,$1,2 + addu $20,$23 + lw $23,32($6) # K[8] + sll $22,$1,10 + addu $20,$21 + srl $21,$1,13 + xor $31,$22 + sll $22,$1,19 + xor $31,$21 + srl $21,$1,22 + xor $31,$22 + sll $22,$1,30 + xor $31,$21 + sw $16,32($29) # offload to ring buffer + xor $31,$22 # Sigma0(a) + + or $21,$1,$2 + and $22,$1,$2 + and $21,$3 + or $22,$21 # Maj(a,b,c) + addu $20,$23 # +=K[8] + addu $31,$22 + + addu $7,$20 + addu $31,$20 + lwl $18,43($5) + lwr $18,40($5) + srl $22,$17,24 # byte swap(9) + srl $23,$17,8 + andi $8,$17,0xFF00 + sll $17,$17,24 + andi $23,0xFF00 + sll $8,$8,8 + or $17,$22 + or $23,$8 + or $17,$23 + addu $21,$17,$30 # 9 + srl $30,$7,6 + xor $8,$24,$25 + sll $23,$7,7 + and $8,$7 + srl $22,$7,11 + xor $30,$23 + sll $23,$7,21 + xor $30,$22 + srl $22,$7,25 + xor $30,$23 + sll $23,$7,26 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + srl $30,$31,2 + addu $21,$8 + lw $8,36($6) # K[9] + sll $23,$31,10 + addu $21,$22 + srl $22,$31,13 + xor $30,$23 + sll $23,$31,19 + xor $30,$22 + srl $22,$31,22 + xor $30,$23 + sll $23,$31,30 + xor $30,$22 + sw $17,36($29) # offload to ring buffer + xor $30,$23 # Sigma0(a) + + or $22,$31,$1 + and $23,$31,$1 + and $22,$2 + or $23,$22 # Maj(a,b,c) + addu $21,$8 # +=K[9] + addu $30,$23 + + addu $3,$21 + addu $30,$21 + lwl $19,47($5) + lwr $19,44($5) + srl $23,$18,24 # byte swap(10) + srl $8,$18,8 + andi $9,$18,0xFF00 + sll $18,$18,24 + andi $8,0xFF00 + sll $9,$9,8 + or $18,$23 + or $8,$9 + or $18,$8 + addu $22,$18,$25 # 10 + srl $25,$3,6 + xor $9,$7,$24 + sll $8,$3,7 + and $9,$3 + srl $23,$3,11 + xor $25,$8 + sll $8,$3,21 + xor $25,$23 + srl $23,$3,25 + xor $25,$8 + sll $8,$3,26 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + srl $25,$30,2 + addu $22,$9 + lw $9,40($6) # K[10] + sll $8,$30,10 + addu $22,$23 + srl $23,$30,13 + xor $25,$8 + sll $8,$30,19 + xor $25,$23 + srl $23,$30,22 + xor $25,$8 + sll $8,$30,30 + xor $25,$23 + sw $18,40($29) # offload to ring buffer + xor $25,$8 # Sigma0(a) + + or $23,$30,$31 + and $8,$30,$31 + and $23,$1 + or $8,$23 # Maj(a,b,c) + addu $22,$9 # +=K[10] + addu $25,$8 + + addu $2,$22 + addu $25,$22 + lwl $20,51($5) + lwr $20,48($5) + srl $8,$19,24 # byte swap(11) + srl $9,$19,8 + andi $10,$19,0xFF00 + sll $19,$19,24 + andi $9,0xFF00 + sll $10,$10,8 + or $19,$8 + or $9,$10 + or $19,$9 + addu $23,$19,$24 # 11 + srl $24,$2,6 + xor $10,$3,$7 + sll $9,$2,7 + and $10,$2 + srl $8,$2,11 + xor $24,$9 + sll $9,$2,21 + xor $24,$8 + srl $8,$2,25 + xor $24,$9 + sll $9,$2,26 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + srl $24,$25,2 + addu $23,$10 + lw $10,44($6) # K[11] + sll $9,$25,10 + addu $23,$8 + srl $8,$25,13 + xor $24,$9 + sll $9,$25,19 + xor $24,$8 + 
srl $8,$25,22 + xor $24,$9 + sll $9,$25,30 + xor $24,$8 + sw $19,44($29) # offload to ring buffer + xor $24,$9 # Sigma0(a) + + or $8,$25,$30 + and $9,$25,$30 + and $8,$31 + or $9,$8 # Maj(a,b,c) + addu $23,$10 # +=K[11] + addu $24,$9 + + addu $1,$23 + addu $24,$23 + lwl $21,55($5) + lwr $21,52($5) + srl $9,$20,24 # byte swap(12) + srl $10,$20,8 + andi $11,$20,0xFF00 + sll $20,$20,24 + andi $10,0xFF00 + sll $11,$11,8 + or $20,$9 + or $10,$11 + or $20,$10 + addu $8,$20,$7 # 12 + srl $7,$1,6 + xor $11,$2,$3 + sll $10,$1,7 + and $11,$1 + srl $9,$1,11 + xor $7,$10 + sll $10,$1,21 + xor $7,$9 + srl $9,$1,25 + xor $7,$10 + sll $10,$1,26 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + srl $7,$24,2 + addu $8,$11 + lw $11,48($6) # K[12] + sll $10,$24,10 + addu $8,$9 + srl $9,$24,13 + xor $7,$10 + sll $10,$24,19 + xor $7,$9 + srl $9,$24,22 + xor $7,$10 + sll $10,$24,30 + xor $7,$9 + sw $20,48($29) # offload to ring buffer + xor $7,$10 # Sigma0(a) + + or $9,$24,$25 + and $10,$24,$25 + and $9,$30 + or $10,$9 # Maj(a,b,c) + addu $8,$11 # +=K[12] + addu $7,$10 + + addu $31,$8 + addu $7,$8 + lwl $22,59($5) + lwr $22,56($5) + srl $10,$21,24 # byte swap(13) + srl $11,$21,8 + andi $12,$21,0xFF00 + sll $21,$21,24 + andi $11,0xFF00 + sll $12,$12,8 + or $21,$10 + or $11,$12 + or $21,$11 + addu $9,$21,$3 # 13 + srl $3,$31,6 + xor $12,$1,$2 + sll $11,$31,7 + and $12,$31 + srl $10,$31,11 + xor $3,$11 + sll $11,$31,21 + xor $3,$10 + srl $10,$31,25 + xor $3,$11 + sll $11,$31,26 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + srl $3,$7,2 + addu $9,$12 + lw $12,52($6) # K[13] + sll $11,$7,10 + addu $9,$10 + srl $10,$7,13 + xor $3,$11 + sll $11,$7,19 + xor $3,$10 + srl $10,$7,22 + xor $3,$11 + sll $11,$7,30 + xor $3,$10 + sw $21,52($29) # offload to ring buffer + xor $3,$11 # Sigma0(a) + + or $10,$7,$24 + and $11,$7,$24 + and $10,$25 + or $11,$10 # Maj(a,b,c) + addu $9,$12 # +=K[13] + addu $3,$11 + + addu $30,$9 + addu $3,$9 + lw $8,0($29) # prefetch from ring buffer + lwl $23,63($5) + lwr $23,60($5) + srl $11,$22,24 # byte swap(14) + srl $12,$22,8 + andi $13,$22,0xFF00 + sll $22,$22,24 + andi $12,0xFF00 + sll $13,$13,8 + or $22,$11 + or $12,$13 + or $22,$12 + addu $10,$22,$2 # 14 + srl $2,$30,6 + xor $13,$31,$1 + sll $12,$30,7 + and $13,$30 + srl $11,$30,11 + xor $2,$12 + sll $12,$30,21 + xor $2,$11 + srl $11,$30,25 + xor $2,$12 + sll $12,$30,26 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + srl $2,$3,2 + addu $10,$13 + lw $13,56($6) # K[14] + sll $12,$3,10 + addu $10,$11 + srl $11,$3,13 + xor $2,$12 + sll $12,$3,19 + xor $2,$11 + srl $11,$3,22 + xor $2,$12 + sll $12,$3,30 + xor $2,$11 + sw $22,56($29) # offload to ring buffer + xor $2,$12 # Sigma0(a) + + or $11,$3,$7 + and $12,$3,$7 + and $11,$24 + or $12,$11 # Maj(a,b,c) + addu $10,$13 # +=K[14] + addu $2,$12 + + addu $25,$10 + addu $2,$10 + lw $9,4($29) # prefetch from ring buffer + srl $12,$23,24 # byte swap(15) + srl $13,$23,8 + andi $14,$23,0xFF00 + sll $23,$23,24 + andi $13,0xFF00 + sll $14,$14,8 + or $23,$12 + or $13,$14 + or $23,$13 + addu $11,$23,$1 # 15 + srl $1,$25,6 + xor $14,$30,$31 + sll $13,$25,7 + and $14,$25 + srl $12,$25,11 + xor $1,$13 + sll $13,$25,21 + xor $1,$12 + srl $12,$25,25 + xor $1,$13 + sll $13,$25,26 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + srl $1,$2,2 + addu $11,$14 + lw $14,60($6) # K[15] + sll $13,$2,10 + addu $11,$12 + srl $12,$2,13 + xor $1,$13 + sll $13,$2,19 + xor $1,$12 + srl $12,$2,22 + xor $1,$13 + sll $13,$2,30 + xor $1,$12 + sw 
$23,60($29) # offload to ring buffer + xor $1,$13 # Sigma0(a) + + or $12,$2,$3 + and $13,$2,$3 + and $12,$7 + or $13,$12 # Maj(a,b,c) + addu $11,$14 # +=K[15] + addu $1,$13 + + addu $24,$11 + addu $1,$11 + lw $10,8($29) # prefetch from ring buffer + b .L16_xx +.align 4 +.L16_xx: + srl $14,$9,3 # Xupdate(16) + addu $8,$17 # +=X[i+9] + sll $13,$9,14 + srl $12,$9,7 + xor $14,$13 + sll $13,11 + xor $14,$12 + srl $12,$9,18 + xor $14,$13 + + srl $15,$22,10 + xor $14,$12 # sigma0(X[i+1]) + sll $13,$22,13 + addu $8,$14 + srl $12,$22,17 + xor $15,$13 + sll $13,2 + xor $15,$12 + srl $12,$22,19 + xor $15,$13 + + xor $15,$12 # sigma1(X[i+14]) + addu $8,$15 + addu $12,$8,$31 # 16 + srl $31,$24,6 + xor $15,$25,$30 + sll $14,$24,7 + and $15,$24 + srl $13,$24,11 + xor $31,$14 + sll $14,$24,21 + xor $31,$13 + srl $13,$24,25 + xor $31,$14 + sll $14,$24,26 + xor $31,$13 + xor $15,$30 # Ch(e,f,g) + xor $13,$14,$31 # Sigma1(e) + + srl $31,$1,2 + addu $12,$15 + lw $15,64($6) # K[16] + sll $14,$1,10 + addu $12,$13 + srl $13,$1,13 + xor $31,$14 + sll $14,$1,19 + xor $31,$13 + srl $13,$1,22 + xor $31,$14 + sll $14,$1,30 + xor $31,$13 + sw $8,0($29) # offload to ring buffer + xor $31,$14 # Sigma0(a) + + or $13,$1,$2 + and $14,$1,$2 + and $13,$3 + or $14,$13 # Maj(a,b,c) + addu $12,$15 # +=K[16] + addu $31,$14 + + addu $7,$12 + addu $31,$12 + lw $11,12($29) # prefetch from ring buffer + srl $15,$10,3 # Xupdate(17) + addu $9,$18 # +=X[i+9] + sll $14,$10,14 + srl $13,$10,7 + xor $15,$14 + sll $14,11 + xor $15,$13 + srl $13,$10,18 + xor $15,$14 + + srl $16,$23,10 + xor $15,$13 # sigma0(X[i+1]) + sll $14,$23,13 + addu $9,$15 + srl $13,$23,17 + xor $16,$14 + sll $14,2 + xor $16,$13 + srl $13,$23,19 + xor $16,$14 + + xor $16,$13 # sigma1(X[i+14]) + addu $9,$16 + addu $13,$9,$30 # 17 + srl $30,$7,6 + xor $16,$24,$25 + sll $15,$7,7 + and $16,$7 + srl $14,$7,11 + xor $30,$15 + sll $15,$7,21 + xor $30,$14 + srl $14,$7,25 + xor $30,$15 + sll $15,$7,26 + xor $30,$14 + xor $16,$25 # Ch(e,f,g) + xor $14,$15,$30 # Sigma1(e) + + srl $30,$31,2 + addu $13,$16 + lw $16,68($6) # K[17] + sll $15,$31,10 + addu $13,$14 + srl $14,$31,13 + xor $30,$15 + sll $15,$31,19 + xor $30,$14 + srl $14,$31,22 + xor $30,$15 + sll $15,$31,30 + xor $30,$14 + sw $9,4($29) # offload to ring buffer + xor $30,$15 # Sigma0(a) + + or $14,$31,$1 + and $15,$31,$1 + and $14,$2 + or $15,$14 # Maj(a,b,c) + addu $13,$16 # +=K[17] + addu $30,$15 + + addu $3,$13 + addu $30,$13 + lw $12,16($29) # prefetch from ring buffer + srl $16,$11,3 # Xupdate(18) + addu $10,$19 # +=X[i+9] + sll $15,$11,14 + srl $14,$11,7 + xor $16,$15 + sll $15,11 + xor $16,$14 + srl $14,$11,18 + xor $16,$15 + + srl $17,$8,10 + xor $16,$14 # sigma0(X[i+1]) + sll $15,$8,13 + addu $10,$16 + srl $14,$8,17 + xor $17,$15 + sll $15,2 + xor $17,$14 + srl $14,$8,19 + xor $17,$15 + + xor $17,$14 # sigma1(X[i+14]) + addu $10,$17 + addu $14,$10,$25 # 18 + srl $25,$3,6 + xor $17,$7,$24 + sll $16,$3,7 + and $17,$3 + srl $15,$3,11 + xor $25,$16 + sll $16,$3,21 + xor $25,$15 + srl $15,$3,25 + xor $25,$16 + sll $16,$3,26 + xor $25,$15 + xor $17,$24 # Ch(e,f,g) + xor $15,$16,$25 # Sigma1(e) + + srl $25,$30,2 + addu $14,$17 + lw $17,72($6) # K[18] + sll $16,$30,10 + addu $14,$15 + srl $15,$30,13 + xor $25,$16 + sll $16,$30,19 + xor $25,$15 + srl $15,$30,22 + xor $25,$16 + sll $16,$30,30 + xor $25,$15 + sw $10,8($29) # offload to ring buffer + xor $25,$16 # Sigma0(a) + + or $15,$30,$31 + and $16,$30,$31 + and $15,$1 + or $16,$15 # Maj(a,b,c) + addu $14,$17 # +=K[18] + addu $25,$16 + + addu $2,$14 + addu $25,$14 + lw 
$13,20($29) # prefetch from ring buffer + srl $17,$12,3 # Xupdate(19) + addu $11,$20 # +=X[i+9] + sll $16,$12,14 + srl $15,$12,7 + xor $17,$16 + sll $16,11 + xor $17,$15 + srl $15,$12,18 + xor $17,$16 + + srl $18,$9,10 + xor $17,$15 # sigma0(X[i+1]) + sll $16,$9,13 + addu $11,$17 + srl $15,$9,17 + xor $18,$16 + sll $16,2 + xor $18,$15 + srl $15,$9,19 + xor $18,$16 + + xor $18,$15 # sigma1(X[i+14]) + addu $11,$18 + addu $15,$11,$24 # 19 + srl $24,$2,6 + xor $18,$3,$7 + sll $17,$2,7 + and $18,$2 + srl $16,$2,11 + xor $24,$17 + sll $17,$2,21 + xor $24,$16 + srl $16,$2,25 + xor $24,$17 + sll $17,$2,26 + xor $24,$16 + xor $18,$7 # Ch(e,f,g) + xor $16,$17,$24 # Sigma1(e) + + srl $24,$25,2 + addu $15,$18 + lw $18,76($6) # K[19] + sll $17,$25,10 + addu $15,$16 + srl $16,$25,13 + xor $24,$17 + sll $17,$25,19 + xor $24,$16 + srl $16,$25,22 + xor $24,$17 + sll $17,$25,30 + xor $24,$16 + sw $11,12($29) # offload to ring buffer + xor $24,$17 # Sigma0(a) + + or $16,$25,$30 + and $17,$25,$30 + and $16,$31 + or $17,$16 # Maj(a,b,c) + addu $15,$18 # +=K[19] + addu $24,$17 + + addu $1,$15 + addu $24,$15 + lw $14,24($29) # prefetch from ring buffer + srl $18,$13,3 # Xupdate(20) + addu $12,$21 # +=X[i+9] + sll $17,$13,14 + srl $16,$13,7 + xor $18,$17 + sll $17,11 + xor $18,$16 + srl $16,$13,18 + xor $18,$17 + + srl $19,$10,10 + xor $18,$16 # sigma0(X[i+1]) + sll $17,$10,13 + addu $12,$18 + srl $16,$10,17 + xor $19,$17 + sll $17,2 + xor $19,$16 + srl $16,$10,19 + xor $19,$17 + + xor $19,$16 # sigma1(X[i+14]) + addu $12,$19 + addu $16,$12,$7 # 20 + srl $7,$1,6 + xor $19,$2,$3 + sll $18,$1,7 + and $19,$1 + srl $17,$1,11 + xor $7,$18 + sll $18,$1,21 + xor $7,$17 + srl $17,$1,25 + xor $7,$18 + sll $18,$1,26 + xor $7,$17 + xor $19,$3 # Ch(e,f,g) + xor $17,$18,$7 # Sigma1(e) + + srl $7,$24,2 + addu $16,$19 + lw $19,80($6) # K[20] + sll $18,$24,10 + addu $16,$17 + srl $17,$24,13 + xor $7,$18 + sll $18,$24,19 + xor $7,$17 + srl $17,$24,22 + xor $7,$18 + sll $18,$24,30 + xor $7,$17 + sw $12,16($29) # offload to ring buffer + xor $7,$18 # Sigma0(a) + + or $17,$24,$25 + and $18,$24,$25 + and $17,$30 + or $18,$17 # Maj(a,b,c) + addu $16,$19 # +=K[20] + addu $7,$18 + + addu $31,$16 + addu $7,$16 + lw $15,28($29) # prefetch from ring buffer + srl $19,$14,3 # Xupdate(21) + addu $13,$22 # +=X[i+9] + sll $18,$14,14 + srl $17,$14,7 + xor $19,$18 + sll $18,11 + xor $19,$17 + srl $17,$14,18 + xor $19,$18 + + srl $20,$11,10 + xor $19,$17 # sigma0(X[i+1]) + sll $18,$11,13 + addu $13,$19 + srl $17,$11,17 + xor $20,$18 + sll $18,2 + xor $20,$17 + srl $17,$11,19 + xor $20,$18 + + xor $20,$17 # sigma1(X[i+14]) + addu $13,$20 + addu $17,$13,$3 # 21 + srl $3,$31,6 + xor $20,$1,$2 + sll $19,$31,7 + and $20,$31 + srl $18,$31,11 + xor $3,$19 + sll $19,$31,21 + xor $3,$18 + srl $18,$31,25 + xor $3,$19 + sll $19,$31,26 + xor $3,$18 + xor $20,$2 # Ch(e,f,g) + xor $18,$19,$3 # Sigma1(e) + + srl $3,$7,2 + addu $17,$20 + lw $20,84($6) # K[21] + sll $19,$7,10 + addu $17,$18 + srl $18,$7,13 + xor $3,$19 + sll $19,$7,19 + xor $3,$18 + srl $18,$7,22 + xor $3,$19 + sll $19,$7,30 + xor $3,$18 + sw $13,20($29) # offload to ring buffer + xor $3,$19 # Sigma0(a) + + or $18,$7,$24 + and $19,$7,$24 + and $18,$25 + or $19,$18 # Maj(a,b,c) + addu $17,$20 # +=K[21] + addu $3,$19 + + addu $30,$17 + addu $3,$17 + lw $16,32($29) # prefetch from ring buffer + srl $20,$15,3 # Xupdate(22) + addu $14,$23 # +=X[i+9] + sll $19,$15,14 + srl $18,$15,7 + xor $20,$19 + sll $19,11 + xor $20,$18 + srl $18,$15,18 + xor $20,$19 + + srl $21,$12,10 + xor $20,$18 # sigma0(X[i+1]) 
+ sll $19,$12,13 + addu $14,$20 + srl $18,$12,17 + xor $21,$19 + sll $19,2 + xor $21,$18 + srl $18,$12,19 + xor $21,$19 + + xor $21,$18 # sigma1(X[i+14]) + addu $14,$21 + addu $18,$14,$2 # 22 + srl $2,$30,6 + xor $21,$31,$1 + sll $20,$30,7 + and $21,$30 + srl $19,$30,11 + xor $2,$20 + sll $20,$30,21 + xor $2,$19 + srl $19,$30,25 + xor $2,$20 + sll $20,$30,26 + xor $2,$19 + xor $21,$1 # Ch(e,f,g) + xor $19,$20,$2 # Sigma1(e) + + srl $2,$3,2 + addu $18,$21 + lw $21,88($6) # K[22] + sll $20,$3,10 + addu $18,$19 + srl $19,$3,13 + xor $2,$20 + sll $20,$3,19 + xor $2,$19 + srl $19,$3,22 + xor $2,$20 + sll $20,$3,30 + xor $2,$19 + sw $14,24($29) # offload to ring buffer + xor $2,$20 # Sigma0(a) + + or $19,$3,$7 + and $20,$3,$7 + and $19,$24 + or $20,$19 # Maj(a,b,c) + addu $18,$21 # +=K[22] + addu $2,$20 + + addu $25,$18 + addu $2,$18 + lw $17,36($29) # prefetch from ring buffer + srl $21,$16,3 # Xupdate(23) + addu $15,$8 # +=X[i+9] + sll $20,$16,14 + srl $19,$16,7 + xor $21,$20 + sll $20,11 + xor $21,$19 + srl $19,$16,18 + xor $21,$20 + + srl $22,$13,10 + xor $21,$19 # sigma0(X[i+1]) + sll $20,$13,13 + addu $15,$21 + srl $19,$13,17 + xor $22,$20 + sll $20,2 + xor $22,$19 + srl $19,$13,19 + xor $22,$20 + + xor $22,$19 # sigma1(X[i+14]) + addu $15,$22 + addu $19,$15,$1 # 23 + srl $1,$25,6 + xor $22,$30,$31 + sll $21,$25,7 + and $22,$25 + srl $20,$25,11 + xor $1,$21 + sll $21,$25,21 + xor $1,$20 + srl $20,$25,25 + xor $1,$21 + sll $21,$25,26 + xor $1,$20 + xor $22,$31 # Ch(e,f,g) + xor $20,$21,$1 # Sigma1(e) + + srl $1,$2,2 + addu $19,$22 + lw $22,92($6) # K[23] + sll $21,$2,10 + addu $19,$20 + srl $20,$2,13 + xor $1,$21 + sll $21,$2,19 + xor $1,$20 + srl $20,$2,22 + xor $1,$21 + sll $21,$2,30 + xor $1,$20 + sw $15,28($29) # offload to ring buffer + xor $1,$21 # Sigma0(a) + + or $20,$2,$3 + and $21,$2,$3 + and $20,$7 + or $21,$20 # Maj(a,b,c) + addu $19,$22 # +=K[23] + addu $1,$21 + + addu $24,$19 + addu $1,$19 + lw $18,40($29) # prefetch from ring buffer + srl $22,$17,3 # Xupdate(24) + addu $16,$9 # +=X[i+9] + sll $21,$17,14 + srl $20,$17,7 + xor $22,$21 + sll $21,11 + xor $22,$20 + srl $20,$17,18 + xor $22,$21 + + srl $23,$14,10 + xor $22,$20 # sigma0(X[i+1]) + sll $21,$14,13 + addu $16,$22 + srl $20,$14,17 + xor $23,$21 + sll $21,2 + xor $23,$20 + srl $20,$14,19 + xor $23,$21 + + xor $23,$20 # sigma1(X[i+14]) + addu $16,$23 + addu $20,$16,$31 # 24 + srl $31,$24,6 + xor $23,$25,$30 + sll $22,$24,7 + and $23,$24 + srl $21,$24,11 + xor $31,$22 + sll $22,$24,21 + xor $31,$21 + srl $21,$24,25 + xor $31,$22 + sll $22,$24,26 + xor $31,$21 + xor $23,$30 # Ch(e,f,g) + xor $21,$22,$31 # Sigma1(e) + + srl $31,$1,2 + addu $20,$23 + lw $23,96($6) # K[24] + sll $22,$1,10 + addu $20,$21 + srl $21,$1,13 + xor $31,$22 + sll $22,$1,19 + xor $31,$21 + srl $21,$1,22 + xor $31,$22 + sll $22,$1,30 + xor $31,$21 + sw $16,32($29) # offload to ring buffer + xor $31,$22 # Sigma0(a) + + or $21,$1,$2 + and $22,$1,$2 + and $21,$3 + or $22,$21 # Maj(a,b,c) + addu $20,$23 # +=K[24] + addu $31,$22 + + addu $7,$20 + addu $31,$20 + lw $19,44($29) # prefetch from ring buffer + srl $23,$18,3 # Xupdate(25) + addu $17,$10 # +=X[i+9] + sll $22,$18,14 + srl $21,$18,7 + xor $23,$22 + sll $22,11 + xor $23,$21 + srl $21,$18,18 + xor $23,$22 + + srl $8,$15,10 + xor $23,$21 # sigma0(X[i+1]) + sll $22,$15,13 + addu $17,$23 + srl $21,$15,17 + xor $8,$22 + sll $22,2 + xor $8,$21 + srl $21,$15,19 + xor $8,$22 + + xor $8,$21 # sigma1(X[i+14]) + addu $17,$8 + addu $21,$17,$30 # 25 + srl $30,$7,6 + xor $8,$24,$25 + sll $23,$7,7 + and $8,$7 + srl 
$22,$7,11 + xor $30,$23 + sll $23,$7,21 + xor $30,$22 + srl $22,$7,25 + xor $30,$23 + sll $23,$7,26 + xor $30,$22 + xor $8,$25 # Ch(e,f,g) + xor $22,$23,$30 # Sigma1(e) + + srl $30,$31,2 + addu $21,$8 + lw $8,100($6) # K[25] + sll $23,$31,10 + addu $21,$22 + srl $22,$31,13 + xor $30,$23 + sll $23,$31,19 + xor $30,$22 + srl $22,$31,22 + xor $30,$23 + sll $23,$31,30 + xor $30,$22 + sw $17,36($29) # offload to ring buffer + xor $30,$23 # Sigma0(a) + + or $22,$31,$1 + and $23,$31,$1 + and $22,$2 + or $23,$22 # Maj(a,b,c) + addu $21,$8 # +=K[25] + addu $30,$23 + + addu $3,$21 + addu $30,$21 + lw $20,48($29) # prefetch from ring buffer + srl $8,$19,3 # Xupdate(26) + addu $18,$11 # +=X[i+9] + sll $23,$19,14 + srl $22,$19,7 + xor $8,$23 + sll $23,11 + xor $8,$22 + srl $22,$19,18 + xor $8,$23 + + srl $9,$16,10 + xor $8,$22 # sigma0(X[i+1]) + sll $23,$16,13 + addu $18,$8 + srl $22,$16,17 + xor $9,$23 + sll $23,2 + xor $9,$22 + srl $22,$16,19 + xor $9,$23 + + xor $9,$22 # sigma1(X[i+14]) + addu $18,$9 + addu $22,$18,$25 # 26 + srl $25,$3,6 + xor $9,$7,$24 + sll $8,$3,7 + and $9,$3 + srl $23,$3,11 + xor $25,$8 + sll $8,$3,21 + xor $25,$23 + srl $23,$3,25 + xor $25,$8 + sll $8,$3,26 + xor $25,$23 + xor $9,$24 # Ch(e,f,g) + xor $23,$8,$25 # Sigma1(e) + + srl $25,$30,2 + addu $22,$9 + lw $9,104($6) # K[26] + sll $8,$30,10 + addu $22,$23 + srl $23,$30,13 + xor $25,$8 + sll $8,$30,19 + xor $25,$23 + srl $23,$30,22 + xor $25,$8 + sll $8,$30,30 + xor $25,$23 + sw $18,40($29) # offload to ring buffer + xor $25,$8 # Sigma0(a) + + or $23,$30,$31 + and $8,$30,$31 + and $23,$1 + or $8,$23 # Maj(a,b,c) + addu $22,$9 # +=K[26] + addu $25,$8 + + addu $2,$22 + addu $25,$22 + lw $21,52($29) # prefetch from ring buffer + srl $9,$20,3 # Xupdate(27) + addu $19,$12 # +=X[i+9] + sll $8,$20,14 + srl $23,$20,7 + xor $9,$8 + sll $8,11 + xor $9,$23 + srl $23,$20,18 + xor $9,$8 + + srl $10,$17,10 + xor $9,$23 # sigma0(X[i+1]) + sll $8,$17,13 + addu $19,$9 + srl $23,$17,17 + xor $10,$8 + sll $8,2 + xor $10,$23 + srl $23,$17,19 + xor $10,$8 + + xor $10,$23 # sigma1(X[i+14]) + addu $19,$10 + addu $23,$19,$24 # 27 + srl $24,$2,6 + xor $10,$3,$7 + sll $9,$2,7 + and $10,$2 + srl $8,$2,11 + xor $24,$9 + sll $9,$2,21 + xor $24,$8 + srl $8,$2,25 + xor $24,$9 + sll $9,$2,26 + xor $24,$8 + xor $10,$7 # Ch(e,f,g) + xor $8,$9,$24 # Sigma1(e) + + srl $24,$25,2 + addu $23,$10 + lw $10,108($6) # K[27] + sll $9,$25,10 + addu $23,$8 + srl $8,$25,13 + xor $24,$9 + sll $9,$25,19 + xor $24,$8 + srl $8,$25,22 + xor $24,$9 + sll $9,$25,30 + xor $24,$8 + sw $19,44($29) # offload to ring buffer + xor $24,$9 # Sigma0(a) + + or $8,$25,$30 + and $9,$25,$30 + and $8,$31 + or $9,$8 # Maj(a,b,c) + addu $23,$10 # +=K[27] + addu $24,$9 + + addu $1,$23 + addu $24,$23 + lw $22,56($29) # prefetch from ring buffer + srl $10,$21,3 # Xupdate(28) + addu $20,$13 # +=X[i+9] + sll $9,$21,14 + srl $8,$21,7 + xor $10,$9 + sll $9,11 + xor $10,$8 + srl $8,$21,18 + xor $10,$9 + + srl $11,$18,10 + xor $10,$8 # sigma0(X[i+1]) + sll $9,$18,13 + addu $20,$10 + srl $8,$18,17 + xor $11,$9 + sll $9,2 + xor $11,$8 + srl $8,$18,19 + xor $11,$9 + + xor $11,$8 # sigma1(X[i+14]) + addu $20,$11 + addu $8,$20,$7 # 28 + srl $7,$1,6 + xor $11,$2,$3 + sll $10,$1,7 + and $11,$1 + srl $9,$1,11 + xor $7,$10 + sll $10,$1,21 + xor $7,$9 + srl $9,$1,25 + xor $7,$10 + sll $10,$1,26 + xor $7,$9 + xor $11,$3 # Ch(e,f,g) + xor $9,$10,$7 # Sigma1(e) + + srl $7,$24,2 + addu $8,$11 + lw $11,112($6) # K[28] + sll $10,$24,10 + addu $8,$9 + srl $9,$24,13 + xor $7,$10 + sll $10,$24,19 + xor $7,$9 + srl 
$9,$24,22 + xor $7,$10 + sll $10,$24,30 + xor $7,$9 + sw $20,48($29) # offload to ring buffer + xor $7,$10 # Sigma0(a) + + or $9,$24,$25 + and $10,$24,$25 + and $9,$30 + or $10,$9 # Maj(a,b,c) + addu $8,$11 # +=K[28] + addu $7,$10 + + addu $31,$8 + addu $7,$8 + lw $23,60($29) # prefetch from ring buffer + srl $11,$22,3 # Xupdate(29) + addu $21,$14 # +=X[i+9] + sll $10,$22,14 + srl $9,$22,7 + xor $11,$10 + sll $10,11 + xor $11,$9 + srl $9,$22,18 + xor $11,$10 + + srl $12,$19,10 + xor $11,$9 # sigma0(X[i+1]) + sll $10,$19,13 + addu $21,$11 + srl $9,$19,17 + xor $12,$10 + sll $10,2 + xor $12,$9 + srl $9,$19,19 + xor $12,$10 + + xor $12,$9 # sigma1(X[i+14]) + addu $21,$12 + addu $9,$21,$3 # 29 + srl $3,$31,6 + xor $12,$1,$2 + sll $11,$31,7 + and $12,$31 + srl $10,$31,11 + xor $3,$11 + sll $11,$31,21 + xor $3,$10 + srl $10,$31,25 + xor $3,$11 + sll $11,$31,26 + xor $3,$10 + xor $12,$2 # Ch(e,f,g) + xor $10,$11,$3 # Sigma1(e) + + srl $3,$7,2 + addu $9,$12 + lw $12,116($6) # K[29] + sll $11,$7,10 + addu $9,$10 + srl $10,$7,13 + xor $3,$11 + sll $11,$7,19 + xor $3,$10 + srl $10,$7,22 + xor $3,$11 + sll $11,$7,30 + xor $3,$10 + sw $21,52($29) # offload to ring buffer + xor $3,$11 # Sigma0(a) + + or $10,$7,$24 + and $11,$7,$24 + and $10,$25 + or $11,$10 # Maj(a,b,c) + addu $9,$12 # +=K[29] + addu $3,$11 + + addu $30,$9 + addu $3,$9 + lw $8,0($29) # prefetch from ring buffer + srl $12,$23,3 # Xupdate(30) + addu $22,$15 # +=X[i+9] + sll $11,$23,14 + srl $10,$23,7 + xor $12,$11 + sll $11,11 + xor $12,$10 + srl $10,$23,18 + xor $12,$11 + + srl $13,$20,10 + xor $12,$10 # sigma0(X[i+1]) + sll $11,$20,13 + addu $22,$12 + srl $10,$20,17 + xor $13,$11 + sll $11,2 + xor $13,$10 + srl $10,$20,19 + xor $13,$11 + + xor $13,$10 # sigma1(X[i+14]) + addu $22,$13 + addu $10,$22,$2 # 30 + srl $2,$30,6 + xor $13,$31,$1 + sll $12,$30,7 + and $13,$30 + srl $11,$30,11 + xor $2,$12 + sll $12,$30,21 + xor $2,$11 + srl $11,$30,25 + xor $2,$12 + sll $12,$30,26 + xor $2,$11 + xor $13,$1 # Ch(e,f,g) + xor $11,$12,$2 # Sigma1(e) + + srl $2,$3,2 + addu $10,$13 + lw $13,120($6) # K[30] + sll $12,$3,10 + addu $10,$11 + srl $11,$3,13 + xor $2,$12 + sll $12,$3,19 + xor $2,$11 + srl $11,$3,22 + xor $2,$12 + sll $12,$3,30 + xor $2,$11 + sw $22,56($29) # offload to ring buffer + xor $2,$12 # Sigma0(a) + + or $11,$3,$7 + and $12,$3,$7 + and $11,$24 + or $12,$11 # Maj(a,b,c) + addu $10,$13 # +=K[30] + addu $2,$12 + + addu $25,$10 + addu $2,$10 + lw $9,4($29) # prefetch from ring buffer + srl $13,$8,3 # Xupdate(31) + addu $23,$16 # +=X[i+9] + sll $12,$8,14 + srl $11,$8,7 + xor $13,$12 + sll $12,11 + xor $13,$11 + srl $11,$8,18 + xor $13,$12 + + srl $14,$21,10 + xor $13,$11 # sigma0(X[i+1]) + sll $12,$21,13 + addu $23,$13 + srl $11,$21,17 + xor $14,$12 + sll $12,2 + xor $14,$11 + srl $11,$21,19 + xor $14,$12 + + xor $14,$11 # sigma1(X[i+14]) + addu $23,$14 + addu $11,$23,$1 # 31 + srl $1,$25,6 + xor $14,$30,$31 + sll $13,$25,7 + and $14,$25 + srl $12,$25,11 + xor $1,$13 + sll $13,$25,21 + xor $1,$12 + srl $12,$25,25 + xor $1,$13 + sll $13,$25,26 + xor $1,$12 + xor $14,$31 # Ch(e,f,g) + xor $12,$13,$1 # Sigma1(e) + + srl $1,$2,2 + addu $11,$14 + lw $14,124($6) # K[31] + sll $13,$2,10 + addu $11,$12 + srl $12,$2,13 + xor $1,$13 + sll $13,$2,19 + xor $1,$12 + srl $12,$2,22 + xor $1,$13 + sll $13,$2,30 + xor $1,$12 + sw $23,60($29) # offload to ring buffer + xor $1,$13 # Sigma0(a) + + or $12,$2,$3 + and $13,$2,$3 + and $12,$7 + or $13,$12 # Maj(a,b,c) + addu $11,$14 # +=K[31] + addu $1,$13 + + addu $24,$11 + addu $1,$11 + lw $10,8($29) # 
prefetch from ring buffer + and $14,0xfff + li $15,2290 + .set noreorder + bne $14,$15,.L16_xx + add $6,16*4 # Ktbl+=16 + + lw $23,16*4($29) # restore pointer to the end of input + lw $8,0*4($4) + lw $9,1*4($4) + lw $10,2*4($4) + add $5,16*4 + lw $11,3*4($4) + addu $1,$8 + lw $12,4*4($4) + addu $2,$9 + lw $13,5*4($4) + addu $3,$10 + lw $14,6*4($4) + addu $7,$11 + lw $15,7*4($4) + addu $24,$12 + sw $1,0*4($4) + addu $25,$13 + sw $2,1*4($4) + addu $30,$14 + sw $3,2*4($4) + addu $31,$15 + sw $7,3*4($4) + sw $24,4*4($4) + sw $25,5*4($4) + sw $30,6*4($4) + sw $31,7*4($4) + + bne $5,$23,.Loop + sub $6,192 # rewind $6 + + lw $31,128-1*4($29) + lw $30,128-2*4($29) + lw $23,128-3*4($29) + lw $22,128-4*4($29) + lw $21,128-5*4($29) + lw $20,128-6*4($29) + lw $19,128-7*4($29) + lw $18,128-8*4($29) + lw $17,128-9*4($29) + lw $16,128-10*4($29) + jr $31 + add $29,128 +.end sha256_block_data_order + +.rdata +.align 5 +K256: + .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 + .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 + .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 + .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 + .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc + .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da + .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 + .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 + .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 + .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 + .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 + .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 + .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 + .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 + .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 + .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 +.asciiz "SHA256 for MIPS, CRYPTOGAMS by " +.align 5 + diff --git a/app/openssl/crypto/sha/asm/sha256-x86_64.S b/app/openssl/crypto/sha/asm/sha256-x86_64.S new file mode 100644 index 00000000..db5b898f --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha256-x86_64.S @@ -0,0 +1,1778 @@ +.text + +.globl sha256_block_data_order +.type sha256_block_data_order,@function +.align 16 +sha256_block_data_order: + pushq %rbx + pushq %rbp + pushq %r12 + pushq %r13 + pushq %r14 + pushq %r15 + movq %rsp,%r11 + shlq $4,%rdx + subq $64+32,%rsp + leaq (%rsi,%rdx,4),%rdx + andq $-64,%rsp + movq %rdi,64+0(%rsp) + movq %rsi,64+8(%rsp) + movq %rdx,64+16(%rsp) + movq %r11,64+24(%rsp) +.Lprologue: + + leaq K256(%rip),%rbp + + movl 0(%rdi),%eax + movl 4(%rdi),%ebx + movl 8(%rdi),%ecx + movl 12(%rdi),%edx + movl 16(%rdi),%r8d + movl 20(%rdi),%r9d + movl 24(%rdi),%r10d + movl 28(%rdi),%r11d + jmp .Lloop + +.align 16 +.Lloop: + xorq %rdi,%rdi + movl 0(%rsi),%r12d + movl %r8d,%r13d + movl %eax,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r9d,%r15d + movl %r12d,0(%rsp) + + rorl $9,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + rorl $5,%r13d + addl %r11d,%r12d + xorl %eax,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r8d,%r15d + movl %ebx,%r11d + + rorl $11,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + xorl %ecx,%r11d + xorl %eax,%r14d + addl %r15d,%r12d + movl %ebx,%r15d + + rorl $6,%r13d + andl %eax,%r11d + andl %ecx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r11d + + addl %r12d,%edx + addl %r12d,%r11d + leaq 1(%rdi),%rdi + addl %r14d,%r11d + + movl 4(%rsi),%r12d + movl %edx,%r13d + movl %r11d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r8d,%r15d + movl %r12d,4(%rsp) + + rorl $9,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + rorl $5,%r13d 
+ addl %r10d,%r12d + xorl %r11d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %edx,%r15d + movl %eax,%r10d + + rorl $11,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + xorl %ebx,%r10d + xorl %r11d,%r14d + addl %r15d,%r12d + movl %eax,%r15d + + rorl $6,%r13d + andl %r11d,%r10d + andl %ebx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r10d + + addl %r12d,%ecx + addl %r12d,%r10d + leaq 1(%rdi),%rdi + addl %r14d,%r10d + + movl 8(%rsi),%r12d + movl %ecx,%r13d + movl %r10d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %edx,%r15d + movl %r12d,8(%rsp) + + rorl $9,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + rorl $5,%r13d + addl %r9d,%r12d + xorl %r10d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ecx,%r15d + movl %r11d,%r9d + + rorl $11,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + xorl %eax,%r9d + xorl %r10d,%r14d + addl %r15d,%r12d + movl %r11d,%r15d + + rorl $6,%r13d + andl %r10d,%r9d + andl %eax,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r9d + + addl %r12d,%ebx + addl %r12d,%r9d + leaq 1(%rdi),%rdi + addl %r14d,%r9d + + movl 12(%rsi),%r12d + movl %ebx,%r13d + movl %r9d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %ecx,%r15d + movl %r12d,12(%rsp) + + rorl $9,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + rorl $5,%r13d + addl %r8d,%r12d + xorl %r9d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ebx,%r15d + movl %r10d,%r8d + + rorl $11,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + xorl %r11d,%r8d + xorl %r9d,%r14d + addl %r15d,%r12d + movl %r10d,%r15d + + rorl $6,%r13d + andl %r9d,%r8d + andl %r11d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r8d + + addl %r12d,%eax + addl %r12d,%r8d + leaq 1(%rdi),%rdi + addl %r14d,%r8d + + movl 16(%rsi),%r12d + movl %eax,%r13d + movl %r8d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %ebx,%r15d + movl %r12d,16(%rsp) + + rorl $9,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + rorl $5,%r13d + addl %edx,%r12d + xorl %r8d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %eax,%r15d + movl %r9d,%edx + + rorl $11,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + xorl %r10d,%edx + xorl %r8d,%r14d + addl %r15d,%r12d + movl %r9d,%r15d + + rorl $6,%r13d + andl %r8d,%edx + andl %r10d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%edx + + addl %r12d,%r11d + addl %r12d,%edx + leaq 1(%rdi),%rdi + addl %r14d,%edx + + movl 20(%rsi),%r12d + movl %r11d,%r13d + movl %edx,%r14d + bswapl %r12d + rorl $14,%r13d + movl %eax,%r15d + movl %r12d,20(%rsp) + + rorl $9,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + rorl $5,%r13d + addl %ecx,%r12d + xorl %edx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r11d,%r15d + movl %r8d,%ecx + + rorl $11,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + xorl %r9d,%ecx + xorl %edx,%r14d + addl %r15d,%r12d + movl %r8d,%r15d + + rorl $6,%r13d + andl %edx,%ecx + andl %r9d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ecx + + addl %r12d,%r10d + addl %r12d,%ecx + leaq 1(%rdi),%rdi + addl %r14d,%ecx + + movl 24(%rsi),%r12d + movl %r10d,%r13d + movl %ecx,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r11d,%r15d + movl %r12d,24(%rsp) + + rorl $9,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + rorl $5,%r13d + addl %ebx,%r12d + xorl %ecx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r10d,%r15d + movl %edx,%ebx + + rorl $11,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + xorl %r8d,%ebx + xorl %ecx,%r14d + addl %r15d,%r12d + movl %edx,%r15d + + rorl $6,%r13d + andl %ecx,%ebx + andl %r8d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ebx + + addl %r12d,%r9d + addl %r12d,%ebx + leaq 1(%rdi),%rdi + addl %r14d,%ebx + + movl 28(%rsi),%r12d + 
movl %r9d,%r13d + movl %ebx,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r10d,%r15d + movl %r12d,28(%rsp) + + rorl $9,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + rorl $5,%r13d + addl %eax,%r12d + xorl %ebx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r9d,%r15d + movl %ecx,%eax + + rorl $11,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + xorl %edx,%eax + xorl %ebx,%r14d + addl %r15d,%r12d + movl %ecx,%r15d + + rorl $6,%r13d + andl %ebx,%eax + andl %edx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%eax + + addl %r12d,%r8d + addl %r12d,%eax + leaq 1(%rdi),%rdi + addl %r14d,%eax + + movl 32(%rsi),%r12d + movl %r8d,%r13d + movl %eax,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r9d,%r15d + movl %r12d,32(%rsp) + + rorl $9,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + rorl $5,%r13d + addl %r11d,%r12d + xorl %eax,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r8d,%r15d + movl %ebx,%r11d + + rorl $11,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + xorl %ecx,%r11d + xorl %eax,%r14d + addl %r15d,%r12d + movl %ebx,%r15d + + rorl $6,%r13d + andl %eax,%r11d + andl %ecx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r11d + + addl %r12d,%edx + addl %r12d,%r11d + leaq 1(%rdi),%rdi + addl %r14d,%r11d + + movl 36(%rsi),%r12d + movl %edx,%r13d + movl %r11d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r8d,%r15d + movl %r12d,36(%rsp) + + rorl $9,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + rorl $5,%r13d + addl %r10d,%r12d + xorl %r11d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %edx,%r15d + movl %eax,%r10d + + rorl $11,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + xorl %ebx,%r10d + xorl %r11d,%r14d + addl %r15d,%r12d + movl %eax,%r15d + + rorl $6,%r13d + andl %r11d,%r10d + andl %ebx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r10d + + addl %r12d,%ecx + addl %r12d,%r10d + leaq 1(%rdi),%rdi + addl %r14d,%r10d + + movl 40(%rsi),%r12d + movl %ecx,%r13d + movl %r10d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %edx,%r15d + movl %r12d,40(%rsp) + + rorl $9,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + rorl $5,%r13d + addl %r9d,%r12d + xorl %r10d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ecx,%r15d + movl %r11d,%r9d + + rorl $11,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + xorl %eax,%r9d + xorl %r10d,%r14d + addl %r15d,%r12d + movl %r11d,%r15d + + rorl $6,%r13d + andl %r10d,%r9d + andl %eax,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r9d + + addl %r12d,%ebx + addl %r12d,%r9d + leaq 1(%rdi),%rdi + addl %r14d,%r9d + + movl 44(%rsi),%r12d + movl %ebx,%r13d + movl %r9d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %ecx,%r15d + movl %r12d,44(%rsp) + + rorl $9,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + rorl $5,%r13d + addl %r8d,%r12d + xorl %r9d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ebx,%r15d + movl %r10d,%r8d + + rorl $11,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + xorl %r11d,%r8d + xorl %r9d,%r14d + addl %r15d,%r12d + movl %r10d,%r15d + + rorl $6,%r13d + andl %r9d,%r8d + andl %r11d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r8d + + addl %r12d,%eax + addl %r12d,%r8d + leaq 1(%rdi),%rdi + addl %r14d,%r8d + + movl 48(%rsi),%r12d + movl %eax,%r13d + movl %r8d,%r14d + bswapl %r12d + rorl $14,%r13d + movl %ebx,%r15d + movl %r12d,48(%rsp) + + rorl $9,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + rorl $5,%r13d + addl %edx,%r12d + xorl %r8d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %eax,%r15d + movl %r9d,%edx + + rorl $11,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + xorl %r10d,%edx + xorl %r8d,%r14d + addl %r15d,%r12d + movl %r9d,%r15d + + rorl $6,%r13d + andl 
%r8d,%edx + andl %r10d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%edx + + addl %r12d,%r11d + addl %r12d,%edx + leaq 1(%rdi),%rdi + addl %r14d,%edx + + movl 52(%rsi),%r12d + movl %r11d,%r13d + movl %edx,%r14d + bswapl %r12d + rorl $14,%r13d + movl %eax,%r15d + movl %r12d,52(%rsp) + + rorl $9,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + rorl $5,%r13d + addl %ecx,%r12d + xorl %edx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r11d,%r15d + movl %r8d,%ecx + + rorl $11,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + xorl %r9d,%ecx + xorl %edx,%r14d + addl %r15d,%r12d + movl %r8d,%r15d + + rorl $6,%r13d + andl %edx,%ecx + andl %r9d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ecx + + addl %r12d,%r10d + addl %r12d,%ecx + leaq 1(%rdi),%rdi + addl %r14d,%ecx + + movl 56(%rsi),%r12d + movl %r10d,%r13d + movl %ecx,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r11d,%r15d + movl %r12d,56(%rsp) + + rorl $9,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + rorl $5,%r13d + addl %ebx,%r12d + xorl %ecx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r10d,%r15d + movl %edx,%ebx + + rorl $11,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + xorl %r8d,%ebx + xorl %ecx,%r14d + addl %r15d,%r12d + movl %edx,%r15d + + rorl $6,%r13d + andl %ecx,%ebx + andl %r8d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ebx + + addl %r12d,%r9d + addl %r12d,%ebx + leaq 1(%rdi),%rdi + addl %r14d,%ebx + + movl 60(%rsi),%r12d + movl %r9d,%r13d + movl %ebx,%r14d + bswapl %r12d + rorl $14,%r13d + movl %r10d,%r15d + movl %r12d,60(%rsp) + + rorl $9,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + rorl $5,%r13d + addl %eax,%r12d + xorl %ebx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r9d,%r15d + movl %ecx,%eax + + rorl $11,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + xorl %edx,%eax + xorl %ebx,%r14d + addl %r15d,%r12d + movl %ecx,%r15d + + rorl $6,%r13d + andl %ebx,%eax + andl %edx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%eax + + addl %r12d,%r8d + addl %r12d,%eax + leaq 1(%rdi),%rdi + addl %r14d,%eax + + jmp .Lrounds_16_xx +.align 16 +.Lrounds_16_xx: + movl 4(%rsp),%r13d + movl 56(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 36(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 0(%rsp),%r12d + movl %r8d,%r13d + addl %r14d,%r12d + movl %eax,%r14d + rorl $14,%r13d + movl %r9d,%r15d + movl %r12d,0(%rsp) + + rorl $9,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + rorl $5,%r13d + addl %r11d,%r12d + xorl %eax,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r8d,%r15d + movl %ebx,%r11d + + rorl $11,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + xorl %ecx,%r11d + xorl %eax,%r14d + addl %r15d,%r12d + movl %ebx,%r15d + + rorl $6,%r13d + andl %eax,%r11d + andl %ecx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r11d + + addl %r12d,%edx + addl %r12d,%r11d + leaq 1(%rdi),%rdi + addl %r14d,%r11d + + movl 8(%rsp),%r13d + movl 60(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 40(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 4(%rsp),%r12d + movl %edx,%r13d + addl %r14d,%r12d + movl %r11d,%r14d + rorl $14,%r13d + movl %r8d,%r15d + movl %r12d,4(%rsp) + + rorl $9,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + rorl $5,%r13d + addl %r10d,%r12d + xorl 
%r11d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %edx,%r15d + movl %eax,%r10d + + rorl $11,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + xorl %ebx,%r10d + xorl %r11d,%r14d + addl %r15d,%r12d + movl %eax,%r15d + + rorl $6,%r13d + andl %r11d,%r10d + andl %ebx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r10d + + addl %r12d,%ecx + addl %r12d,%r10d + leaq 1(%rdi),%rdi + addl %r14d,%r10d + + movl 12(%rsp),%r13d + movl 0(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 44(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 8(%rsp),%r12d + movl %ecx,%r13d + addl %r14d,%r12d + movl %r10d,%r14d + rorl $14,%r13d + movl %edx,%r15d + movl %r12d,8(%rsp) + + rorl $9,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + rorl $5,%r13d + addl %r9d,%r12d + xorl %r10d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ecx,%r15d + movl %r11d,%r9d + + rorl $11,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + xorl %eax,%r9d + xorl %r10d,%r14d + addl %r15d,%r12d + movl %r11d,%r15d + + rorl $6,%r13d + andl %r10d,%r9d + andl %eax,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r9d + + addl %r12d,%ebx + addl %r12d,%r9d + leaq 1(%rdi),%rdi + addl %r14d,%r9d + + movl 16(%rsp),%r13d + movl 4(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 48(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 12(%rsp),%r12d + movl %ebx,%r13d + addl %r14d,%r12d + movl %r9d,%r14d + rorl $14,%r13d + movl %ecx,%r15d + movl %r12d,12(%rsp) + + rorl $9,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + rorl $5,%r13d + addl %r8d,%r12d + xorl %r9d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ebx,%r15d + movl %r10d,%r8d + + rorl $11,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + xorl %r11d,%r8d + xorl %r9d,%r14d + addl %r15d,%r12d + movl %r10d,%r15d + + rorl $6,%r13d + andl %r9d,%r8d + andl %r11d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r8d + + addl %r12d,%eax + addl %r12d,%r8d + leaq 1(%rdi),%rdi + addl %r14d,%r8d + + movl 20(%rsp),%r13d + movl 8(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 52(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 16(%rsp),%r12d + movl %eax,%r13d + addl %r14d,%r12d + movl %r8d,%r14d + rorl $14,%r13d + movl %ebx,%r15d + movl %r12d,16(%rsp) + + rorl $9,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + rorl $5,%r13d + addl %edx,%r12d + xorl %r8d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %eax,%r15d + movl %r9d,%edx + + rorl $11,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + xorl %r10d,%edx + xorl %r8d,%r14d + addl %r15d,%r12d + movl %r9d,%r15d + + rorl $6,%r13d + andl %r8d,%edx + andl %r10d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%edx + + addl %r12d,%r11d + addl %r12d,%edx + leaq 1(%rdi),%rdi + addl %r14d,%edx + + movl 24(%rsp),%r13d + movl 12(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 56(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 20(%rsp),%r12d + movl %r11d,%r13d + addl 
%r14d,%r12d + movl %edx,%r14d + rorl $14,%r13d + movl %eax,%r15d + movl %r12d,20(%rsp) + + rorl $9,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + rorl $5,%r13d + addl %ecx,%r12d + xorl %edx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r11d,%r15d + movl %r8d,%ecx + + rorl $11,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + xorl %r9d,%ecx + xorl %edx,%r14d + addl %r15d,%r12d + movl %r8d,%r15d + + rorl $6,%r13d + andl %edx,%ecx + andl %r9d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ecx + + addl %r12d,%r10d + addl %r12d,%ecx + leaq 1(%rdi),%rdi + addl %r14d,%ecx + + movl 28(%rsp),%r13d + movl 16(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 60(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 24(%rsp),%r12d + movl %r10d,%r13d + addl %r14d,%r12d + movl %ecx,%r14d + rorl $14,%r13d + movl %r11d,%r15d + movl %r12d,24(%rsp) + + rorl $9,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + rorl $5,%r13d + addl %ebx,%r12d + xorl %ecx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r10d,%r15d + movl %edx,%ebx + + rorl $11,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + xorl %r8d,%ebx + xorl %ecx,%r14d + addl %r15d,%r12d + movl %edx,%r15d + + rorl $6,%r13d + andl %ecx,%ebx + andl %r8d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ebx + + addl %r12d,%r9d + addl %r12d,%ebx + leaq 1(%rdi),%rdi + addl %r14d,%ebx + + movl 32(%rsp),%r13d + movl 20(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 0(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 28(%rsp),%r12d + movl %r9d,%r13d + addl %r14d,%r12d + movl %ebx,%r14d + rorl $14,%r13d + movl %r10d,%r15d + movl %r12d,28(%rsp) + + rorl $9,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + rorl $5,%r13d + addl %eax,%r12d + xorl %ebx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r9d,%r15d + movl %ecx,%eax + + rorl $11,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + xorl %edx,%eax + xorl %ebx,%r14d + addl %r15d,%r12d + movl %ecx,%r15d + + rorl $6,%r13d + andl %ebx,%eax + andl %edx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%eax + + addl %r12d,%r8d + addl %r12d,%eax + leaq 1(%rdi),%rdi + addl %r14d,%eax + + movl 36(%rsp),%r13d + movl 24(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 4(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 32(%rsp),%r12d + movl %r8d,%r13d + addl %r14d,%r12d + movl %eax,%r14d + rorl $14,%r13d + movl %r9d,%r15d + movl %r12d,32(%rsp) + + rorl $9,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + rorl $5,%r13d + addl %r11d,%r12d + xorl %eax,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r8d,%r15d + movl %ebx,%r11d + + rorl $11,%r14d + xorl %r8d,%r13d + xorl %r10d,%r15d + + xorl %ecx,%r11d + xorl %eax,%r14d + addl %r15d,%r12d + movl %ebx,%r15d + + rorl $6,%r13d + andl %eax,%r11d + andl %ecx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r11d + + addl %r12d,%edx + addl %r12d,%r11d + leaq 1(%rdi),%rdi + addl %r14d,%r11d + + movl 40(%rsp),%r13d + movl 28(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + 
movl 8(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 36(%rsp),%r12d + movl %edx,%r13d + addl %r14d,%r12d + movl %r11d,%r14d + rorl $14,%r13d + movl %r8d,%r15d + movl %r12d,36(%rsp) + + rorl $9,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + rorl $5,%r13d + addl %r10d,%r12d + xorl %r11d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %edx,%r15d + movl %eax,%r10d + + rorl $11,%r14d + xorl %edx,%r13d + xorl %r9d,%r15d + + xorl %ebx,%r10d + xorl %r11d,%r14d + addl %r15d,%r12d + movl %eax,%r15d + + rorl $6,%r13d + andl %r11d,%r10d + andl %ebx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r10d + + addl %r12d,%ecx + addl %r12d,%r10d + leaq 1(%rdi),%rdi + addl %r14d,%r10d + + movl 44(%rsp),%r13d + movl 32(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 12(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 40(%rsp),%r12d + movl %ecx,%r13d + addl %r14d,%r12d + movl %r10d,%r14d + rorl $14,%r13d + movl %edx,%r15d + movl %r12d,40(%rsp) + + rorl $9,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + rorl $5,%r13d + addl %r9d,%r12d + xorl %r10d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ecx,%r15d + movl %r11d,%r9d + + rorl $11,%r14d + xorl %ecx,%r13d + xorl %r8d,%r15d + + xorl %eax,%r9d + xorl %r10d,%r14d + addl %r15d,%r12d + movl %r11d,%r15d + + rorl $6,%r13d + andl %r10d,%r9d + andl %eax,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r9d + + addl %r12d,%ebx + addl %r12d,%r9d + leaq 1(%rdi),%rdi + addl %r14d,%r9d + + movl 48(%rsp),%r13d + movl 36(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 16(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 44(%rsp),%r12d + movl %ebx,%r13d + addl %r14d,%r12d + movl %r9d,%r14d + rorl $14,%r13d + movl %ecx,%r15d + movl %r12d,44(%rsp) + + rorl $9,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + rorl $5,%r13d + addl %r8d,%r12d + xorl %r9d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %ebx,%r15d + movl %r10d,%r8d + + rorl $11,%r14d + xorl %ebx,%r13d + xorl %edx,%r15d + + xorl %r11d,%r8d + xorl %r9d,%r14d + addl %r15d,%r12d + movl %r10d,%r15d + + rorl $6,%r13d + andl %r9d,%r8d + andl %r11d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%r8d + + addl %r12d,%eax + addl %r12d,%r8d + leaq 1(%rdi),%rdi + addl %r14d,%r8d + + movl 52(%rsp),%r13d + movl 40(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 20(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 48(%rsp),%r12d + movl %eax,%r13d + addl %r14d,%r12d + movl %r8d,%r14d + rorl $14,%r13d + movl %ebx,%r15d + movl %r12d,48(%rsp) + + rorl $9,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + rorl $5,%r13d + addl %edx,%r12d + xorl %r8d,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %eax,%r15d + movl %r9d,%edx + + rorl $11,%r14d + xorl %eax,%r13d + xorl %ecx,%r15d + + xorl %r10d,%edx + xorl %r8d,%r14d + addl %r15d,%r12d + movl %r9d,%r15d + + rorl $6,%r13d + andl %r8d,%edx + andl %r10d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%edx + + addl %r12d,%r11d + addl %r12d,%edx + leaq 1(%rdi),%rdi + addl 
%r14d,%edx + + movl 56(%rsp),%r13d + movl 44(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 24(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 52(%rsp),%r12d + movl %r11d,%r13d + addl %r14d,%r12d + movl %edx,%r14d + rorl $14,%r13d + movl %eax,%r15d + movl %r12d,52(%rsp) + + rorl $9,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + rorl $5,%r13d + addl %ecx,%r12d + xorl %edx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r11d,%r15d + movl %r8d,%ecx + + rorl $11,%r14d + xorl %r11d,%r13d + xorl %ebx,%r15d + + xorl %r9d,%ecx + xorl %edx,%r14d + addl %r15d,%r12d + movl %r8d,%r15d + + rorl $6,%r13d + andl %edx,%ecx + andl %r9d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ecx + + addl %r12d,%r10d + addl %r12d,%ecx + leaq 1(%rdi),%rdi + addl %r14d,%ecx + + movl 60(%rsp),%r13d + movl 48(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 28(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 56(%rsp),%r12d + movl %r10d,%r13d + addl %r14d,%r12d + movl %ecx,%r14d + rorl $14,%r13d + movl %r11d,%r15d + movl %r12d,56(%rsp) + + rorl $9,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + rorl $5,%r13d + addl %ebx,%r12d + xorl %ecx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r10d,%r15d + movl %edx,%ebx + + rorl $11,%r14d + xorl %r10d,%r13d + xorl %eax,%r15d + + xorl %r8d,%ebx + xorl %ecx,%r14d + addl %r15d,%r12d + movl %edx,%r15d + + rorl $6,%r13d + andl %ecx,%ebx + andl %r8d,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%ebx + + addl %r12d,%r9d + addl %r12d,%ebx + leaq 1(%rdi),%rdi + addl %r14d,%ebx + + movl 0(%rsp),%r13d + movl 52(%rsp),%r14d + movl %r13d,%r12d + movl %r14d,%r15d + + rorl $11,%r12d + xorl %r13d,%r12d + shrl $3,%r13d + + rorl $7,%r12d + xorl %r12d,%r13d + movl 32(%rsp),%r12d + + rorl $2,%r15d + xorl %r14d,%r15d + shrl $10,%r14d + + rorl $17,%r15d + addl %r13d,%r12d + xorl %r15d,%r14d + + addl 60(%rsp),%r12d + movl %r9d,%r13d + addl %r14d,%r12d + movl %ebx,%r14d + rorl $14,%r13d + movl %r10d,%r15d + movl %r12d,60(%rsp) + + rorl $9,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + rorl $5,%r13d + addl %eax,%r12d + xorl %ebx,%r14d + + addl (%rbp,%rdi,4),%r12d + andl %r9d,%r15d + movl %ecx,%eax + + rorl $11,%r14d + xorl %r9d,%r13d + xorl %r11d,%r15d + + xorl %edx,%eax + xorl %ebx,%r14d + addl %r15d,%r12d + movl %ecx,%r15d + + rorl $6,%r13d + andl %ebx,%eax + andl %edx,%r15d + + rorl $2,%r14d + addl %r13d,%r12d + addl %r15d,%eax + + addl %r12d,%r8d + addl %r12d,%eax + leaq 1(%rdi),%rdi + addl %r14d,%eax + + cmpq $64,%rdi + jb .Lrounds_16_xx + + movq 64+0(%rsp),%rdi + leaq 64(%rsi),%rsi + + addl 0(%rdi),%eax + addl 4(%rdi),%ebx + addl 8(%rdi),%ecx + addl 12(%rdi),%edx + addl 16(%rdi),%r8d + addl 20(%rdi),%r9d + addl 24(%rdi),%r10d + addl 28(%rdi),%r11d + + cmpq 64+16(%rsp),%rsi + + movl %eax,0(%rdi) + movl %ebx,4(%rdi) + movl %ecx,8(%rdi) + movl %edx,12(%rdi) + movl %r8d,16(%rdi) + movl %r9d,20(%rdi) + movl %r10d,24(%rdi) + movl %r11d,28(%rdi) + jb .Lloop + + movq 64+24(%rsp),%rsi + movq (%rsi),%r15 + movq 8(%rsi),%r14 + movq 16(%rsi),%r13 + movq 24(%rsi),%r12 + movq 32(%rsi),%rbp + movq 40(%rsi),%rbx + leaq 48(%rsi),%rsp +.Lepilogue: + .byte 0xf3,0xc3 +.size sha256_block_data_order,.-sha256_block_data_order +.align 64 +.type 
K256,@object +K256: +.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 +.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 +.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 +.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 +.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc +.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da +.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 +.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 +.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 +.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 +.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 +.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 +.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 +.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 +.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 +.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 diff --git a/app/openssl/crypto/sha/asm/sha512-586.S b/app/openssl/crypto/sha/asm/sha512-586.S new file mode 100644 index 00000000..82c76c41 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha512-586.S @@ -0,0 +1,836 @@ +.file "sha512-586.s" +.text +.globl sha512_block_data_order +.type sha512_block_data_order,@function +.align 16 +sha512_block_data_order: +.L_sha512_block_data_order_begin: + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + movl 20(%esp),%esi + movl 24(%esp),%edi + movl 28(%esp),%eax + movl %esp,%ebx + call .L000pic_point +.L000pic_point: + popl %ebp + leal .L001K512-.L000pic_point(%ebp),%ebp + subl $16,%esp + andl $-64,%esp + shll $7,%eax + addl %edi,%eax + movl %esi,(%esp) + movl %edi,4(%esp) + movl %eax,8(%esp) + movl %ebx,12(%esp) + leal _GLOBAL_OFFSET_TABLE_+[.-.L001K512](%ebp),%edx + movl OPENSSL_ia32cap_P@GOT(%edx),%edx + btl $26,(%edx) + jnc .L002loop_x86 + movq (%esi),%mm0 + movq 8(%esi),%mm1 + movq 16(%esi),%mm2 + movq 24(%esi),%mm3 + movq 32(%esi),%mm4 + movq 40(%esi),%mm5 + movq 48(%esi),%mm6 + movq 56(%esi),%mm7 + subl $80,%esp +.align 16 +.L003loop_sse2: + movq %mm1,8(%esp) + movq %mm2,16(%esp) + movq %mm3,24(%esp) + movq %mm5,40(%esp) + movq %mm6,48(%esp) + movq %mm7,56(%esp) + movl (%edi),%ecx + movl 4(%edi),%edx + addl $8,%edi + bswap %ecx + bswap %edx + movl %ecx,76(%esp) + movl %edx,72(%esp) +.align 16 +.L00400_14_sse2: + movl (%edi),%eax + movl 4(%edi),%ebx + addl $8,%edi + bswap %eax + bswap %ebx + movl %eax,68(%esp) + movl %ebx,64(%esp) + movq 40(%esp),%mm5 + movq 48(%esp),%mm6 + movq 56(%esp),%mm7 + movq %mm4,%mm1 + movq %mm4,%mm2 + psrlq $14,%mm1 + movq %mm4,32(%esp) + psllq $23,%mm2 + movq %mm1,%mm3 + psrlq $4,%mm1 + pxor %mm2,%mm3 + psllq $23,%mm2 + pxor %mm1,%mm3 + psrlq $23,%mm1 + pxor %mm2,%mm3 + psllq $4,%mm2 + pxor %mm1,%mm3 + paddq (%ebp),%mm7 + pxor %mm2,%mm3 + pxor %mm6,%mm5 + movq 8(%esp),%mm1 + pand %mm4,%mm5 + movq 16(%esp),%mm2 + pxor %mm6,%mm5 + movq 24(%esp),%mm4 + paddq %mm5,%mm3 + movq %mm0,(%esp) + paddq %mm7,%mm3 + movq %mm0,%mm5 + movq %mm0,%mm6 + paddq 72(%esp),%mm3 + psrlq $28,%mm5 + paddq %mm3,%mm4 + psllq $25,%mm6 + movq %mm5,%mm7 + psrlq $6,%mm5 + pxor %mm6,%mm7 + psllq $5,%mm6 + pxor %mm5,%mm7 + psrlq $5,%mm5 + pxor %mm6,%mm7 + psllq $6,%mm6 + pxor %mm5,%mm7 + subl $8,%esp + pxor %mm6,%mm7 + movq %mm0,%mm5 + por %mm2,%mm0 + pand %mm2,%mm5 + pand %mm1,%mm0 + por %mm0,%mm5 + paddq %mm5,%mm7 + movq %mm3,%mm0 + movb (%ebp),%dl + paddq %mm7,%mm0 + addl $8,%ebp + cmpb $53,%dl + jne .L00400_14_sse2 + movq 40(%esp),%mm5 + movq 48(%esp),%mm6 + movq 56(%esp),%mm7 + movq %mm4,%mm1 + movq %mm4,%mm2 + psrlq $14,%mm1 + movq %mm4,32(%esp) + psllq $23,%mm2 + movq %mm1,%mm3 + psrlq $4,%mm1 + pxor %mm2,%mm3 + psllq 
$23,%mm2 + pxor %mm1,%mm3 + psrlq $23,%mm1 + pxor %mm2,%mm3 + psllq $4,%mm2 + pxor %mm1,%mm3 + paddq (%ebp),%mm7 + pxor %mm2,%mm3 + pxor %mm6,%mm5 + movq 8(%esp),%mm1 + pand %mm4,%mm5 + movq 16(%esp),%mm2 + pxor %mm6,%mm5 + movq 24(%esp),%mm4 + paddq %mm5,%mm3 + movq %mm0,(%esp) + paddq %mm7,%mm3 + movq %mm0,%mm5 + movq %mm0,%mm6 + paddq 72(%esp),%mm3 + psrlq $28,%mm5 + paddq %mm3,%mm4 + psllq $25,%mm6 + movq %mm5,%mm7 + psrlq $6,%mm5 + pxor %mm6,%mm7 + psllq $5,%mm6 + pxor %mm5,%mm7 + psrlq $5,%mm5 + pxor %mm6,%mm7 + psllq $6,%mm6 + pxor %mm5,%mm7 + subl $8,%esp + pxor %mm6,%mm7 + movq %mm0,%mm5 + por %mm2,%mm0 + movq 88(%esp),%mm6 + pand %mm2,%mm5 + pand %mm1,%mm0 + movq 192(%esp),%mm2 + por %mm0,%mm5 + paddq %mm5,%mm7 + movq %mm3,%mm0 + movb (%ebp),%dl + paddq %mm7,%mm0 + addl $8,%ebp +.align 16 +.L00516_79_sse2: + movq %mm2,%mm1 + psrlq $1,%mm2 + movq %mm6,%mm7 + psrlq $6,%mm6 + movq %mm2,%mm3 + psrlq $6,%mm2 + movq %mm6,%mm5 + psrlq $13,%mm6 + pxor %mm2,%mm3 + psrlq $1,%mm2 + pxor %mm6,%mm5 + psrlq $42,%mm6 + pxor %mm2,%mm3 + movq 200(%esp),%mm2 + psllq $56,%mm1 + pxor %mm6,%mm5 + psllq $3,%mm7 + pxor %mm1,%mm3 + paddq 128(%esp),%mm2 + psllq $7,%mm1 + pxor %mm7,%mm5 + psllq $42,%mm7 + pxor %mm1,%mm3 + pxor %mm7,%mm5 + paddq %mm5,%mm3 + paddq %mm2,%mm3 + movq %mm3,72(%esp) + movq 40(%esp),%mm5 + movq 48(%esp),%mm6 + movq 56(%esp),%mm7 + movq %mm4,%mm1 + movq %mm4,%mm2 + psrlq $14,%mm1 + movq %mm4,32(%esp) + psllq $23,%mm2 + movq %mm1,%mm3 + psrlq $4,%mm1 + pxor %mm2,%mm3 + psllq $23,%mm2 + pxor %mm1,%mm3 + psrlq $23,%mm1 + pxor %mm2,%mm3 + psllq $4,%mm2 + pxor %mm1,%mm3 + paddq (%ebp),%mm7 + pxor %mm2,%mm3 + pxor %mm6,%mm5 + movq 8(%esp),%mm1 + pand %mm4,%mm5 + movq 16(%esp),%mm2 + pxor %mm6,%mm5 + movq 24(%esp),%mm4 + paddq %mm5,%mm3 + movq %mm0,(%esp) + paddq %mm7,%mm3 + movq %mm0,%mm5 + movq %mm0,%mm6 + paddq 72(%esp),%mm3 + psrlq $28,%mm5 + paddq %mm3,%mm4 + psllq $25,%mm6 + movq %mm5,%mm7 + psrlq $6,%mm5 + pxor %mm6,%mm7 + psllq $5,%mm6 + pxor %mm5,%mm7 + psrlq $5,%mm5 + pxor %mm6,%mm7 + psllq $6,%mm6 + pxor %mm5,%mm7 + subl $8,%esp + pxor %mm6,%mm7 + movq %mm0,%mm5 + por %mm2,%mm0 + movq 88(%esp),%mm6 + pand %mm2,%mm5 + pand %mm1,%mm0 + movq 192(%esp),%mm2 + por %mm0,%mm5 + paddq %mm5,%mm7 + movq %mm3,%mm0 + movb (%ebp),%dl + paddq %mm7,%mm0 + addl $8,%ebp + cmpb $23,%dl + jne .L00516_79_sse2 + movq 8(%esp),%mm1 + movq 16(%esp),%mm2 + movq 24(%esp),%mm3 + movq 40(%esp),%mm5 + movq 48(%esp),%mm6 + movq 56(%esp),%mm7 + paddq (%esi),%mm0 + paddq 8(%esi),%mm1 + paddq 16(%esi),%mm2 + paddq 24(%esi),%mm3 + paddq 32(%esi),%mm4 + paddq 40(%esi),%mm5 + paddq 48(%esi),%mm6 + paddq 56(%esi),%mm7 + movq %mm0,(%esi) + movq %mm1,8(%esi) + movq %mm2,16(%esi) + movq %mm3,24(%esi) + movq %mm4,32(%esi) + movq %mm5,40(%esi) + movq %mm6,48(%esi) + movq %mm7,56(%esi) + addl $640,%esp + subl $640,%ebp + cmpl 88(%esp),%edi + jb .L003loop_sse2 + emms + movl 92(%esp),%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 16 +.L002loop_x86: + movl (%edi),%eax + movl 4(%edi),%ebx + movl 8(%edi),%ecx + movl 12(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 16(%edi),%eax + movl 20(%edi),%ebx + movl 24(%edi),%ecx + movl 28(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 32(%edi),%eax + movl 36(%edi),%ebx + movl 40(%edi),%ecx + movl 44(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + 
movl 48(%edi),%eax + movl 52(%edi),%ebx + movl 56(%edi),%ecx + movl 60(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 64(%edi),%eax + movl 68(%edi),%ebx + movl 72(%edi),%ecx + movl 76(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 80(%edi),%eax + movl 84(%edi),%ebx + movl 88(%edi),%ecx + movl 92(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 96(%edi),%eax + movl 100(%edi),%ebx + movl 104(%edi),%ecx + movl 108(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + movl 112(%edi),%eax + movl 116(%edi),%ebx + movl 120(%edi),%ecx + movl 124(%edi),%edx + bswap %eax + bswap %ebx + bswap %ecx + bswap %edx + pushl %eax + pushl %ebx + pushl %ecx + pushl %edx + addl $128,%edi + subl $72,%esp + movl %edi,204(%esp) + leal 8(%esp),%edi + movl $16,%ecx +.long 2784229001 +.align 16 +.L00600_15_x86: + movl 40(%esp),%ecx + movl 44(%esp),%edx + movl %ecx,%esi + shrl $9,%ecx + movl %edx,%edi + shrl $9,%edx + movl %ecx,%ebx + shll $14,%esi + movl %edx,%eax + shll $14,%edi + xorl %esi,%ebx + shrl $5,%ecx + xorl %edi,%eax + shrl $5,%edx + xorl %ecx,%eax + shll $4,%esi + xorl %edx,%ebx + shll $4,%edi + xorl %esi,%ebx + shrl $4,%ecx + xorl %edi,%eax + shrl $4,%edx + xorl %ecx,%eax + shll $5,%esi + xorl %edx,%ebx + shll $5,%edi + xorl %esi,%eax + xorl %edi,%ebx + movl 48(%esp),%ecx + movl 52(%esp),%edx + movl 56(%esp),%esi + movl 60(%esp),%edi + addl 64(%esp),%eax + adcl 68(%esp),%ebx + xorl %esi,%ecx + xorl %edi,%edx + andl 40(%esp),%ecx + andl 44(%esp),%edx + addl 192(%esp),%eax + adcl 196(%esp),%ebx + xorl %esi,%ecx + xorl %edi,%edx + movl (%ebp),%esi + movl 4(%ebp),%edi + addl %ecx,%eax + adcl %edx,%ebx + movl 32(%esp),%ecx + movl 36(%esp),%edx + addl %esi,%eax + adcl %edi,%ebx + movl %eax,(%esp) + movl %ebx,4(%esp) + addl %ecx,%eax + adcl %edx,%ebx + movl 8(%esp),%ecx + movl 12(%esp),%edx + movl %eax,32(%esp) + movl %ebx,36(%esp) + movl %ecx,%esi + shrl $2,%ecx + movl %edx,%edi + shrl $2,%edx + movl %ecx,%ebx + shll $4,%esi + movl %edx,%eax + shll $4,%edi + xorl %esi,%ebx + shrl $5,%ecx + xorl %edi,%eax + shrl $5,%edx + xorl %ecx,%ebx + shll $21,%esi + xorl %edx,%eax + shll $21,%edi + xorl %esi,%eax + shrl $21,%ecx + xorl %edi,%ebx + shrl $21,%edx + xorl %ecx,%eax + shll $5,%esi + xorl %edx,%ebx + shll $5,%edi + xorl %esi,%eax + xorl %edi,%ebx + movl 8(%esp),%ecx + movl 12(%esp),%edx + movl 16(%esp),%esi + movl 20(%esp),%edi + addl (%esp),%eax + adcl 4(%esp),%ebx + orl %esi,%ecx + orl %edi,%edx + andl 24(%esp),%ecx + andl 28(%esp),%edx + andl 8(%esp),%esi + andl 12(%esp),%edi + orl %esi,%ecx + orl %edi,%edx + addl %ecx,%eax + adcl %edx,%ebx + movl %eax,(%esp) + movl %ebx,4(%esp) + movb (%ebp),%dl + subl $8,%esp + leal 8(%ebp),%ebp + cmpb $148,%dl + jne .L00600_15_x86 +.align 16 +.L00716_79_x86: + movl 312(%esp),%ecx + movl 316(%esp),%edx + movl %ecx,%esi + shrl $1,%ecx + movl %edx,%edi + shrl $1,%edx + movl %ecx,%eax + shll $24,%esi + movl %edx,%ebx + shll $24,%edi + xorl %esi,%ebx + shrl $6,%ecx + xorl %edi,%eax + shrl $6,%edx + xorl %ecx,%eax + shll $7,%esi + xorl %edx,%ebx + shll $1,%edi + xorl %esi,%ebx + shrl $1,%ecx + xorl %edi,%eax + shrl $1,%edx + xorl %ecx,%eax + shll $6,%edi + xorl %edx,%ebx + xorl %edi,%eax + movl %eax,(%esp) + movl %ebx,4(%esp) + movl 208(%esp),%ecx + movl 212(%esp),%edx + movl %ecx,%esi + shrl 
$6,%ecx + movl %edx,%edi + shrl $6,%edx + movl %ecx,%eax + shll $3,%esi + movl %edx,%ebx + shll $3,%edi + xorl %esi,%eax + shrl $13,%ecx + xorl %edi,%ebx + shrl $13,%edx + xorl %ecx,%eax + shll $10,%esi + xorl %edx,%ebx + shll $10,%edi + xorl %esi,%ebx + shrl $10,%ecx + xorl %edi,%eax + shrl $10,%edx + xorl %ecx,%ebx + shll $13,%edi + xorl %edx,%eax + xorl %edi,%eax + movl 320(%esp),%ecx + movl 324(%esp),%edx + addl (%esp),%eax + adcl 4(%esp),%ebx + movl 248(%esp),%esi + movl 252(%esp),%edi + addl %ecx,%eax + adcl %edx,%ebx + addl %esi,%eax + adcl %edi,%ebx + movl %eax,192(%esp) + movl %ebx,196(%esp) + movl 40(%esp),%ecx + movl 44(%esp),%edx + movl %ecx,%esi + shrl $9,%ecx + movl %edx,%edi + shrl $9,%edx + movl %ecx,%ebx + shll $14,%esi + movl %edx,%eax + shll $14,%edi + xorl %esi,%ebx + shrl $5,%ecx + xorl %edi,%eax + shrl $5,%edx + xorl %ecx,%eax + shll $4,%esi + xorl %edx,%ebx + shll $4,%edi + xorl %esi,%ebx + shrl $4,%ecx + xorl %edi,%eax + shrl $4,%edx + xorl %ecx,%eax + shll $5,%esi + xorl %edx,%ebx + shll $5,%edi + xorl %esi,%eax + xorl %edi,%ebx + movl 48(%esp),%ecx + movl 52(%esp),%edx + movl 56(%esp),%esi + movl 60(%esp),%edi + addl 64(%esp),%eax + adcl 68(%esp),%ebx + xorl %esi,%ecx + xorl %edi,%edx + andl 40(%esp),%ecx + andl 44(%esp),%edx + addl 192(%esp),%eax + adcl 196(%esp),%ebx + xorl %esi,%ecx + xorl %edi,%edx + movl (%ebp),%esi + movl 4(%ebp),%edi + addl %ecx,%eax + adcl %edx,%ebx + movl 32(%esp),%ecx + movl 36(%esp),%edx + addl %esi,%eax + adcl %edi,%ebx + movl %eax,(%esp) + movl %ebx,4(%esp) + addl %ecx,%eax + adcl %edx,%ebx + movl 8(%esp),%ecx + movl 12(%esp),%edx + movl %eax,32(%esp) + movl %ebx,36(%esp) + movl %ecx,%esi + shrl $2,%ecx + movl %edx,%edi + shrl $2,%edx + movl %ecx,%ebx + shll $4,%esi + movl %edx,%eax + shll $4,%edi + xorl %esi,%ebx + shrl $5,%ecx + xorl %edi,%eax + shrl $5,%edx + xorl %ecx,%ebx + shll $21,%esi + xorl %edx,%eax + shll $21,%edi + xorl %esi,%eax + shrl $21,%ecx + xorl %edi,%ebx + shrl $21,%edx + xorl %ecx,%eax + shll $5,%esi + xorl %edx,%ebx + shll $5,%edi + xorl %esi,%eax + xorl %edi,%ebx + movl 8(%esp),%ecx + movl 12(%esp),%edx + movl 16(%esp),%esi + movl 20(%esp),%edi + addl (%esp),%eax + adcl 4(%esp),%ebx + orl %esi,%ecx + orl %edi,%edx + andl 24(%esp),%ecx + andl 28(%esp),%edx + andl 8(%esp),%esi + andl 12(%esp),%edi + orl %esi,%ecx + orl %edi,%edx + addl %ecx,%eax + adcl %edx,%ebx + movl %eax,(%esp) + movl %ebx,4(%esp) + movb (%ebp),%dl + subl $8,%esp + leal 8(%ebp),%ebp + cmpb $23,%dl + jne .L00716_79_x86 + movl 840(%esp),%esi + movl 844(%esp),%edi + movl (%esi),%eax + movl 4(%esi),%ebx + movl 8(%esi),%ecx + movl 12(%esi),%edx + addl 8(%esp),%eax + adcl 12(%esp),%ebx + movl %eax,(%esi) + movl %ebx,4(%esi) + addl 16(%esp),%ecx + adcl 20(%esp),%edx + movl %ecx,8(%esi) + movl %edx,12(%esi) + movl 16(%esi),%eax + movl 20(%esi),%ebx + movl 24(%esi),%ecx + movl 28(%esi),%edx + addl 24(%esp),%eax + adcl 28(%esp),%ebx + movl %eax,16(%esi) + movl %ebx,20(%esi) + addl 32(%esp),%ecx + adcl 36(%esp),%edx + movl %ecx,24(%esi) + movl %edx,28(%esi) + movl 32(%esi),%eax + movl 36(%esi),%ebx + movl 40(%esi),%ecx + movl 44(%esi),%edx + addl 40(%esp),%eax + adcl 44(%esp),%ebx + movl %eax,32(%esi) + movl %ebx,36(%esi) + addl 48(%esp),%ecx + adcl 52(%esp),%edx + movl %ecx,40(%esi) + movl %edx,44(%esi) + movl 48(%esi),%eax + movl 52(%esi),%ebx + movl 56(%esi),%ecx + movl 60(%esi),%edx + addl 56(%esp),%eax + adcl 60(%esp),%ebx + movl %eax,48(%esi) + movl %ebx,52(%esi) + addl 64(%esp),%ecx + adcl 68(%esp),%edx + movl %ecx,56(%esi) + movl %edx,60(%esi) + 
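The addl/adcl pairs just above fold each 64-bit working value back into the hash state one 32-bit half at a time, with the carry from the low half propagated into the high half. A minimal C sketch of that accumulation step, using illustrative names that are not part of the patch:

#include <stdint.h>

/* Add a 64-bit quantity, given as two 32-bit halves, into a state word
 * stored as h[0] = low dword, h[1] = high dword (illustration only). */
static void add64_via_halves(uint32_t h[2], uint32_t lo, uint32_t hi)
{
    uint32_t old_lo = h[0];
    h[0] += lo;                    /* addl: low halves */
    h[1] += hi + (h[0] < old_lo);  /* adcl: high halves plus carry */
}
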
addl $840,%esp + subl $640,%ebp + cmpl 8(%esp),%edi + jb .L002loop_x86 + movl 12(%esp),%esp + popl %edi + popl %esi + popl %ebx + popl %ebp + ret +.align 64 +.L001K512: +.long 3609767458,1116352408 +.long 602891725,1899447441 +.long 3964484399,3049323471 +.long 2173295548,3921009573 +.long 4081628472,961987163 +.long 3053834265,1508970993 +.long 2937671579,2453635748 +.long 3664609560,2870763221 +.long 2734883394,3624381080 +.long 1164996542,310598401 +.long 1323610764,607225278 +.long 3590304994,1426881987 +.long 4068182383,1925078388 +.long 991336113,2162078206 +.long 633803317,2614888103 +.long 3479774868,3248222580 +.long 2666613458,3835390401 +.long 944711139,4022224774 +.long 2341262773,264347078 +.long 2007800933,604807628 +.long 1495990901,770255983 +.long 1856431235,1249150122 +.long 3175218132,1555081692 +.long 2198950837,1996064986 +.long 3999719339,2554220882 +.long 766784016,2821834349 +.long 2566594879,2952996808 +.long 3203337956,3210313671 +.long 1034457026,3336571891 +.long 2466948901,3584528711 +.long 3758326383,113926993 +.long 168717936,338241895 +.long 1188179964,666307205 +.long 1546045734,773529912 +.long 1522805485,1294757372 +.long 2643833823,1396182291 +.long 2343527390,1695183700 +.long 1014477480,1986661051 +.long 1206759142,2177026350 +.long 344077627,2456956037 +.long 1290863460,2730485921 +.long 3158454273,2820302411 +.long 3505952657,3259730800 +.long 106217008,3345764771 +.long 3606008344,3516065817 +.long 1432725776,3600352804 +.long 1467031594,4094571909 +.long 851169720,275423344 +.long 3100823752,430227734 +.long 1363258195,506948616 +.long 3750685593,659060556 +.long 3785050280,883997877 +.long 3318307427,958139571 +.long 3812723403,1322822218 +.long 2003034995,1537002063 +.long 3602036899,1747873779 +.long 1575990012,1955562222 +.long 1125592928,2024104815 +.long 2716904306,2227730452 +.long 442776044,2361852424 +.long 593698344,2428436474 +.long 3733110249,2756734187 +.long 2999351573,3204031479 +.long 3815920427,3329325298 +.long 3928383900,3391569614 +.long 566280711,3515267271 +.long 3454069534,3940187606 +.long 4000239992,4118630271 +.long 1914138554,116418474 +.long 2731055270,174292421 +.long 3203993006,289380356 +.long 320620315,460393269 +.long 587496836,685471733 +.long 1086792851,852142971 +.long 365543100,1017036298 +.long 2618297676,1126000580 +.long 3409855158,1288033470 +.long 4234509866,1501505948 +.long 987167468,1607167915 +.long 1246189591,1816402316 +.size sha512_block_data_order,.-.L_sha512_block_data_order_begin +.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 +.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 +.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 +.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 +.byte 62,0 +.comm OPENSSL_ia32cap_P,8,4 diff --git a/app/openssl/crypto/sha/asm/sha512-586.pl b/app/openssl/crypto/sha/asm/sha512-586.pl index 5b9f3337..9f8c51eb 100644 --- a/app/openssl/crypto/sha/asm/sha512-586.pl +++ b/app/openssl/crypto/sha/asm/sha512-586.pl @@ -23,7 +23,7 @@ # # IALU code-path is optimized for elder Pentiums. On vanilla Pentium # performance improvement over compiler generated code reaches ~60%, -# while on PIII - ~35%. On newer µ-archs improvement varies from 15% +# while on PIII - ~35%. On newer µ-archs improvement varies from 15% # to 50%, but it's less important as they are expected to execute SSE2 # code-path, which is commonly ~2-3x faster [than compiler generated # code]. 
SSE2 code-path is as fast as original sha512-sse2.pl, even @@ -142,9 +142,9 @@ sub BODY_00_15_x86 { &mov ("edx",$Ehi); &mov ("esi","ecx"); - &shr ("ecx",9) # lo>>9 + &shr ("ecx",9); # lo>>9 &mov ("edi","edx"); - &shr ("edx",9) # hi>>9 + &shr ("edx",9); # hi>>9 &mov ("ebx","ecx"); &shl ("esi",14); # lo<<14 &mov ("eax","edx"); @@ -207,9 +207,9 @@ sub BODY_00_15_x86 { &mov ($Dhi,"ebx"); &mov ("esi","ecx"); - &shr ("ecx",2) # lo>>2 + &shr ("ecx",2); # lo>>2 &mov ("edi","edx"); - &shr ("edx",2) # hi>>2 + &shr ("edx",2); # hi>>2 &mov ("ebx","ecx"); &shl ("esi",4); # lo<<4 &mov ("eax","edx"); @@ -452,9 +452,9 @@ if ($sse2) { &mov ("edx",&DWP(8*(9+15+16-1)+4,"esp")); &mov ("esi","ecx"); - &shr ("ecx",1) # lo>>1 + &shr ("ecx",1); # lo>>1 &mov ("edi","edx"); - &shr ("edx",1) # hi>>1 + &shr ("edx",1); # hi>>1 &mov ("eax","ecx"); &shl ("esi",24); # lo<<24 &mov ("ebx","edx"); @@ -488,9 +488,9 @@ if ($sse2) { &mov ("edx",&DWP(8*(9+15+16-14)+4,"esp")); &mov ("esi","ecx"); - &shr ("ecx",6) # lo>>6 + &shr ("ecx",6); # lo>>6 &mov ("edi","edx"); - &shr ("edx",6) # hi>>6 + &shr ("edx",6); # hi>>6 &mov ("eax","ecx"); &shl ("esi",3); # lo<<3 &mov ("ebx","edx"); diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.S b/app/openssl/crypto/sha/asm/sha512-armv4.S new file mode 120000 index 00000000..046c909a --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha512-armv4.S @@ -0,0 +1 @@ +sha512-armv4.s \ No newline at end of file diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.pl b/app/openssl/crypto/sha/asm/sha512-armv4.pl index 3a35861a..7faf37b1 100644 --- a/app/openssl/crypto/sha/asm/sha512-armv4.pl +++ b/app/openssl/crypto/sha/asm/sha512-armv4.pl @@ -18,22 +18,33 @@ # Rescheduling for dual-issue pipeline resulted in 6% improvement on # Cortex A8 core and ~40 cycles per processed byte. +# February 2011. +# +# Profiler-assisted and platform-specific optimization resulted in 7% +# improvement on Coxtex A8 core and ~38 cycles per byte. + +# March 2011. +# +# Add NEON implementation. On Cortex A8 it was measured to process +# one byte in 25.5 cycles or 47% faster than integer-only code. + # Byte order [in]dependence. ========================================= # -# Caller is expected to maintain specific *dword* order in h[0-7], -# namely with most significant dword at *lower* address, which is -# reflected in below two parameters. *Byte* order within these dwords -# in turn is whatever *native* byte order on current platform. -$hi=0; -$lo=4; +# Originally caller was expected to maintain specific *dword* order in +# h[0-7], namely with most significant dword at *lower* address, which +# was reflected in below two parameters as 0 and 4. Now caller is +# expected to maintain native byte order for whole 64-bit values. 
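Concretely, the HI/LO offsets are now derived from the target's endianness rather than from a fixed layout: on little-endian ARM (__ARMEL__) the low dword of each 64-bit h[i] sits at the lower address, otherwise the high dword does. A small C sketch of that mapping, for illustration only (the patch itself expresses this as LO/HI assembler defines):

#include <stdint.h>
#include <string.h>

#ifdef __ARMEL__            /* same test the patch introduces */
# define LO_OFF 0
# define HI_OFF 4
#else
# define HI_OFF 0
# define LO_OFF 4
#endif

/* Fetch one 32-bit half of a natively stored 64-bit value. */
static uint32_t dword_at(const uint64_t *v, int off)
{
    uint32_t w;
    memcpy(&w, (const unsigned char *)v + off, sizeof w);
    return w;
}
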
+$hi="HI"; +$lo="LO"; # ==================================================================== while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; -$ctx="r0"; +$ctx="r0"; # parameter block $inp="r1"; $len="r2"; + $Tlo="r3"; $Thi="r4"; $Alo="r5"; @@ -61,15 +72,17 @@ $Xoff=8*8; sub BODY_00_15() { my $magic = shift; $code.=<<___; - ldr $t2,[sp,#$Hoff+0] @ h.lo - ldr $t3,[sp,#$Hoff+4] @ h.hi @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov $t0,$Elo,lsr#14 + str $Tlo,[sp,#$Xoff+0] mov $t1,$Ehi,lsr#14 + str $Thi,[sp,#$Xoff+4] eor $t0,$t0,$Ehi,lsl#18 + ldr $t2,[sp,#$Hoff+0] @ h.lo eor $t1,$t1,$Elo,lsl#18 + ldr $t3,[sp,#$Hoff+4] @ h.hi eor $t0,$t0,$Elo,lsr#18 eor $t1,$t1,$Ehi,lsr#18 eor $t0,$t0,$Ehi,lsl#14 @@ -96,25 +109,24 @@ $code.=<<___; and $t1,$t1,$Ehi str $Ahi,[sp,#$Aoff+4] eor $t0,$t0,$t2 - ldr $t2,[$Ktbl,#4] @ K[i].lo + ldr $t2,[$Ktbl,#$lo] @ K[i].lo eor $t1,$t1,$t3 @ Ch(e,f,g) - ldr $t3,[$Ktbl,#0] @ K[i].hi + ldr $t3,[$Ktbl,#$hi] @ K[i].hi adds $Tlo,$Tlo,$t0 ldr $Elo,[sp,#$Doff+0] @ d.lo adc $Thi,$Thi,$t1 @ T += Ch(e,f,g) ldr $Ehi,[sp,#$Doff+4] @ d.hi adds $Tlo,$Tlo,$t2 + and $t0,$t2,#0xff adc $Thi,$Thi,$t3 @ T += K[i] adds $Elo,$Elo,$Tlo + ldr $t2,[sp,#$Boff+0] @ b.lo adc $Ehi,$Ehi,$Thi @ d += T - - and $t0,$t2,#0xff teq $t0,#$magic - orreq $Ktbl,$Ktbl,#1 - ldr $t2,[sp,#$Boff+0] @ b.lo ldr $t3,[sp,#$Coff+0] @ c.lo + orreq $Ktbl,$Ktbl,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 @@ -131,80 +143,100 @@ $code.=<<___; eor $t0,$t0,$Alo,lsl#25 eor $t1,$t1,$Ahi,lsl#25 @ Sigma0(a) adds $Tlo,$Tlo,$t0 + and $t0,$Alo,$t2 adc $Thi,$Thi,$t1 @ T += Sigma0(a) - and $t0,$Alo,$t2 - orr $Alo,$Alo,$t2 ldr $t1,[sp,#$Boff+4] @ b.hi + orr $Alo,$Alo,$t2 ldr $t2,[sp,#$Coff+4] @ c.hi and $Alo,$Alo,$t3 - orr $Alo,$Alo,$t0 @ Maj(a,b,c).lo and $t3,$Ahi,$t1 orr $Ahi,$Ahi,$t1 + orr $Alo,$Alo,$t0 @ Maj(a,b,c).lo and $Ahi,$Ahi,$t2 - orr $Ahi,$Ahi,$t3 @ Maj(a,b,c).hi adds $Alo,$Alo,$Tlo - adc $Ahi,$Ahi,$Thi @ h += T - + orr $Ahi,$Ahi,$t3 @ Maj(a,b,c).hi sub sp,sp,#8 + adc $Ahi,$Ahi,$Thi @ h += T + tst $Ktbl,#1 add $Ktbl,$Ktbl,#8 ___ } $code=<<___; +#include "arm_arch.h" +#ifdef __ARMEL__ +# define LO 0 +# define HI 4 +# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 +#else +# define HI 0 +# define LO 4 +# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 +#endif + .text .code 32 .type K512,%object .align 5 K512: -.word 0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd -.word 0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc -.word 0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019 -.word 0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118 -.word 0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe -.word 0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2 -.word 0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1 -.word 0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694 -.word 0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3 -.word 0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65 -.word 0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483 -.word 0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5 -.word 0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210 -.word 0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4 -.word 0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725 -.word 0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70 -.word 0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926 -.word 0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df -.word 0x650a7354,0x8baf63de, 
0x766a0abb,0x3c77b2a8 -.word 0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b -.word 0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001 -.word 0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30 -.word 0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910 -.word 0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8 -.word 0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53 -.word 0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8 -.word 0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb -.word 0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3 -.word 0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60 -.word 0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec -.word 0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9 -.word 0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b -.word 0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207 -.word 0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178 -.word 0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6 -.word 0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b -.word 0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493 -.word 0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c -.word 0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a -.word 0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817 +WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) +WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) +WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) +WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) +WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) +WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) +WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) +WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) +WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) +WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) +WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) +WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) +WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) +WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) +WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) +WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) +WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) +WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) +WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) +WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) +WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) +WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) +WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) +WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) +WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) +WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) +WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) +WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) +WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) +WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) +WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) +WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) +WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) +WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) +WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) +WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) +WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) +WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) +WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) +WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .size K512,.-K512 +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-sha512_block_data_order +.skip 32-4 .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: sub r3,pc,#8 @ sha512_block_data_order add $len,$inp,$len,lsl#7 @ len to point at the 
end of inp +#if __ARM_ARCH__>=7 + ldr r12,.LOPENSSL_armcap + ldr r12,[r3,r12] @ OPENSSL_armcap_P + tst r12,#1 + bne .LNEON +#endif stmdb sp!,{r4-r12,lr} - sub $Ktbl,r3,#640 @ K512 + sub $Ktbl,r3,#672 @ K512 sub sp,sp,#9*8 ldr $Elo,[$ctx,#$Eoff+$lo] @@ -238,6 +270,7 @@ sha512_block_data_order: str $Thi,[sp,#$Foff+4] .L00_15: +#if __ARM_ARCH__<7 ldrb $Tlo,[$inp,#7] ldrb $t0, [$inp,#6] ldrb $t1, [$inp,#5] @@ -252,26 +285,30 @@ sha512_block_data_order: orr $Thi,$Thi,$t3,lsl#8 orr $Thi,$Thi,$t0,lsl#16 orr $Thi,$Thi,$t1,lsl#24 - str $Tlo,[sp,#$Xoff+0] - str $Thi,[sp,#$Xoff+4] +#else + ldr $Tlo,[$inp,#4] + ldr $Thi,[$inp],#8 +#ifdef __ARMEL__ + rev $Tlo,$Tlo + rev $Thi,$Thi +#endif +#endif ___ &BODY_00_15(0x94); $code.=<<___; tst $Ktbl,#1 beq .L00_15 - bic $Ktbl,$Ktbl,#1 - -.L16_79: ldr $t0,[sp,#`$Xoff+8*(16-1)`+0] ldr $t1,[sp,#`$Xoff+8*(16-1)`+4] - ldr $t2,[sp,#`$Xoff+8*(16-14)`+0] - ldr $t3,[sp,#`$Xoff+8*(16-14)`+4] - + bic $Ktbl,$Ktbl,#1 +.L16_79: @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 mov $Tlo,$t0,lsr#1 + ldr $t2,[sp,#`$Xoff+8*(16-14)`+0] mov $Thi,$t1,lsr#1 + ldr $t3,[sp,#`$Xoff+8*(16-14)`+4] eor $Tlo,$Tlo,$t1,lsl#31 eor $Thi,$Thi,$t0,lsl#31 eor $Tlo,$Tlo,$t0,lsr#8 @@ -295,25 +332,24 @@ $code.=<<___; eor $t1,$t1,$t3,lsl#3 eor $t0,$t0,$t2,lsr#6 eor $t1,$t1,$t3,lsr#6 + ldr $t2,[sp,#`$Xoff+8*(16-9)`+0] eor $t0,$t0,$t3,lsl#26 - ldr $t2,[sp,#`$Xoff+8*(16-9)`+0] ldr $t3,[sp,#`$Xoff+8*(16-9)`+4] adds $Tlo,$Tlo,$t0 + ldr $t0,[sp,#`$Xoff+8*16`+0] adc $Thi,$Thi,$t1 - ldr $t0,[sp,#`$Xoff+8*16`+0] ldr $t1,[sp,#`$Xoff+8*16`+4] adds $Tlo,$Tlo,$t2 adc $Thi,$Thi,$t3 adds $Tlo,$Tlo,$t0 adc $Thi,$Thi,$t1 - str $Tlo,[sp,#$Xoff+0] - str $Thi,[sp,#$Xoff+4] ___ &BODY_00_15(0x17); $code.=<<___; - tst $Ktbl,#1 + ldreq $t0,[sp,#`$Xoff+8*(16-1)`+0] + ldreq $t1,[sp,#`$Xoff+8*(16-1)`+4] beq .L16_79 bic $Ktbl,$Ktbl,#1 @@ -324,12 +360,12 @@ $code.=<<___; ldr $t2, [$ctx,#$Boff+$lo] ldr $t3, [$ctx,#$Boff+$hi] adds $t0,$Alo,$t0 - adc $t1,$Ahi,$t1 - adds $t2,$Tlo,$t2 - adc $t3,$Thi,$t3 str $t0, [$ctx,#$Aoff+$lo] + adc $t1,$Ahi,$t1 str $t1, [$ctx,#$Aoff+$hi] + adds $t2,$Tlo,$t2 str $t2, [$ctx,#$Boff+$lo] + adc $t3,$Thi,$t3 str $t3, [$ctx,#$Boff+$hi] ldr $Alo,[sp,#$Coff+0] @@ -341,12 +377,12 @@ $code.=<<___; ldr $t2, [$ctx,#$Doff+$lo] ldr $t3, [$ctx,#$Doff+$hi] adds $t0,$Alo,$t0 - adc $t1,$Ahi,$t1 - adds $t2,$Tlo,$t2 - adc $t3,$Thi,$t3 str $t0, [$ctx,#$Coff+$lo] + adc $t1,$Ahi,$t1 str $t1, [$ctx,#$Coff+$hi] + adds $t2,$Tlo,$t2 str $t2, [$ctx,#$Doff+$lo] + adc $t3,$Thi,$t3 str $t3, [$ctx,#$Doff+$hi] ldr $Tlo,[sp,#$Foff+0] @@ -356,12 +392,12 @@ $code.=<<___; ldr $t2, [$ctx,#$Foff+$lo] ldr $t3, [$ctx,#$Foff+$hi] adds $Elo,$Elo,$t0 - adc $Ehi,$Ehi,$t1 - adds $t2,$Tlo,$t2 - adc $t3,$Thi,$t3 str $Elo,[$ctx,#$Eoff+$lo] + adc $Ehi,$Ehi,$t1 str $Ehi,[$ctx,#$Eoff+$hi] + adds $t2,$Tlo,$t2 str $t2, [$ctx,#$Foff+$lo] + adc $t3,$Thi,$t3 str $t3, [$ctx,#$Foff+$hi] ldr $Alo,[sp,#$Goff+0] @@ -373,12 +409,12 @@ $code.=<<___; ldr $t2, [$ctx,#$Hoff+$lo] ldr $t3, [$ctx,#$Hoff+$hi] adds $t0,$Alo,$t0 - adc $t1,$Ahi,$t1 - adds $t2,$Tlo,$t2 - adc $t3,$Thi,$t3 str $t0, [$ctx,#$Goff+$lo] + adc $t1,$Ahi,$t1 str $t1, [$ctx,#$Goff+$hi] + adds $t2,$Tlo,$t2 str $t2, [$ctx,#$Hoff+$lo] + adc $t3,$Thi,$t3 str $t3, [$ctx,#$Hoff+$hi] add sp,sp,#640 @@ -388,13 +424,156 @@ $code.=<<___; bne .Loop add sp,sp,#8*9 @ destroy frame +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else ldmia sp!,{r4-r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet bx lr @ 
interoperable with Thumb ISA:-) -.size sha512_block_data_order,.-sha512_block_data_order -.asciz "SHA512 block transform for ARMv4, CRYPTOGAMS by " +#endif +___ + +{ +my @Sigma0=(28,34,39); +my @Sigma1=(14,18,41); +my @sigma0=(1, 8, 7); +my @sigma1=(19,61,6); + +my $Ktbl="r3"; +my $cnt="r12"; # volatile register known as ip, intra-procedure-call scratch + +my @X=map("d$_",(0..15)); +my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23)); + +sub NEON_00_15() { +my $i=shift; +my ($a,$b,$c,$d,$e,$f,$g,$h)=@_; +my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31)); # temps + +$code.=<<___ if ($i<16 || $i&1); + vshr.u64 $t0,$e,#@Sigma1[0] @ $i +#if $i<16 + vld1.64 {@X[$i%16]},[$inp]! @ handles unaligned +#endif + vshr.u64 $t1,$e,#@Sigma1[1] + vshr.u64 $t2,$e,#@Sigma1[2] +___ +$code.=<<___; + vld1.64 {$K},[$Ktbl,:64]! @ K[i++] + vsli.64 $t0,$e,#`64-@Sigma1[0]` + vsli.64 $t1,$e,#`64-@Sigma1[1]` + vsli.64 $t2,$e,#`64-@Sigma1[2]` +#if $i<16 && defined(__ARMEL__) + vrev64.8 @X[$i],@X[$i] +#endif + vadd.i64 $T1,$K,$h + veor $Ch,$f,$g + veor $t0,$t1 + vand $Ch,$e + veor $t0,$t2 @ Sigma1(e) + veor $Ch,$g @ Ch(e,f,g) + vadd.i64 $T1,$t0 + vshr.u64 $t0,$a,#@Sigma0[0] + vadd.i64 $T1,$Ch + vshr.u64 $t1,$a,#@Sigma0[1] + vshr.u64 $t2,$a,#@Sigma0[2] + vsli.64 $t0,$a,#`64-@Sigma0[0]` + vsli.64 $t1,$a,#`64-@Sigma0[1]` + vsli.64 $t2,$a,#`64-@Sigma0[2]` + vadd.i64 $T1,@X[$i%16] + vorr $Maj,$a,$c + vand $Ch,$a,$c + veor $h,$t0,$t1 + vand $Maj,$b + veor $h,$t2 @ Sigma0(a) + vorr $Maj,$Ch @ Maj(a,b,c) + vadd.i64 $h,$T1 + vadd.i64 $d,$T1 + vadd.i64 $h,$Maj +___ +} + +sub NEON_16_79() { +my $i=shift; + +if ($i&1) { &NEON_00_15($i,@_); return; } + +# 2x-vectorized, therefore runs every 2nd round +my @X=map("q$_",(0..7)); # view @X as 128-bit vector +my ($t0,$t1,$s0,$s1) = map("q$_",(12..15)); # temps +my ($d0,$d1,$d2) = map("d$_",(24..26)); # temps from NEON_00_15 +my $e=@_[4]; # $e from NEON_00_15 +$i /= 2; +$code.=<<___; + vshr.u64 $t0,@X[($i+7)%8],#@sigma1[0] + vshr.u64 $t1,@X[($i+7)%8],#@sigma1[1] + vshr.u64 $s1,@X[($i+7)%8],#@sigma1[2] + vsli.64 $t0,@X[($i+7)%8],#`64-@sigma1[0]` + vext.8 $s0,@X[$i%8],@X[($i+1)%8],#8 @ X[i+1] + vsli.64 $t1,@X[($i+7)%8],#`64-@sigma1[1]` + veor $s1,$t0 + vshr.u64 $t0,$s0,#@sigma0[0] + veor $s1,$t1 @ sigma1(X[i+14]) + vshr.u64 $t1,$s0,#@sigma0[1] + vadd.i64 @X[$i%8],$s1 + vshr.u64 $s1,$s0,#@sigma0[2] + vsli.64 $t0,$s0,#`64-@sigma0[0]` + vsli.64 $t1,$s0,#`64-@sigma0[1]` + vext.8 $s0,@X[($i+4)%8],@X[($i+5)%8],#8 @ X[i+9] + veor $s1,$t0 + vshr.u64 $d0,$e,#@Sigma1[0] @ from NEON_00_15 + vadd.i64 @X[$i%8],$s0 + vshr.u64 $d1,$e,#@Sigma1[1] @ from NEON_00_15 + veor $s1,$t1 @ sigma0(X[i+1]) + vshr.u64 $d2,$e,#@Sigma1[2] @ from NEON_00_15 + vadd.i64 @X[$i%8],$s1 +___ + &NEON_00_15(2*$i,@_); +} + +$code.=<<___; +#if __ARM_ARCH__>=7 +.fpu neon + +.align 4 +.LNEON: + dmb @ errata #451034 on early Cortex A8 + vstmdb sp!,{d8-d15} @ ABI specification says so + sub $Ktbl,r3,#672 @ K512 + vldmia $ctx,{$A-$H} @ load context +.Loop_neon: +___ +for($i=0;$i<16;$i++) { &NEON_00_15($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + mov $cnt,#4 +.L16_79_neon: + subs $cnt,#1 +___ +for(;$i<32;$i++) { &NEON_16_79($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + bne .L16_79_neon + + vldmia $ctx,{d24-d31} @ load context to temp + vadd.i64 q8,q12 @ vectorized accumulate + vadd.i64 q9,q13 + vadd.i64 q10,q14 + vadd.i64 q11,q15 + vstmia $ctx,{$A-$H} @ save context + teq $inp,$len + sub $Ktbl,#640 @ rewind K512 + bne .Loop_neon + + vldmia sp!,{d8-d15} @ epilogue + bx lr +#endif +___ +} +$code.=<<___; +.size 
sha512_block_data_order,.-sha512_block_data_order +.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by " .align 2 +.comm OPENSSL_armcap_P,4,4 ___ $code =~ s/\`([^\`]*)\`/eval $1/gem; diff --git a/app/openssl/crypto/sha/asm/sha512-armv4.s b/app/openssl/crypto/sha/asm/sha512-armv4.s index b030c16c..57301922 100644 --- a/app/openssl/crypto/sha/asm/sha512-armv4.s +++ b/app/openssl/crypto/sha/asm/sha512-armv4.s @@ -1,90 +1,111 @@ +#include "arm_arch.h" +#ifdef __ARMEL__ +# define LO 0 +# define HI 4 +# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 +#else +# define HI 0 +# define LO 4 +# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 +#endif + .text .code 32 .type K512,%object .align 5 K512: -.word 0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd -.word 0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc -.word 0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019 -.word 0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118 -.word 0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe -.word 0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2 -.word 0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1 -.word 0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694 -.word 0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3 -.word 0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65 -.word 0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483 -.word 0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5 -.word 0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210 -.word 0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4 -.word 0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725 -.word 0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70 -.word 0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926 -.word 0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df -.word 0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8 -.word 0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b -.word 0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001 -.word 0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30 -.word 0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910 -.word 0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8 -.word 0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53 -.word 0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8 -.word 0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb -.word 0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3 -.word 0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60 -.word 0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec -.word 0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9 -.word 0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b -.word 0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207 -.word 0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178 -.word 0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6 -.word 0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b -.word 0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493 -.word 0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c -.word 0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a -.word 0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817 +WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) +WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) +WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) +WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) +WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) +WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) +WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) +WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) +WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) +WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) +WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) +WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) +WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) +WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) 
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) +WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) +WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) +WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) +WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) +WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) +WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) +WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) +WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) +WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) +WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) +WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) +WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) +WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) +WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) +WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) +WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) +WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) +WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) +WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) +WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) +WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) +WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) +WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) +WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) +WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .size K512,.-K512 +.LOPENSSL_armcap: +.word OPENSSL_armcap_P-sha512_block_data_order +.skip 32-4 .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: sub r3,pc,#8 @ sha512_block_data_order add r2,r1,r2,lsl#7 @ len to point at the end of inp +#if __ARM_ARCH__>=7 + ldr r12,.LOPENSSL_armcap + ldr r12,[r3,r12] @ OPENSSL_armcap_P + tst r12,#1 + bne .LNEON +#endif stmdb sp!,{r4-r12,lr} - sub r14,r3,#640 @ K512 + sub r14,r3,#672 @ K512 sub sp,sp,#9*8 - ldr r7,[r0,#32+4] - ldr r8,[r0,#32+0] - ldr r9, [r0,#48+4] - ldr r10, [r0,#48+0] - ldr r11, [r0,#56+4] - ldr r12, [r0,#56+0] + ldr r7,[r0,#32+LO] + ldr r8,[r0,#32+HI] + ldr r9, [r0,#48+LO] + ldr r10, [r0,#48+HI] + ldr r11, [r0,#56+LO] + ldr r12, [r0,#56+HI] .Loop: str r9, [sp,#48+0] str r10, [sp,#48+4] str r11, [sp,#56+0] str r12, [sp,#56+4] - ldr r5,[r0,#0+4] - ldr r6,[r0,#0+0] - ldr r3,[r0,#8+4] - ldr r4,[r0,#8+0] - ldr r9, [r0,#16+4] - ldr r10, [r0,#16+0] - ldr r11, [r0,#24+4] - ldr r12, [r0,#24+0] + ldr r5,[r0,#0+LO] + ldr r6,[r0,#0+HI] + ldr r3,[r0,#8+LO] + ldr r4,[r0,#8+HI] + ldr r9, [r0,#16+LO] + ldr r10, [r0,#16+HI] + ldr r11, [r0,#24+LO] + ldr r12, [r0,#24+HI] str r3,[sp,#8+0] str r4,[sp,#8+4] str r9, [sp,#16+0] str r10, [sp,#16+4] str r11, [sp,#24+0] str r12, [sp,#24+4] - ldr r3,[r0,#40+4] - ldr r4,[r0,#40+0] + ldr r3,[r0,#40+LO] + ldr r4,[r0,#40+HI] str r3,[sp,#40+0] str r4,[sp,#40+4] .L00_15: +#if __ARM_ARCH__<7 ldrb r3,[r1,#7] ldrb r9, [r1,#6] ldrb r10, [r1,#5] @@ -99,17 +120,25 @@ sha512_block_data_order: orr r4,r4,r12,lsl#8 orr r4,r4,r9,lsl#16 orr r4,r4,r10,lsl#24 - str r3,[sp,#64+0] - str r4,[sp,#64+4] - ldr r11,[sp,#56+0] @ h.lo - ldr r12,[sp,#56+4] @ h.hi +#else + ldr r3,[r1,#4] + ldr r4,[r1],#8 +#ifdef __ARMEL__ + rev r3,r3 + rev r4,r4 +#endif +#endif @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 + str r3,[sp,#64+0] mov r10,r8,lsr#14 + str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 + ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 + ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor 
r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 @@ -136,25 +165,24 @@ sha512_block_data_order: and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 - ldr r11,[r14,#4] @ K[i].lo + ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#0] @ K[i].hi + ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 + and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 + ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T - - and r9,r11,#0xff teq r9,#148 - orreq r14,r14,#1 - ldr r11,[sp,#8+0] @ b.lo ldr r12,[sp,#16+0] @ c.lo + orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 @@ -171,38 +199,36 @@ sha512_block_data_order: eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 @ Sigma0(a) adds r3,r3,r9 + and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) - and r9,r5,r11 - orr r5,r5,r11 ldr r10,[sp,#8+4] @ b.hi + orr r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 - orr r5,r5,r9 @ Maj(a,b,c).lo and r12,r6,r10 orr r6,r6,r10 + orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 - orr r6,r6,r12 @ Maj(a,b,c).hi adds r5,r5,r3 - adc r6,r6,r4 @ h += T - + orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 + adc r6,r6,r4 @ h += T + tst r14,#1 add r14,r14,#8 tst r14,#1 beq .L00_15 - bic r14,r14,#1 - -.L16_79: ldr r9,[sp,#184+0] ldr r10,[sp,#184+4] - ldr r11,[sp,#80+0] - ldr r12,[sp,#80+4] - + bic r14,r14,#1 +.L16_79: @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 mov r3,r9,lsr#1 + ldr r11,[sp,#80+0] mov r4,r10,lsr#1 + ldr r12,[sp,#80+4] eor r3,r3,r10,lsl#31 eor r4,r4,r9,lsl#31 eor r3,r3,r9,lsr#8 @@ -226,30 +252,30 @@ sha512_block_data_order: eor r10,r10,r12,lsl#3 eor r9,r9,r11,lsr#6 eor r10,r10,r12,lsr#6 + ldr r11,[sp,#120+0] eor r9,r9,r12,lsl#26 - ldr r11,[sp,#120+0] ldr r12,[sp,#120+4] adds r3,r3,r9 + ldr r9,[sp,#192+0] adc r4,r4,r10 - ldr r9,[sp,#192+0] ldr r10,[sp,#192+4] adds r3,r3,r11 adc r4,r4,r12 adds r3,r3,r9 adc r4,r4,r10 - str r3,[sp,#64+0] - str r4,[sp,#64+4] - ldr r11,[sp,#56+0] @ h.lo - ldr r12,[sp,#56+4] @ h.hi @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 mov r9,r7,lsr#14 + str r3,[sp,#64+0] mov r10,r8,lsr#14 + str r4,[sp,#64+4] eor r9,r9,r8,lsl#18 + ldr r11,[sp,#56+0] @ h.lo eor r10,r10,r7,lsl#18 + ldr r12,[sp,#56+4] @ h.hi eor r9,r9,r7,lsr#18 eor r10,r10,r8,lsr#18 eor r9,r9,r8,lsl#14 @@ -276,25 +302,24 @@ sha512_block_data_order: and r10,r10,r8 str r6,[sp,#0+4] eor r9,r9,r11 - ldr r11,[r14,#4] @ K[i].lo + ldr r11,[r14,#LO] @ K[i].lo eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#0] @ K[i].hi + ldr r12,[r14,#HI] @ K[i].hi adds r3,r3,r9 ldr r7,[sp,#24+0] @ d.lo adc r4,r4,r10 @ T += Ch(e,f,g) ldr r8,[sp,#24+4] @ d.hi adds r3,r3,r11 + and r9,r11,#0xff adc r4,r4,r12 @ T += K[i] adds r7,r7,r3 + ldr r11,[sp,#8+0] @ b.lo adc r8,r8,r4 @ d += T - - and r9,r11,#0xff teq r9,#23 - orreq r14,r14,#1 - ldr r11,[sp,#8+0] @ b.lo ldr r12,[sp,#16+0] @ c.lo + orreq r14,r14,#1 @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 @@ -311,90 +336,91 @@ sha512_block_data_order: eor r9,r9,r5,lsl#25 eor r10,r10,r6,lsl#25 @ Sigma0(a) adds r3,r3,r9 + and r9,r5,r11 adc r4,r4,r10 @ T += Sigma0(a) - and r9,r5,r11 - orr r5,r5,r11 ldr r10,[sp,#8+4] @ b.hi + orr 
r5,r5,r11 ldr r11,[sp,#16+4] @ c.hi and r5,r5,r12 - orr r5,r5,r9 @ Maj(a,b,c).lo and r12,r6,r10 orr r6,r6,r10 + orr r5,r5,r9 @ Maj(a,b,c).lo and r6,r6,r11 - orr r6,r6,r12 @ Maj(a,b,c).hi adds r5,r5,r3 - adc r6,r6,r4 @ h += T - + orr r6,r6,r12 @ Maj(a,b,c).hi sub sp,sp,#8 - add r14,r14,#8 + adc r6,r6,r4 @ h += T tst r14,#1 + add r14,r14,#8 + ldreq r9,[sp,#184+0] + ldreq r10,[sp,#184+4] beq .L16_79 bic r14,r14,#1 ldr r3,[sp,#8+0] ldr r4,[sp,#8+4] - ldr r9, [r0,#0+4] - ldr r10, [r0,#0+0] - ldr r11, [r0,#8+4] - ldr r12, [r0,#8+0] + ldr r9, [r0,#0+LO] + ldr r10, [r0,#0+HI] + ldr r11, [r0,#8+LO] + ldr r12, [r0,#8+HI] adds r9,r5,r9 + str r9, [r0,#0+LO] adc r10,r6,r10 + str r10, [r0,#0+HI] adds r11,r3,r11 + str r11, [r0,#8+LO] adc r12,r4,r12 - str r9, [r0,#0+4] - str r10, [r0,#0+0] - str r11, [r0,#8+4] - str r12, [r0,#8+0] + str r12, [r0,#8+HI] ldr r5,[sp,#16+0] ldr r6,[sp,#16+4] ldr r3,[sp,#24+0] ldr r4,[sp,#24+4] - ldr r9, [r0,#16+4] - ldr r10, [r0,#16+0] - ldr r11, [r0,#24+4] - ldr r12, [r0,#24+0] + ldr r9, [r0,#16+LO] + ldr r10, [r0,#16+HI] + ldr r11, [r0,#24+LO] + ldr r12, [r0,#24+HI] adds r9,r5,r9 + str r9, [r0,#16+LO] adc r10,r6,r10 + str r10, [r0,#16+HI] adds r11,r3,r11 + str r11, [r0,#24+LO] adc r12,r4,r12 - str r9, [r0,#16+4] - str r10, [r0,#16+0] - str r11, [r0,#24+4] - str r12, [r0,#24+0] + str r12, [r0,#24+HI] ldr r3,[sp,#40+0] ldr r4,[sp,#40+4] - ldr r9, [r0,#32+4] - ldr r10, [r0,#32+0] - ldr r11, [r0,#40+4] - ldr r12, [r0,#40+0] + ldr r9, [r0,#32+LO] + ldr r10, [r0,#32+HI] + ldr r11, [r0,#40+LO] + ldr r12, [r0,#40+HI] adds r7,r7,r9 + str r7,[r0,#32+LO] adc r8,r8,r10 + str r8,[r0,#32+HI] adds r11,r3,r11 + str r11, [r0,#40+LO] adc r12,r4,r12 - str r7,[r0,#32+4] - str r8,[r0,#32+0] - str r11, [r0,#40+4] - str r12, [r0,#40+0] + str r12, [r0,#40+HI] ldr r5,[sp,#48+0] ldr r6,[sp,#48+4] ldr r3,[sp,#56+0] ldr r4,[sp,#56+4] - ldr r9, [r0,#48+4] - ldr r10, [r0,#48+0] - ldr r11, [r0,#56+4] - ldr r12, [r0,#56+0] + ldr r9, [r0,#48+LO] + ldr r10, [r0,#48+HI] + ldr r11, [r0,#56+LO] + ldr r12, [r0,#56+HI] adds r9,r5,r9 + str r9, [r0,#48+LO] adc r10,r6,r10 + str r10, [r0,#48+HI] adds r11,r3,r11 + str r11, [r0,#56+LO] adc r12,r4,r12 - str r9, [r0,#48+4] - str r10, [r0,#48+0] - str r11, [r0,#56+4] - str r12, [r0,#56+0] + str r12, [r0,#56+HI] add sp,sp,#640 sub r14,r14,#640 @@ -403,10 +429,1355 @@ sha512_block_data_order: bne .Loop add sp,sp,#8*9 @ destroy frame +#if __ARM_ARCH__>=5 + ldmia sp!,{r4-r12,pc} +#else ldmia sp!,{r4-r12,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) -.size sha512_block_data_order,.-sha512_block_data_order -.asciz "SHA512 block transform for ARMv4, CRYPTOGAMS by " +#endif +#if __ARM_ARCH__>=7 +.fpu neon + +.align 4 +.LNEON: + dmb @ errata #451034 on early Cortex A8 + vstmdb sp!,{d8-d15} @ ABI specification says so + sub r3,r3,#672 @ K512 + vldmia r0,{d16-d23} @ load context +.Loop_neon: + vshr.u64 d24,d20,#14 @ 0 +#if 0<16 + vld1.64 {d0},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d20,#18 + vshr.u64 d26,d20,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 0<16 && defined(__ARMEL__) + vrev64.8 d0,d0 +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d0 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 1 +#if 1<16 + vld1.64 {d1},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 1<16 && defined(__ARMEL__) + vrev64.8 d1,d1 +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d1 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 d24,d18,#14 @ 2 +#if 2<16 + vld1.64 {d2},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d18,#18 + vshr.u64 d26,d18,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 2<16 && defined(__ARMEL__) + vrev64.8 d2,d2 +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d2 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 3 +#if 3<16 + vld1.64 {d3},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 3<16 && defined(__ARMEL__) + vrev64.8 d3,d3 +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d3 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 d24,d16,#14 @ 4 +#if 4<16 + vld1.64 {d4},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d16,#18 + vshr.u64 d26,d16,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 4<16 && defined(__ARMEL__) + vrev64.8 d4,d4 +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d4 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 5 +#if 5<16 + vld1.64 {d5},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 5<16 && defined(__ARMEL__) + vrev64.8 d5,d5 +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d5 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 d24,d22,#14 @ 6 +#if 6<16 + vld1.64 {d6},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d22,#18 + vshr.u64 d26,d22,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 6<16 && defined(__ARMEL__) + vrev64.8 d6,d6 +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d6 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 7 +#if 7<16 + vld1.64 {d7},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 7<16 && defined(__ARMEL__) + vrev64.8 d7,d7 +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d7 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + vshr.u64 d24,d20,#14 @ 8 +#if 8<16 + vld1.64 {d8},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d20,#18 + vshr.u64 d26,d20,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 8<16 && defined(__ARMEL__) + vrev64.8 d8,d8 +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d8 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 9 +#if 9<16 + vld1.64 {d9},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 9<16 && defined(__ARMEL__) + vrev64.8 d9,d9 +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d9 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 d24,d18,#14 @ 10 +#if 10<16 + vld1.64 {d10},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d18,#18 + vshr.u64 d26,d18,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 10<16 && defined(__ARMEL__) + vrev64.8 d10,d10 +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d10 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 11 +#if 11<16 + vld1.64 {d11},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 11<16 && defined(__ARMEL__) + vrev64.8 d11,d11 +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d11 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 d24,d16,#14 @ 12 +#if 12<16 + vld1.64 {d12},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d16,#18 + vshr.u64 d26,d16,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 12<16 && defined(__ARMEL__) + vrev64.8 d12,d12 +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d12 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 13 +#if 13<16 + vld1.64 {d13},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 13<16 && defined(__ARMEL__) + vrev64.8 d13,d13 +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d13 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 d24,d22,#14 @ 14 +#if 14<16 + vld1.64 {d14},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d22,#18 + vshr.u64 d26,d22,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 14<16 && defined(__ARMEL__) + vrev64.8 d14,d14 +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d14 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 15 +#if 15<16 + vld1.64 {d15},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 15<16 && defined(__ARMEL__) + vrev64.8 d15,d15 +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d15 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + mov r12,#4 +.L16_79_neon: + subs r12,#1 + vshr.u64 q12,q7,#19 + vshr.u64 q13,q7,#61 + vshr.u64 q15,q7,#6 + vsli.64 q12,q7,#45 + vext.8 q14,q0,q1,#8 @ X[i+1] + vsli.64 q13,q7,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q0,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q4,q5,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d20,#14 @ from NEON_00_15 + vadd.i64 q0,q14 + vshr.u64 d25,d20,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d20,#41 @ from NEON_00_15 + vadd.i64 q0,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 16<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d0 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 17 +#if 17<16 + vld1.64 {d1},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 17<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d1 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 q12,q0,#19 + vshr.u64 q13,q0,#61 + vshr.u64 q15,q0,#6 + vsli.64 q12,q0,#45 + vext.8 q14,q1,q2,#8 @ X[i+1] + vsli.64 q13,q0,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q1,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q5,q6,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d18,#14 @ from NEON_00_15 + vadd.i64 q1,q14 + vshr.u64 d25,d18,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d18,#41 @ from NEON_00_15 + vadd.i64 q1,q15 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 18<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d2 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 19 +#if 19<16 + vld1.64 {d3},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 19<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d3 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 q12,q1,#19 + vshr.u64 q13,q1,#61 + vshr.u64 q15,q1,#6 + vsli.64 q12,q1,#45 + vext.8 q14,q2,q3,#8 @ X[i+1] + vsli.64 q13,q1,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q2,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q6,q7,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d16,#14 @ from NEON_00_15 + vadd.i64 q2,q14 + vshr.u64 d25,d16,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d16,#41 @ from NEON_00_15 + vadd.i64 q2,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 20<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d4 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 21 +#if 21<16 + vld1.64 {d5},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 21<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d5 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 q12,q2,#19 + vshr.u64 q13,q2,#61 + vshr.u64 q15,q2,#6 + vsli.64 q12,q2,#45 + vext.8 q14,q3,q4,#8 @ X[i+1] + vsli.64 q13,q2,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q3,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q7,q0,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d22,#14 @ from NEON_00_15 + vadd.i64 q3,q14 + vshr.u64 d25,d22,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d22,#41 @ from NEON_00_15 + vadd.i64 q3,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 22<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d6 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 23 +#if 23<16 + vld1.64 {d7},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 23<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d7 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + vshr.u64 q12,q3,#19 + vshr.u64 q13,q3,#61 + vshr.u64 q15,q3,#6 + vsli.64 q12,q3,#45 + vext.8 q14,q4,q5,#8 @ X[i+1] + vsli.64 q13,q3,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q4,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q0,q1,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d20,#14 @ from NEON_00_15 + vadd.i64 q4,q14 + vshr.u64 d25,d20,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d20,#41 @ from NEON_00_15 + vadd.i64 q4,q15 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d20,#50 + vsli.64 d25,d20,#46 + vsli.64 d26,d20,#23 +#if 24<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d23 + veor d29,d21,d22 + veor d24,d25 + vand d29,d20 + veor d24,d26 @ Sigma1(e) + veor d29,d22 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d16,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d16,#34 + vshr.u64 d26,d16,#39 + vsli.64 d24,d16,#36 + vsli.64 d25,d16,#30 + vsli.64 d26,d16,#25 + vadd.i64 d27,d8 + vorr d30,d16,d18 + vand d29,d16,d18 + veor d23,d24,d25 + vand d30,d17 + veor d23,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d23,d27 + vadd.i64 d19,d27 + vadd.i64 d23,d30 + vshr.u64 d24,d19,#14 @ 25 +#if 25<16 + vld1.64 {d9},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d19,#18 + vshr.u64 d26,d19,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d19,#50 + vsli.64 d25,d19,#46 + vsli.64 d26,d19,#23 +#if 25<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d22 + veor d29,d20,d21 + veor d24,d25 + vand d29,d19 + veor d24,d26 @ Sigma1(e) + veor d29,d21 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d23,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d23,#34 + vshr.u64 d26,d23,#39 + vsli.64 d24,d23,#36 + vsli.64 d25,d23,#30 + vsli.64 d26,d23,#25 + vadd.i64 d27,d9 + vorr d30,d23,d17 + vand d29,d23,d17 + veor d22,d24,d25 + vand d30,d16 + veor d22,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d22,d27 + vadd.i64 d18,d27 + vadd.i64 d22,d30 + vshr.u64 q12,q4,#19 + vshr.u64 q13,q4,#61 + vshr.u64 q15,q4,#6 + vsli.64 q12,q4,#45 + vext.8 q14,q5,q6,#8 @ X[i+1] + vsli.64 q13,q4,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q5,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q1,q2,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d18,#14 @ from NEON_00_15 + vadd.i64 q5,q14 + vshr.u64 d25,d18,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d18,#41 @ from NEON_00_15 + vadd.i64 q5,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d18,#50 + vsli.64 d25,d18,#46 + vsli.64 d26,d18,#23 +#if 26<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d21 + veor d29,d19,d20 + veor d24,d25 + vand d29,d18 + veor d24,d26 @ Sigma1(e) + veor d29,d20 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d22,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d22,#34 + vshr.u64 d26,d22,#39 + vsli.64 d24,d22,#36 + vsli.64 d25,d22,#30 + vsli.64 d26,d22,#25 + vadd.i64 d27,d10 + vorr d30,d22,d16 + vand d29,d22,d16 + veor d21,d24,d25 + vand d30,d23 + veor d21,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d21,d27 + vadd.i64 d17,d27 + vadd.i64 d21,d30 + vshr.u64 d24,d17,#14 @ 27 +#if 27<16 + vld1.64 {d11},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d17,#18 + vshr.u64 d26,d17,#41 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d17,#50 + vsli.64 d25,d17,#46 + vsli.64 d26,d17,#23 +#if 27<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d20 + veor d29,d18,d19 + veor d24,d25 + vand d29,d17 + veor d24,d26 @ Sigma1(e) + veor d29,d19 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d21,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d21,#34 + vshr.u64 d26,d21,#39 + vsli.64 d24,d21,#36 + vsli.64 d25,d21,#30 + vsli.64 d26,d21,#25 + vadd.i64 d27,d11 + vorr d30,d21,d23 + vand d29,d21,d23 + veor d20,d24,d25 + vand d30,d22 + veor d20,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d20,d27 + vadd.i64 d16,d27 + vadd.i64 d20,d30 + vshr.u64 q12,q5,#19 + vshr.u64 q13,q5,#61 + vshr.u64 q15,q5,#6 + vsli.64 q12,q5,#45 + vext.8 q14,q6,q7,#8 @ X[i+1] + vsli.64 q13,q5,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q6,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q2,q3,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d16,#14 @ from NEON_00_15 + vadd.i64 q6,q14 + vshr.u64 d25,d16,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d16,#41 @ from NEON_00_15 + vadd.i64 q6,q15 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d16,#50 + vsli.64 d25,d16,#46 + vsli.64 d26,d16,#23 +#if 28<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d19 + veor d29,d17,d18 + veor d24,d25 + vand d29,d16 + veor d24,d26 @ Sigma1(e) + veor d29,d18 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d20,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d20,#34 + vshr.u64 d26,d20,#39 + vsli.64 d24,d20,#36 + vsli.64 d25,d20,#30 + vsli.64 d26,d20,#25 + vadd.i64 d27,d12 + vorr d30,d20,d22 + vand d29,d20,d22 + veor d19,d24,d25 + vand d30,d21 + veor d19,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d19,d27 + vadd.i64 d23,d27 + vadd.i64 d19,d30 + vshr.u64 d24,d23,#14 @ 29 +#if 29<16 + vld1.64 {d13},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d23,#18 + vshr.u64 d26,d23,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d23,#50 + vsli.64 d25,d23,#46 + vsli.64 d26,d23,#23 +#if 29<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d18 + veor d29,d16,d17 + veor d24,d25 + vand d29,d23 + veor d24,d26 @ Sigma1(e) + veor d29,d17 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d19,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d19,#34 + vshr.u64 d26,d19,#39 + vsli.64 d24,d19,#36 + vsli.64 d25,d19,#30 + vsli.64 d26,d19,#25 + vadd.i64 d27,d13 + vorr d30,d19,d21 + vand d29,d19,d21 + veor d18,d24,d25 + vand d30,d20 + veor d18,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d18,d27 + vadd.i64 d22,d27 + vadd.i64 d18,d30 + vshr.u64 q12,q6,#19 + vshr.u64 q13,q6,#61 + vshr.u64 q15,q6,#6 + vsli.64 q12,q6,#45 + vext.8 q14,q7,q0,#8 @ X[i+1] + vsli.64 q13,q6,#3 + veor q15,q12 + vshr.u64 q12,q14,#1 + veor q15,q13 @ sigma1(X[i+14]) + vshr.u64 q13,q14,#8 + vadd.i64 q7,q15 + vshr.u64 q15,q14,#7 + vsli.64 q12,q14,#63 + vsli.64 q13,q14,#56 + vext.8 q14,q3,q4,#8 @ X[i+9] + veor q15,q12 + vshr.u64 d24,d22,#14 @ from NEON_00_15 + vadd.i64 q7,q14 + vshr.u64 d25,d22,#18 @ from NEON_00_15 + veor q15,q13 @ sigma0(X[i+1]) + vshr.u64 d26,d22,#41 @ from NEON_00_15 + vadd.i64 q7,q15 + vld1.64 {d28},[r3,:64]! 
@ K[i++] + vsli.64 d24,d22,#50 + vsli.64 d25,d22,#46 + vsli.64 d26,d22,#23 +#if 30<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d17 + veor d29,d23,d16 + veor d24,d25 + vand d29,d22 + veor d24,d26 @ Sigma1(e) + veor d29,d16 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d18,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d18,#34 + vshr.u64 d26,d18,#39 + vsli.64 d24,d18,#36 + vsli.64 d25,d18,#30 + vsli.64 d26,d18,#25 + vadd.i64 d27,d14 + vorr d30,d18,d20 + vand d29,d18,d20 + veor d17,d24,d25 + vand d30,d19 + veor d17,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d17,d27 + vadd.i64 d21,d27 + vadd.i64 d17,d30 + vshr.u64 d24,d21,#14 @ 31 +#if 31<16 + vld1.64 {d15},[r1]! @ handles unaligned +#endif + vshr.u64 d25,d21,#18 + vshr.u64 d26,d21,#41 + vld1.64 {d28},[r3,:64]! @ K[i++] + vsli.64 d24,d21,#50 + vsli.64 d25,d21,#46 + vsli.64 d26,d21,#23 +#if 31<16 && defined(__ARMEL__) + vrev64.8 , +#endif + vadd.i64 d27,d28,d16 + veor d29,d22,d23 + veor d24,d25 + vand d29,d21 + veor d24,d26 @ Sigma1(e) + veor d29,d23 @ Ch(e,f,g) + vadd.i64 d27,d24 + vshr.u64 d24,d17,#28 + vadd.i64 d27,d29 + vshr.u64 d25,d17,#34 + vshr.u64 d26,d17,#39 + vsli.64 d24,d17,#36 + vsli.64 d25,d17,#30 + vsli.64 d26,d17,#25 + vadd.i64 d27,d15 + vorr d30,d17,d19 + vand d29,d17,d19 + veor d16,d24,d25 + vand d30,d18 + veor d16,d26 @ Sigma0(a) + vorr d30,d29 @ Maj(a,b,c) + vadd.i64 d16,d27 + vadd.i64 d20,d27 + vadd.i64 d16,d30 + bne .L16_79_neon + + vldmia r0,{d24-d31} @ load context to temp + vadd.i64 q8,q12 @ vectorized accumulate + vadd.i64 q9,q13 + vadd.i64 q10,q14 + vadd.i64 q11,q15 + vstmia r0,{d16-d23} @ save context + teq r1,r2 + sub r3,#640 @ rewind K512 + bne .Loop_neon + + vldmia sp!,{d8-d15} @ epilogue + .word 0xe12fff1e +#endif +.size sha512_block_data_order,.-sha512_block_data_order +.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by " .align 2 +.comm OPENSSL_armcap_P,4,4 diff --git a/app/openssl/crypto/sha/asm/sha512-mips.pl b/app/openssl/crypto/sha/asm/sha512-mips.pl new file mode 100644 index 00000000..ffa053bb --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha512-mips.pl @@ -0,0 +1,455 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# SHA2 block procedures for MIPS. + +# October 2010. +# +# SHA256 performance improvement on MIPS R5000 CPU is ~27% over gcc- +# generated code in o32 build and ~55% in n32/64 build. SHA512 [which +# for now can only be compiled for MIPS64 ISA] improvement is modest +# ~17%, but it comes for free, because it's same instruction sequence. +# Improvement coefficients are for aligned input. + +###################################################################### +# There is a number of MIPS ABI in use, O32 and N32/64 are most +# widely used. Then there is a new contender: NUBI. It appears that if +# one picks the latter, it's possible to arrange code in ABI neutral +# manner. Therefore let's stick to NUBI register layout: +# +($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25)); +($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23)); +($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31)); +# +# The return value is placed in $a0. 
Following coding rules facilitate +# interoperability: +# +# - never ever touch $tp, "thread pointer", former $gp [o32 can be +# excluded from the rule, because it's specified volatile]; +# - copy return value to $t0, former $v0 [or to $a0 if you're adapting +# old code]; +# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary; +# +# For reference here is register layout for N32/64 MIPS ABIs: +# +# ($zero,$at,$v0,$v1)=map("\$$_",(0..3)); +# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); +# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25)); +# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23)); +# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31)); +# +$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64 + +if ($flavour =~ /64|n32/i) { + $PTR_ADD="dadd"; # incidentally works even on n32 + $PTR_SUB="dsub"; # incidentally works even on n32 + $REG_S="sd"; + $REG_L="ld"; + $PTR_SLL="dsll"; # incidentally works even on n32 + $SZREG=8; +} else { + $PTR_ADD="add"; + $PTR_SUB="sub"; + $REG_S="sw"; + $REG_L="lw"; + $PTR_SLL="sll"; + $SZREG=4; +} +$pf = ($flavour =~ /nubi/i) ? $t0 : $t2; +# +# +# +###################################################################### + +$big_endian=(`echo MIPSEL | $ENV{CC} -E -P -`=~/MIPSEL/)?1:0; + +for (@ARGV) { $output=$_ if (/^\w[\w\-]*\.\w+$/); } +open STDOUT,">$output"; + +if (!defined($big_endian)) { $big_endian=(unpack('L',pack('N',1))==1); } + +if ($output =~ /512/) { + $label="512"; + $SZ=8; + $LD="ld"; # load from memory + $ST="sd"; # store to memory + $SLL="dsll"; # shift left logical + $SRL="dsrl"; # shift right logical + $ADDU="daddu"; + @Sigma0=(28,34,39); + @Sigma1=(14,18,41); + @sigma0=( 7, 1, 8); # right shift first + @sigma1=( 6,19,61); # right shift first + $lastK=0x817; + $rounds=80; +} else { + $label="256"; + $SZ=4; + $LD="lw"; # load from memory + $ST="sw"; # store to memory + $SLL="sll"; # shift left logical + $SRL="srl"; # shift right logical + $ADDU="addu"; + @Sigma0=( 2,13,22); + @Sigma1=( 6,11,25); + @sigma0=( 3, 7,18); # right shift first + @sigma1=(10,17,19); # right shift first + $lastK=0x8f2; + $rounds=64; +} + +$MSB = $big_endian ? 
0 : ($SZ-1); +$LSB = ($SZ-1)&~$MSB; + +@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("\$$_",(1,2,3,7,24,25,30,31)); +@X=map("\$$_",(8..23)); + +$ctx=$a0; +$inp=$a1; +$len=$a2; $Ktbl=$len; + +sub BODY_00_15 { +my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_; +my ($T1,$tmp0,$tmp1,$tmp2)=(@X[4],@X[5],@X[6],@X[7]); + +$code.=<<___ if ($i<15); + ${LD}l @X[1],`($i+1)*$SZ+$MSB`($inp) + ${LD}r @X[1],`($i+1)*$SZ+$LSB`($inp) +___ +$code.=<<___ if (!$big_endian && $i<16 && $SZ==4); + srl $tmp0,@X[0],24 # byte swap($i) + srl $tmp1,@X[0],8 + andi $tmp2,@X[0],0xFF00 + sll @X[0],@X[0],24 + andi $tmp1,0xFF00 + sll $tmp2,$tmp2,8 + or @X[0],$tmp0 + or $tmp1,$tmp2 + or @X[0],$tmp1 +___ +$code.=<<___ if (!$big_endian && $i<16 && $SZ==8); + ori $tmp0,$zero,0xFF + dsll $tmp2,$tmp0,32 + or $tmp0,$tmp2 # 0x000000FF000000FF + and $tmp1,@X[0],$tmp0 # byte swap($i) + dsrl $tmp2,@X[0],24 + dsll $tmp1,24 + and $tmp2,$tmp0 + dsll $tmp0,8 # 0x0000FF000000FF00 + or $tmp1,$tmp2 + and $tmp2,@X[0],$tmp0 + dsrl @X[0],8 + dsll $tmp2,8 + and @X[0],$tmp0 + or $tmp1,$tmp2 + or @X[0],$tmp1 + dsrl $tmp1,@X[0],32 + dsll @X[0],32 + or @X[0],$tmp1 +___ +$code.=<<___; + $ADDU $T1,$X[0],$h # $i + $SRL $h,$e,@Sigma1[0] + xor $tmp2,$f,$g + $SLL $tmp1,$e,`$SZ*8-@Sigma1[2]` + and $tmp2,$e + $SRL $tmp0,$e,@Sigma1[1] + xor $h,$tmp1 + $SLL $tmp1,$e,`$SZ*8-@Sigma1[1]` + xor $h,$tmp0 + $SRL $tmp0,$e,@Sigma1[2] + xor $h,$tmp1 + $SLL $tmp1,$e,`$SZ*8-@Sigma1[0]` + xor $h,$tmp0 + xor $tmp2,$g # Ch(e,f,g) + xor $tmp0,$tmp1,$h # Sigma1(e) + + $SRL $h,$a,@Sigma0[0] + $ADDU $T1,$tmp2 + $LD $tmp2,`$i*$SZ`($Ktbl) # K[$i] + $SLL $tmp1,$a,`$SZ*8-@Sigma0[2]` + $ADDU $T1,$tmp0 + $SRL $tmp0,$a,@Sigma0[1] + xor $h,$tmp1 + $SLL $tmp1,$a,`$SZ*8-@Sigma0[1]` + xor $h,$tmp0 + $SRL $tmp0,$a,@Sigma0[2] + xor $h,$tmp1 + $SLL $tmp1,$a,`$SZ*8-@Sigma0[0]` + xor $h,$tmp0 + $ST @X[0],`($i%16)*$SZ`($sp) # offload to ring buffer + xor $h,$tmp1 # Sigma0(a) + + or $tmp0,$a,$b + and $tmp1,$a,$b + and $tmp0,$c + or $tmp1,$tmp0 # Maj(a,b,c) + $ADDU $T1,$tmp2 # +=K[$i] + $ADDU $h,$tmp1 + + $ADDU $d,$T1 + $ADDU $h,$T1 +___ +$code.=<<___ if ($i>=13); + $LD @X[3],`(($i+3)%16)*$SZ`($sp) # prefetch from ring buffer +___ +} + +sub BODY_16_XX { +my $i=@_[0]; +my ($tmp0,$tmp1,$tmp2,$tmp3)=(@X[4],@X[5],@X[6],@X[7]); + +$code.=<<___; + $SRL $tmp2,@X[1],@sigma0[0] # Xupdate($i) + $ADDU @X[0],@X[9] # +=X[i+9] + $SLL $tmp1,@X[1],`$SZ*8-@sigma0[2]` + $SRL $tmp0,@X[1],@sigma0[1] + xor $tmp2,$tmp1 + $SLL $tmp1,`@sigma0[2]-@sigma0[1]` + xor $tmp2,$tmp0 + $SRL $tmp0,@X[1],@sigma0[2] + xor $tmp2,$tmp1 + + $SRL $tmp3,@X[14],@sigma1[0] + xor $tmp2,$tmp0 # sigma0(X[i+1]) + $SLL $tmp1,@X[14],`$SZ*8-@sigma1[2]` + $ADDU @X[0],$tmp2 + $SRL $tmp0,@X[14],@sigma1[1] + xor $tmp3,$tmp1 + $SLL $tmp1,`@sigma1[2]-@sigma1[1]` + xor $tmp3,$tmp0 + $SRL $tmp0,@X[14],@sigma1[2] + xor $tmp3,$tmp1 + + xor $tmp3,$tmp0 # sigma1(X[i+14]) + $ADDU @X[0],$tmp3 +___ + &BODY_00_15(@_); +} + +$FRAMESIZE=16*$SZ+16*$SZREG; +$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 
0xc0fff008 : 0xc0ff0000; + +$code.=<<___; +#ifdef OPENSSL_FIPSCANISTER +# include +#endif + +.text +.set noat +#if !defined(__vxworks) || defined(__pic__) +.option pic2 +#endif + +.align 5 +.globl sha${label}_block_data_order +.ent sha${label}_block_data_order +sha${label}_block_data_order: + .frame $sp,$FRAMESIZE,$ra + .mask $SAVED_REGS_MASK,-$SZREG + .set noreorder +___ +$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification + .cpload $pf +___ +$code.=<<___; + $PTR_SUB $sp,$FRAMESIZE + $REG_S $ra,$FRAMESIZE-1*$SZREG($sp) + $REG_S $fp,$FRAMESIZE-2*$SZREG($sp) + $REG_S $s11,$FRAMESIZE-3*$SZREG($sp) + $REG_S $s10,$FRAMESIZE-4*$SZREG($sp) + $REG_S $s9,$FRAMESIZE-5*$SZREG($sp) + $REG_S $s8,$FRAMESIZE-6*$SZREG($sp) + $REG_S $s7,$FRAMESIZE-7*$SZREG($sp) + $REG_S $s6,$FRAMESIZE-8*$SZREG($sp) + $REG_S $s5,$FRAMESIZE-9*$SZREG($sp) + $REG_S $s4,$FRAMESIZE-10*$SZREG($sp) +___ +$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue + $REG_S $s3,$FRAMESIZE-11*$SZREG($sp) + $REG_S $s2,$FRAMESIZE-12*$SZREG($sp) + $REG_S $s1,$FRAMESIZE-13*$SZREG($sp) + $REG_S $s0,$FRAMESIZE-14*$SZREG($sp) + $REG_S $gp,$FRAMESIZE-15*$SZREG($sp) +___ +$code.=<<___; + $PTR_SLL @X[15],$len,`log(16*$SZ)/log(2)` +___ +$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification + .cplocal $Ktbl + .cpsetup $pf,$zero,sha${label}_block_data_order +___ +$code.=<<___; + .set reorder + la $Ktbl,K${label} # PIC-ified 'load address' + + $LD $A,0*$SZ($ctx) # load context + $LD $B,1*$SZ($ctx) + $LD $C,2*$SZ($ctx) + $LD $D,3*$SZ($ctx) + $LD $E,4*$SZ($ctx) + $LD $F,5*$SZ($ctx) + $LD $G,6*$SZ($ctx) + $LD $H,7*$SZ($ctx) + + $PTR_ADD @X[15],$inp # pointer to the end of input + $REG_S @X[15],16*$SZ($sp) + b .Loop + +.align 5 +.Loop: + ${LD}l @X[0],$MSB($inp) + ${LD}r @X[0],$LSB($inp) +___ +for ($i=0;$i<16;$i++) +{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); push(@X,shift(@X)); } +$code.=<<___; + b .L16_xx +.align 4 +.L16_xx: +___ +for (;$i<32;$i++) +{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); push(@X,shift(@X)); } +$code.=<<___; + and @X[6],0xfff + li @X[7],$lastK + .set noreorder + bne @X[6],@X[7],.L16_xx + $PTR_ADD $Ktbl,16*$SZ # Ktbl+=16 + + $REG_L @X[15],16*$SZ($sp) # restore pointer to the end of input + $LD @X[0],0*$SZ($ctx) + $LD @X[1],1*$SZ($ctx) + $LD @X[2],2*$SZ($ctx) + $PTR_ADD $inp,16*$SZ + $LD @X[3],3*$SZ($ctx) + $ADDU $A,@X[0] + $LD @X[4],4*$SZ($ctx) + $ADDU $B,@X[1] + $LD @X[5],5*$SZ($ctx) + $ADDU $C,@X[2] + $LD @X[6],6*$SZ($ctx) + $ADDU $D,@X[3] + $LD @X[7],7*$SZ($ctx) + $ADDU $E,@X[4] + $ST $A,0*$SZ($ctx) + $ADDU $F,@X[5] + $ST $B,1*$SZ($ctx) + $ADDU $G,@X[6] + $ST $C,2*$SZ($ctx) + $ADDU $H,@X[7] + $ST $D,3*$SZ($ctx) + $ST $E,4*$SZ($ctx) + $ST $F,5*$SZ($ctx) + $ST $G,6*$SZ($ctx) + $ST $H,7*$SZ($ctx) + + bne $inp,@X[15],.Loop + $PTR_SUB $Ktbl,`($rounds-16)*$SZ` # rewind $Ktbl + + $REG_L $ra,$FRAMESIZE-1*$SZREG($sp) + $REG_L $fp,$FRAMESIZE-2*$SZREG($sp) + $REG_L $s11,$FRAMESIZE-3*$SZREG($sp) + $REG_L $s10,$FRAMESIZE-4*$SZREG($sp) + $REG_L $s9,$FRAMESIZE-5*$SZREG($sp) + $REG_L $s8,$FRAMESIZE-6*$SZREG($sp) + $REG_L $s7,$FRAMESIZE-7*$SZREG($sp) + $REG_L $s6,$FRAMESIZE-8*$SZREG($sp) + $REG_L $s5,$FRAMESIZE-9*$SZREG($sp) + $REG_L $s4,$FRAMESIZE-10*$SZREG($sp) +___ +$code.=<<___ if ($flavour =~ /nubi/i); + $REG_L $s3,$FRAMESIZE-11*$SZREG($sp) + $REG_L $s2,$FRAMESIZE-12*$SZREG($sp) + $REG_L $s1,$FRAMESIZE-13*$SZREG($sp) + $REG_L $s0,$FRAMESIZE-14*$SZREG($sp) + $REG_L $gp,$FRAMESIZE-15*$SZREG($sp) +___ +$code.=<<___; + jr $ra + $PTR_ADD $sp,$FRAMESIZE +.end sha${label}_block_data_order + +.rdata +.align 5 +K${label}: +___ 
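######################################################################
# A minimal standalone sketch, separate from the generated $code above,
# of the per-round primitives this module implements, using the SHA-512
# rotation counts declared earlier: @Sigma0=(28,34,39), @Sigma1=(14,18,41),
# @sigma0=(7,1,8) and @sigma1=(6,19,61), "right shift first".  Purely
# illustrative: it assumes a perl built with 64-bit integers, and the
# helper names (ror64, shr64) are not taken from this file.

use strict;
use warnings;

my $MASK64 = 0xFFFFFFFFFFFFFFFF;

sub ror64 { my ($x,$n) = @_; (($x >> $n) | ($x << (64-$n))) & $MASK64 }
sub shr64 { my ($x,$n) = @_; $x >> $n }

# "Big" sigmas applied to the working variables a and e in every round.
sub Sigma0 { my $a = shift; ror64($a,28) ^ ror64($a,34) ^ ror64($a,39) }
sub Sigma1 { my $e = shift; ror64($e,14) ^ ror64($e,18) ^ ror64($e,41) }

# "Small" sigmas used by the message schedule in rounds 16..79.
sub sigma0 { my $x = shift; shr64($x,7) ^ ror64($x,1)  ^ ror64($x,8)  }
sub sigma1 { my $x = shift; shr64($x,6) ^ ror64($x,19) ^ ror64($x,61) }

# Ch and Maj; the assembly above uses the equivalent, cheaper forms
# ((f^g)&e)^g and ((a|c)&b)|(a&c).
sub Ch  { my ($e,$f,$g) = @_; ($e & $f) ^ (~$e & $g & $MASK64) }
sub Maj { my ($a,$b,$c) = @_; ($a & $b) ^ ($a & $c) ^ ($b & $c) }

# One round then updates the state as
#   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
#   T2 = Sigma0(a) + Maj(a,b,c)
#   (a,b,c,d,e,f,g,h) = (T1+T2, a, b, c, d+T1, e, f, g)
# which matches what BODY_00_15 computes; the caller rotates the
# variable names between rounds instead of moving values.
######################################################################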
+if ($SZ==4) { +$code.=<<___; + .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 + .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 + .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 + .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 + .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc + .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da + .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 + .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 + .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 + .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 + .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 + .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 + .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 + .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 + .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 + .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 +___ +} else { +$code.=<<___; + .dword 0x428a2f98d728ae22, 0x7137449123ef65cd + .dword 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc + .dword 0x3956c25bf348b538, 0x59f111f1b605d019 + .dword 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 + .dword 0xd807aa98a3030242, 0x12835b0145706fbe + .dword 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 + .dword 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 + .dword 0x9bdc06a725c71235, 0xc19bf174cf692694 + .dword 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 + .dword 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 + .dword 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 + .dword 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 + .dword 0x983e5152ee66dfab, 0xa831c66d2db43210 + .dword 0xb00327c898fb213f, 0xbf597fc7beef0ee4 + .dword 0xc6e00bf33da88fc2, 0xd5a79147930aa725 + .dword 0x06ca6351e003826f, 0x142929670a0e6e70 + .dword 0x27b70a8546d22ffc, 0x2e1b21385c26c926 + .dword 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df + .dword 0x650a73548baf63de, 0x766a0abb3c77b2a8 + .dword 0x81c2c92e47edaee6, 0x92722c851482353b + .dword 0xa2bfe8a14cf10364, 0xa81a664bbc423001 + .dword 0xc24b8b70d0f89791, 0xc76c51a30654be30 + .dword 0xd192e819d6ef5218, 0xd69906245565a910 + .dword 0xf40e35855771202a, 0x106aa07032bbd1b8 + .dword 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 + .dword 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 + .dword 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb + .dword 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 + .dword 0x748f82ee5defb2fc, 0x78a5636f43172f60 + .dword 0x84c87814a1f0ab72, 0x8cc702081a6439ec + .dword 0x90befffa23631e28, 0xa4506cebde82bde9 + .dword 0xbef9a3f7b2c67915, 0xc67178f2e372532b + .dword 0xca273eceea26619c, 0xd186b8c721c0c207 + .dword 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 + .dword 0x06f067aa72176fba, 0x0a637dc5a2c898a6 + .dword 0x113f9804bef90dae, 0x1b710b35131c471b + .dword 0x28db77f523047d84, 0x32caab7b40c72493 + .dword 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c + .dword 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a + .dword 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 +___ +} +$code.=<<___; +.asciiz "SHA${label} for MIPS, CRYPTOGAMS by " +.align 5 + +___ + +$code =~ s/\`([^\`]*)\`/eval $1/gem; +print $code; +close STDOUT; diff --git a/app/openssl/crypto/sha/asm/sha512-parisc.pl b/app/openssl/crypto/sha/asm/sha512-parisc.pl new file mode 100755 index 00000000..fc0e15b3 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha512-parisc.pl @@ -0,0 +1,793 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. 
For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== + +# SHA256/512 block procedure for PA-RISC. + +# June 2009. +# +# SHA256 performance is >75% better than gcc 3.2 generated code on +# PA-7100LC. Compared to code generated by vendor compiler this +# implementation is almost 70% faster in 64-bit build, but delivers +# virtually same performance in 32-bit build on PA-8600. +# +# SHA512 performance is >2.9x better than gcc 3.2 generated code on +# PA-7100LC, PA-RISC 1.1 processor. Then implementation detects if the +# code is executed on PA-RISC 2.0 processor and switches to 64-bit +# code path delivering adequate peformance even in "blended" 32-bit +# build. Though 64-bit code is not any faster than code generated by +# vendor compiler on PA-8600... +# +# Special thanks to polarhome.com for providing HP-UX account. + +$flavour = shift; +$output = shift; +open STDOUT,">$output"; + +if ($flavour =~ /64/) { + $LEVEL ="2.0W"; + $SIZE_T =8; + $FRAME_MARKER =80; + $SAVED_RP =16; + $PUSH ="std"; + $PUSHMA ="std,ma"; + $POP ="ldd"; + $POPMB ="ldd,mb"; +} else { + $LEVEL ="1.0"; + $SIZE_T =4; + $FRAME_MARKER =48; + $SAVED_RP =20; + $PUSH ="stw"; + $PUSHMA ="stwm"; + $POP ="ldw"; + $POPMB ="ldwm"; +} + +if ($output =~ /512/) { + $func="sha512_block_data_order"; + $SZ=8; + @Sigma0=(28,34,39); + @Sigma1=(14,18,41); + @sigma0=(1, 8, 7); + @sigma1=(19,61, 6); + $rounds=80; + $LAST10BITS=0x017; + $LD="ldd"; + $LDM="ldd,ma"; + $ST="std"; +} else { + $func="sha256_block_data_order"; + $SZ=4; + @Sigma0=( 2,13,22); + @Sigma1=( 6,11,25); + @sigma0=( 7,18, 3); + @sigma1=(17,19,10); + $rounds=64; + $LAST10BITS=0x0f2; + $LD="ldw"; + $LDM="ldwm"; + $ST="stw"; +} + +$FRAME=16*$SIZE_T+$FRAME_MARKER;# 16 saved regs + frame marker + # [+ argument transfer] +$XOFF=16*$SZ+32; # local variables +$FRAME+=$XOFF; +$XOFF+=$FRAME_MARKER; # distance between %sp and local variables + +$ctx="%r26"; # zapped by $a0 +$inp="%r25"; # zapped by $a1 +$num="%r24"; # zapped by $t0 + +$a0 ="%r26"; +$a1 ="%r25"; +$t0 ="%r24"; +$t1 ="%r29"; +$Tbl="%r31"; + +@V=($A,$B,$C,$D,$E,$F,$G,$H)=("%r17","%r18","%r19","%r20","%r21","%r22","%r23","%r28"); + +@X=("%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", "%r8", + "%r9", "%r10","%r11","%r12","%r13","%r14","%r15","%r16",$inp); + +sub ROUND_00_15 { +my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_; +$code.=<<___; + _ror $e,$Sigma1[0],$a0 + and $f,$e,$t0 + _ror $e,$Sigma1[1],$a1 + addl $t1,$h,$h + andcm $g,$e,$t1 + xor $a1,$a0,$a0 + _ror $a1,`$Sigma1[2]-$Sigma1[1]`,$a1 + or $t0,$t1,$t1 ; Ch(e,f,g) + addl @X[$i%16],$h,$h + xor $a0,$a1,$a1 ; Sigma1(e) + addl $t1,$h,$h + _ror $a,$Sigma0[0],$a0 + addl $a1,$h,$h + + _ror $a,$Sigma0[1],$a1 + and $a,$b,$t0 + and $a,$c,$t1 + xor $a1,$a0,$a0 + _ror $a1,`$Sigma0[2]-$Sigma0[1]`,$a1 + xor $t1,$t0,$t0 + and $b,$c,$t1 + xor $a0,$a1,$a1 ; Sigma0(a) + addl $h,$d,$d + xor $t1,$t0,$t0 ; Maj(a,b,c) + `"$LDM $SZ($Tbl),$t1" if ($i<15)` + addl $a1,$h,$h + addl $t0,$h,$h + +___ +} + +sub ROUND_16_xx { +my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_; +$i-=16; +$code.=<<___; + _ror @X[($i+1)%16],$sigma0[0],$a0 + _ror @X[($i+1)%16],$sigma0[1],$a1 + addl @X[($i+9)%16],@X[$i],@X[$i] + _ror @X[($i+14)%16],$sigma1[0],$t0 + _ror @X[($i+14)%16],$sigma1[1],$t1 + xor $a1,$a0,$a0 + _shr @X[($i+1)%16],$sigma0[2],$a1 + xor $t1,$t0,$t0 + _shr @X[($i+14)%16],$sigma1[2],$t1 + xor $a1,$a0,$a0 ; sigma0(X[(i+1)&0x0f]) + xor $t1,$t0,$t0 ; sigma1(X[(i+14)&0x0f]) + $LDM $SZ($Tbl),$t1 + addl $a0,@X[$i],@X[$i] + addl 
$t0,@X[$i],@X[$i] +___ +$code.=<<___ if ($i==15); + extru $t1,31,10,$a1 + comiclr,<> $LAST10BITS,$a1,%r0 + ldo 1($Tbl),$Tbl ; signal end of $Tbl +___ +&ROUND_00_15($i+16,$a,$b,$c,$d,$e,$f,$g,$h); +} + +$code=<<___; + .LEVEL $LEVEL + .SPACE \$TEXT\$ + .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY + + .ALIGN 64 +L\$table +___ +$code.=<<___ if ($SZ==8); + .WORD 0x428a2f98,0xd728ae22,0x71374491,0x23ef65cd + .WORD 0xb5c0fbcf,0xec4d3b2f,0xe9b5dba5,0x8189dbbc + .WORD 0x3956c25b,0xf348b538,0x59f111f1,0xb605d019 + .WORD 0x923f82a4,0xaf194f9b,0xab1c5ed5,0xda6d8118 + .WORD 0xd807aa98,0xa3030242,0x12835b01,0x45706fbe + .WORD 0x243185be,0x4ee4b28c,0x550c7dc3,0xd5ffb4e2 + .WORD 0x72be5d74,0xf27b896f,0x80deb1fe,0x3b1696b1 + .WORD 0x9bdc06a7,0x25c71235,0xc19bf174,0xcf692694 + .WORD 0xe49b69c1,0x9ef14ad2,0xefbe4786,0x384f25e3 + .WORD 0x0fc19dc6,0x8b8cd5b5,0x240ca1cc,0x77ac9c65 + .WORD 0x2de92c6f,0x592b0275,0x4a7484aa,0x6ea6e483 + .WORD 0x5cb0a9dc,0xbd41fbd4,0x76f988da,0x831153b5 + .WORD 0x983e5152,0xee66dfab,0xa831c66d,0x2db43210 + .WORD 0xb00327c8,0x98fb213f,0xbf597fc7,0xbeef0ee4 + .WORD 0xc6e00bf3,0x3da88fc2,0xd5a79147,0x930aa725 + .WORD 0x06ca6351,0xe003826f,0x14292967,0x0a0e6e70 + .WORD 0x27b70a85,0x46d22ffc,0x2e1b2138,0x5c26c926 + .WORD 0x4d2c6dfc,0x5ac42aed,0x53380d13,0x9d95b3df + .WORD 0x650a7354,0x8baf63de,0x766a0abb,0x3c77b2a8 + .WORD 0x81c2c92e,0x47edaee6,0x92722c85,0x1482353b + .WORD 0xa2bfe8a1,0x4cf10364,0xa81a664b,0xbc423001 + .WORD 0xc24b8b70,0xd0f89791,0xc76c51a3,0x0654be30 + .WORD 0xd192e819,0xd6ef5218,0xd6990624,0x5565a910 + .WORD 0xf40e3585,0x5771202a,0x106aa070,0x32bbd1b8 + .WORD 0x19a4c116,0xb8d2d0c8,0x1e376c08,0x5141ab53 + .WORD 0x2748774c,0xdf8eeb99,0x34b0bcb5,0xe19b48a8 + .WORD 0x391c0cb3,0xc5c95a63,0x4ed8aa4a,0xe3418acb + .WORD 0x5b9cca4f,0x7763e373,0x682e6ff3,0xd6b2b8a3 + .WORD 0x748f82ee,0x5defb2fc,0x78a5636f,0x43172f60 + .WORD 0x84c87814,0xa1f0ab72,0x8cc70208,0x1a6439ec + .WORD 0x90befffa,0x23631e28,0xa4506ceb,0xde82bde9 + .WORD 0xbef9a3f7,0xb2c67915,0xc67178f2,0xe372532b + .WORD 0xca273ece,0xea26619c,0xd186b8c7,0x21c0c207 + .WORD 0xeada7dd6,0xcde0eb1e,0xf57d4f7f,0xee6ed178 + .WORD 0x06f067aa,0x72176fba,0x0a637dc5,0xa2c898a6 + .WORD 0x113f9804,0xbef90dae,0x1b710b35,0x131c471b + .WORD 0x28db77f5,0x23047d84,0x32caab7b,0x40c72493 + .WORD 0x3c9ebe0a,0x15c9bebc,0x431d67c4,0x9c100d4c + .WORD 0x4cc5d4be,0xcb3e42b6,0x597f299c,0xfc657e2a + .WORD 0x5fcb6fab,0x3ad6faec,0x6c44198c,0x4a475817 +___ +$code.=<<___ if ($SZ==4); + .WORD 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 + .WORD 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 + .WORD 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 + .WORD 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 + .WORD 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc + .WORD 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da + .WORD 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 + .WORD 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 + .WORD 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 + .WORD 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 + .WORD 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 + .WORD 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 + .WORD 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 + .WORD 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 + .WORD 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 + .WORD 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 +___ +$code.=<<___; + + .EXPORT $func,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR + .ALIGN 64 +$func + .PROC + .CALLINFO FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18 + .ENTRY + $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue + $PUSHMA %r3,$FRAME(%sp) + 
$PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp) + $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp) + $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp) + $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp) + $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp) + $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp) + $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp) + $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp) + $PUSH %r12,`-$FRAME+9*$SIZE_T`(%sp) + $PUSH %r13,`-$FRAME+10*$SIZE_T`(%sp) + $PUSH %r14,`-$FRAME+11*$SIZE_T`(%sp) + $PUSH %r15,`-$FRAME+12*$SIZE_T`(%sp) + $PUSH %r16,`-$FRAME+13*$SIZE_T`(%sp) + $PUSH %r17,`-$FRAME+14*$SIZE_T`(%sp) + $PUSH %r18,`-$FRAME+15*$SIZE_T`(%sp) + + _shl $num,`log(16*$SZ)/log(2)`,$num + addl $inp,$num,$num ; $num to point at the end of $inp + + $PUSH $num,`-$FRAME_MARKER-4*$SIZE_T`(%sp) ; save arguments + $PUSH $inp,`-$FRAME_MARKER-3*$SIZE_T`(%sp) + $PUSH $ctx,`-$FRAME_MARKER-2*$SIZE_T`(%sp) + + blr %r0,$Tbl + ldi 3,$t1 +L\$pic + andcm $Tbl,$t1,$Tbl ; wipe privilege level + ldo L\$table-L\$pic($Tbl),$Tbl +___ +$code.=<<___ if ($SZ==8 && $SIZE_T==4); + ldi 31,$t1 + mtctl $t1,%cr11 + extrd,u,*= $t1,%sar,1,$t1 ; executes on PA-RISC 1.0 + b L\$parisc1 + nop +___ +$code.=<<___; + $LD `0*$SZ`($ctx),$A ; load context + $LD `1*$SZ`($ctx),$B + $LD `2*$SZ`($ctx),$C + $LD `3*$SZ`($ctx),$D + $LD `4*$SZ`($ctx),$E + $LD `5*$SZ`($ctx),$F + $LD `6*$SZ`($ctx),$G + $LD `7*$SZ`($ctx),$H + + extru $inp,31,`log($SZ)/log(2)`,$t0 + sh3addl $t0,%r0,$t0 + subi `8*$SZ`,$t0,$t0 + mtctl $t0,%cr11 ; load %sar with align factor + +L\$oop + ldi `$SZ-1`,$t0 + $LDM $SZ($Tbl),$t1 + andcm $inp,$t0,$t0 ; align $inp +___ + for ($i=0;$i<15;$i++) { # load input block + $code.="\t$LD `$SZ*$i`($t0),@X[$i]\n"; } +$code.=<<___; + cmpb,*= $inp,$t0,L\$aligned + $LD `$SZ*15`($t0),@X[15] + $LD `$SZ*16`($t0),@X[16] +___ + for ($i=0;$i<16;$i++) { # align data + $code.="\t_align @X[$i],@X[$i+1],@X[$i]\n"; } +$code.=<<___; +L\$aligned + nop ; otherwise /usr/ccs/bin/as is confused by below .WORD +___ + +for($i=0;$i<16;$i++) { &ROUND_00_15($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; +L\$rounds + nop ; otherwise /usr/ccs/bin/as is confused by below .WORD +___ +for(;$i<32;$i++) { &ROUND_16_xx($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + bb,>= $Tbl,31,L\$rounds ; end of $Tbl signalled? 
+ nop + + $POP `-$FRAME_MARKER-2*$SIZE_T`(%sp),$ctx ; restore arguments + $POP `-$FRAME_MARKER-3*$SIZE_T`(%sp),$inp + $POP `-$FRAME_MARKER-4*$SIZE_T`(%sp),$num + ldo `-$rounds*$SZ-1`($Tbl),$Tbl ; rewind $Tbl + + $LD `0*$SZ`($ctx),@X[0] ; load context + $LD `1*$SZ`($ctx),@X[1] + $LD `2*$SZ`($ctx),@X[2] + $LD `3*$SZ`($ctx),@X[3] + $LD `4*$SZ`($ctx),@X[4] + $LD `5*$SZ`($ctx),@X[5] + addl @X[0],$A,$A + $LD `6*$SZ`($ctx),@X[6] + addl @X[1],$B,$B + $LD `7*$SZ`($ctx),@X[7] + ldo `16*$SZ`($inp),$inp ; advance $inp + + $ST $A,`0*$SZ`($ctx) ; save context + addl @X[2],$C,$C + $ST $B,`1*$SZ`($ctx) + addl @X[3],$D,$D + $ST $C,`2*$SZ`($ctx) + addl @X[4],$E,$E + $ST $D,`3*$SZ`($ctx) + addl @X[5],$F,$F + $ST $E,`4*$SZ`($ctx) + addl @X[6],$G,$G + $ST $F,`5*$SZ`($ctx) + addl @X[7],$H,$H + $ST $G,`6*$SZ`($ctx) + $ST $H,`7*$SZ`($ctx) + + cmpb,*<>,n $inp,$num,L\$oop + $PUSH $inp,`-$FRAME_MARKER-3*$SIZE_T`(%sp) ; save $inp +___ +if ($SZ==8 && $SIZE_T==4) # SHA512 for 32-bit PA-RISC 1.0 +{{ +$code.=<<___; + b L\$done + nop + + .ALIGN 64 +L\$parisc1 +___ + +@V=( $Ahi, $Alo, $Bhi, $Blo, $Chi, $Clo, $Dhi, $Dlo, + $Ehi, $Elo, $Fhi, $Flo, $Ghi, $Glo, $Hhi, $Hlo) = + ( "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", "%r8", + "%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16"); +$a0 ="%r17"; +$a1 ="%r18"; +$a2 ="%r19"; +$a3 ="%r20"; +$t0 ="%r21"; +$t1 ="%r22"; +$t2 ="%r28"; +$t3 ="%r29"; +$Tbl="%r31"; + +@X=("%r23","%r24","%r25","%r26"); # zaps $num,$inp,$ctx + +sub ROUND_00_15_pa1 { +my ($i,$ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo, + $ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo,$flag)=@_; +my ($Xhi,$Xlo,$Xnhi,$Xnlo) = @X; + +$code.=<<___ if (!$flag); + ldw `-$XOFF+8*(($i+1)%16)`(%sp),$Xnhi + ldw `-$XOFF+8*(($i+1)%16)+4`(%sp),$Xnlo ; load X[i+1] +___ +$code.=<<___; + shd $ehi,$elo,$Sigma1[0],$t0 + add $Xlo,$hlo,$hlo + shd $elo,$ehi,$Sigma1[0],$t1 + addc $Xhi,$hhi,$hhi ; h += X[i] + shd $ehi,$elo,$Sigma1[1],$t2 + ldwm 8($Tbl),$Xhi + shd $elo,$ehi,$Sigma1[1],$t3 + ldw -4($Tbl),$Xlo ; load K[i] + xor $t2,$t0,$t0 + xor $t3,$t1,$t1 + and $flo,$elo,$a0 + and $fhi,$ehi,$a1 + shd $ehi,$elo,$Sigma1[2],$t2 + andcm $glo,$elo,$a2 + shd $elo,$ehi,$Sigma1[2],$t3 + andcm $ghi,$ehi,$a3 + xor $t2,$t0,$t0 + xor $t3,$t1,$t1 ; Sigma1(e) + add $Xlo,$hlo,$hlo + xor $a2,$a0,$a0 + addc $Xhi,$hhi,$hhi ; h += K[i] + xor $a3,$a1,$a1 ; Ch(e,f,g) + + add $t0,$hlo,$hlo + shd $ahi,$alo,$Sigma0[0],$t0 + addc $t1,$hhi,$hhi ; h += Sigma1(e) + shd $alo,$ahi,$Sigma0[0],$t1 + add $a0,$hlo,$hlo + shd $ahi,$alo,$Sigma0[1],$t2 + addc $a1,$hhi,$hhi ; h += Ch(e,f,g) + shd $alo,$ahi,$Sigma0[1],$t3 + + xor $t2,$t0,$t0 + xor $t3,$t1,$t1 + shd $ahi,$alo,$Sigma0[2],$t2 + and $alo,$blo,$a0 + shd $alo,$ahi,$Sigma0[2],$t3 + and $ahi,$bhi,$a1 + xor $t2,$t0,$t0 + xor $t3,$t1,$t1 ; Sigma0(a) + + and $alo,$clo,$a2 + and $ahi,$chi,$a3 + xor $a2,$a0,$a0 + add $hlo,$dlo,$dlo + xor $a3,$a1,$a1 + addc $hhi,$dhi,$dhi ; d += h + and $blo,$clo,$a2 + add $t0,$hlo,$hlo + and $bhi,$chi,$a3 + addc $t1,$hhi,$hhi ; h += Sigma0(a) + xor $a2,$a0,$a0 + add $a0,$hlo,$hlo + xor $a3,$a1,$a1 ; Maj(a,b,c) + addc $a1,$hhi,$hhi ; h += Maj(a,b,c) + +___ +$code.=<<___ if ($i==15 && $flag); + extru $Xlo,31,10,$Xlo + comiclr,= $LAST10BITS,$Xlo,%r0 + b L\$rounds_pa1 + nop +___ +push(@X,shift(@X)); push(@X,shift(@X)); +} + +sub ROUND_16_xx_pa1 { +my ($Xhi,$Xlo,$Xnhi,$Xnlo) = @X; +my ($i)=shift; +$i-=16; +$code.=<<___; + ldw `-$XOFF+8*(($i+1)%16)`(%sp),$Xnhi + ldw `-$XOFF+8*(($i+1)%16)+4`(%sp),$Xnlo ; load X[i+1] + ldw `-$XOFF+8*(($i+9)%16)`(%sp),$a1 + ldw `-$XOFF+8*(($i+9)%16)+4`(%sp),$a0 ; load 
X[i+9] + ldw `-$XOFF+8*(($i+14)%16)`(%sp),$a3 + ldw `-$XOFF+8*(($i+14)%16)+4`(%sp),$a2 ; load X[i+14] + shd $Xnhi,$Xnlo,$sigma0[0],$t0 + shd $Xnlo,$Xnhi,$sigma0[0],$t1 + add $a0,$Xlo,$Xlo + shd $Xnhi,$Xnlo,$sigma0[1],$t2 + addc $a1,$Xhi,$Xhi + shd $Xnlo,$Xnhi,$sigma0[1],$t3 + xor $t2,$t0,$t0 + shd $Xnhi,$Xnlo,$sigma0[2],$t2 + xor $t3,$t1,$t1 + extru $Xnhi,`31-$sigma0[2]`,`32-$sigma0[2]`,$t3 + xor $t2,$t0,$t0 + shd $a3,$a2,$sigma1[0],$a0 + xor $t3,$t1,$t1 ; sigma0(X[i+1)&0x0f]) + shd $a2,$a3,$sigma1[0],$a1 + add $t0,$Xlo,$Xlo + shd $a3,$a2,$sigma1[1],$t2 + addc $t1,$Xhi,$Xhi + shd $a2,$a3,$sigma1[1],$t3 + xor $t2,$a0,$a0 + shd $a3,$a2,$sigma1[2],$t2 + xor $t3,$a1,$a1 + extru $a3,`31-$sigma1[2]`,`32-$sigma1[2]`,$t3 + xor $t2,$a0,$a0 + xor $t3,$a1,$a1 ; sigma0(X[i+14)&0x0f]) + add $a0,$Xlo,$Xlo + addc $a1,$Xhi,$Xhi + + stw $Xhi,`-$XOFF+8*($i%16)`(%sp) + stw $Xlo,`-$XOFF+8*($i%16)+4`(%sp) +___ +&ROUND_00_15_pa1($i,@_,1); +} +$code.=<<___; + ldw `0*4`($ctx),$Ahi ; load context + ldw `1*4`($ctx),$Alo + ldw `2*4`($ctx),$Bhi + ldw `3*4`($ctx),$Blo + ldw `4*4`($ctx),$Chi + ldw `5*4`($ctx),$Clo + ldw `6*4`($ctx),$Dhi + ldw `7*4`($ctx),$Dlo + ldw `8*4`($ctx),$Ehi + ldw `9*4`($ctx),$Elo + ldw `10*4`($ctx),$Fhi + ldw `11*4`($ctx),$Flo + ldw `12*4`($ctx),$Ghi + ldw `13*4`($ctx),$Glo + ldw `14*4`($ctx),$Hhi + ldw `15*4`($ctx),$Hlo + + extru $inp,31,2,$t0 + sh3addl $t0,%r0,$t0 + subi 32,$t0,$t0 + mtctl $t0,%cr11 ; load %sar with align factor + +L\$oop_pa1 + extru $inp,31,2,$a3 + comib,= 0,$a3,L\$aligned_pa1 + sub $inp,$a3,$inp + + ldw `0*4`($inp),$X[0] + ldw `1*4`($inp),$X[1] + ldw `2*4`($inp),$t2 + ldw `3*4`($inp),$t3 + ldw `4*4`($inp),$a0 + ldw `5*4`($inp),$a1 + ldw `6*4`($inp),$a2 + ldw `7*4`($inp),$a3 + vshd $X[0],$X[1],$X[0] + vshd $X[1],$t2,$X[1] + stw $X[0],`-$XOFF+0*4`(%sp) + ldw `8*4`($inp),$t0 + vshd $t2,$t3,$t2 + stw $X[1],`-$XOFF+1*4`(%sp) + ldw `9*4`($inp),$t1 + vshd $t3,$a0,$t3 +___ +{ +my @t=($t2,$t3,$a0,$a1,$a2,$a3,$t0,$t1); +for ($i=2;$i<=(128/4-8);$i++) { +$code.=<<___; + stw $t[0],`-$XOFF+$i*4`(%sp) + ldw `(8+$i)*4`($inp),$t[0] + vshd $t[1],$t[2],$t[1] +___ +push(@t,shift(@t)); +} +for (;$i<(128/4-1);$i++) { +$code.=<<___; + stw $t[0],`-$XOFF+$i*4`(%sp) + vshd $t[1],$t[2],$t[1] +___ +push(@t,shift(@t)); +} +$code.=<<___; + b L\$collected_pa1 + stw $t[0],`-$XOFF+$i*4`(%sp) + +___ +} +$code.=<<___; +L\$aligned_pa1 + ldw `0*4`($inp),$X[0] + ldw `1*4`($inp),$X[1] + ldw `2*4`($inp),$t2 + ldw `3*4`($inp),$t3 + ldw `4*4`($inp),$a0 + ldw `5*4`($inp),$a1 + ldw `6*4`($inp),$a2 + ldw `7*4`($inp),$a3 + stw $X[0],`-$XOFF+0*4`(%sp) + ldw `8*4`($inp),$t0 + stw $X[1],`-$XOFF+1*4`(%sp) + ldw `9*4`($inp),$t1 +___ +{ +my @t=($t2,$t3,$a0,$a1,$a2,$a3,$t0,$t1); +for ($i=2;$i<(128/4-8);$i++) { +$code.=<<___; + stw $t[0],`-$XOFF+$i*4`(%sp) + ldw `(8+$i)*4`($inp),$t[0] +___ +push(@t,shift(@t)); +} +for (;$i<128/4;$i++) { +$code.=<<___; + stw $t[0],`-$XOFF+$i*4`(%sp) +___ +push(@t,shift(@t)); +} +$code.="L\$collected_pa1\n"; +} + +for($i=0;$i<16;$i++) { &ROUND_00_15_pa1($i,@V); unshift(@V,pop(@V)); unshift(@V,pop(@V)); } +$code.="L\$rounds_pa1\n"; +for(;$i<32;$i++) { &ROUND_16_xx_pa1($i,@V); unshift(@V,pop(@V)); unshift(@V,pop(@V)); } + +$code.=<<___; + $POP `-$FRAME_MARKER-2*$SIZE_T`(%sp),$ctx ; restore arguments + $POP `-$FRAME_MARKER-3*$SIZE_T`(%sp),$inp + $POP `-$FRAME_MARKER-4*$SIZE_T`(%sp),$num + ldo `-$rounds*$SZ`($Tbl),$Tbl ; rewind $Tbl + + ldw `0*4`($ctx),$t1 ; update context + ldw `1*4`($ctx),$t0 + ldw `2*4`($ctx),$t3 + ldw `3*4`($ctx),$t2 + ldw `4*4`($ctx),$a1 + ldw `5*4`($ctx),$a0 + ldw 
`6*4`($ctx),$a3 + add $t0,$Alo,$Alo + ldw `7*4`($ctx),$a2 + addc $t1,$Ahi,$Ahi + ldw `8*4`($ctx),$t1 + add $t2,$Blo,$Blo + ldw `9*4`($ctx),$t0 + addc $t3,$Bhi,$Bhi + ldw `10*4`($ctx),$t3 + add $a0,$Clo,$Clo + ldw `11*4`($ctx),$t2 + addc $a1,$Chi,$Chi + ldw `12*4`($ctx),$a1 + add $a2,$Dlo,$Dlo + ldw `13*4`($ctx),$a0 + addc $a3,$Dhi,$Dhi + ldw `14*4`($ctx),$a3 + add $t0,$Elo,$Elo + ldw `15*4`($ctx),$a2 + addc $t1,$Ehi,$Ehi + stw $Ahi,`0*4`($ctx) + add $t2,$Flo,$Flo + stw $Alo,`1*4`($ctx) + addc $t3,$Fhi,$Fhi + stw $Bhi,`2*4`($ctx) + add $a0,$Glo,$Glo + stw $Blo,`3*4`($ctx) + addc $a1,$Ghi,$Ghi + stw $Chi,`4*4`($ctx) + add $a2,$Hlo,$Hlo + stw $Clo,`5*4`($ctx) + addc $a3,$Hhi,$Hhi + stw $Dhi,`6*4`($ctx) + ldo `16*$SZ`($inp),$inp ; advance $inp + stw $Dlo,`7*4`($ctx) + stw $Ehi,`8*4`($ctx) + stw $Elo,`9*4`($ctx) + stw $Fhi,`10*4`($ctx) + stw $Flo,`11*4`($ctx) + stw $Ghi,`12*4`($ctx) + stw $Glo,`13*4`($ctx) + stw $Hhi,`14*4`($ctx) + comb,= $inp,$num,L\$done + stw $Hlo,`15*4`($ctx) + b L\$oop_pa1 + $PUSH $inp,`-$FRAME_MARKER-3*$SIZE_T`(%sp) ; save $inp +L\$done +___ +}} +$code.=<<___; + $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue + $POP `-$FRAME+1*$SIZE_T`(%sp),%r4 + $POP `-$FRAME+2*$SIZE_T`(%sp),%r5 + $POP `-$FRAME+3*$SIZE_T`(%sp),%r6 + $POP `-$FRAME+4*$SIZE_T`(%sp),%r7 + $POP `-$FRAME+5*$SIZE_T`(%sp),%r8 + $POP `-$FRAME+6*$SIZE_T`(%sp),%r9 + $POP `-$FRAME+7*$SIZE_T`(%sp),%r10 + $POP `-$FRAME+8*$SIZE_T`(%sp),%r11 + $POP `-$FRAME+9*$SIZE_T`(%sp),%r12 + $POP `-$FRAME+10*$SIZE_T`(%sp),%r13 + $POP `-$FRAME+11*$SIZE_T`(%sp),%r14 + $POP `-$FRAME+12*$SIZE_T`(%sp),%r15 + $POP `-$FRAME+13*$SIZE_T`(%sp),%r16 + $POP `-$FRAME+14*$SIZE_T`(%sp),%r17 + $POP `-$FRAME+15*$SIZE_T`(%sp),%r18 + bv (%r2) + .EXIT + $POPMB -$FRAME(%sp),%r3 + .PROCEND + .STRINGZ "SHA`64*$SZ` block transform for PA-RISC, CRYPTOGAMS by " +___ + +# Explicitly encode PA-RISC 2.0 instructions used in this module, so +# that it can be compiled with .LEVEL 1.0. It should be noted that I +# wouldn't have to do this, if GNU assembler understood .ALLOW 2.0 +# directive... + +my $ldd = sub { + my ($mod,$args) = @_; + my $orig = "ldd$mod\t$args"; + + if ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 3 suffices + { my $opcode=(0x14<<26)|($2<<21)|($3<<16)|(($1&0x1FF8)<<1)|(($1>>13)&1); + $opcode|=(1<<3) if ($mod =~ /^,m/); + $opcode|=(1<<2) if ($mod =~ /^,mb/); + sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; + } + else { "\t".$orig; } +}; + +my $std = sub { + my ($mod,$args) = @_; + my $orig = "std$mod\t$args"; + + if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 3 suffices + { my $opcode=(0x1c<<26)|($3<<21)|($1<<16)|(($2&0x1FF8)<<1)|(($2>>13)&1); + sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; + } + else { "\t".$orig; } +}; + +my $extrd = sub { + my ($mod,$args) = @_; + my $orig = "extrd$mod\t$args"; + + # I only have ",u" completer, it's implicitly encoded... 
+ if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15 + { my $opcode=(0x36<<26)|($1<<21)|($4<<16); + my $len=32-$3; + $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos + $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len + sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; + } + elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12 + { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9); + my $len=32-$2; + $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len + $opcode |= (1<<13) if ($mod =~ /,\**=/); + sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; + } + else { "\t".$orig; } +}; + +my $shrpd = sub { + my ($mod,$args) = @_; + my $orig = "shrpd$mod\t$args"; + + if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14 + { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4; + my $cpos=63-$3; + $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa + sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig; + } + elsif ($args =~ /%r([0-9]+),%r([0-9]+),%sar,%r([0-9]+)/) # format 11 + { sprintf "\t.WORD\t0x%08x\t; %s", + (0x34<<26)|($2<<21)|($1<<16)|(1<<9)|$3,$orig; + } + else { "\t".$orig; } +}; + +sub assemble { + my ($mnemonic,$mod,$args)=@_; + my $opcode = eval("\$$mnemonic"); + + ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args"; +} + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/ge; + + s/shd\s+(%r[0-9]+),(%r[0-9]+),([0-9]+)/ + $3>31 ? sprintf("shd\t%$2,%$1,%d",$3-32) # rotation for >=32 + : sprintf("shd\t%$1,%$2,%d",$3)/e or + # translate made up instructons: _ror, _shr, _align, _shl + s/_ror(\s+)(%r[0-9]+),/ + ($SZ==4 ? "shd" : "shrpd")."$1$2,$2,"/e or + + s/_shr(\s+%r[0-9]+),([0-9]+),/ + $SZ==4 ? sprintf("extru%s,%d,%d,",$1,31-$2,32-$2) + : sprintf("extrd,u%s,%d,%d,",$1,63-$2,64-$2)/e or + + s/_align(\s+%r[0-9]+,%r[0-9]+),/ + ($SZ==4 ? "vshd$1," : "shrpd$1,%sar,")/e or + + s/_shl(\s+%r[0-9]+),([0-9]+),/ + $SIZE_T==4 ? sprintf("zdep%s,%d,%d,",$1,31-$2,32-$2) + : sprintf("depd,z%s,%d,%d,",$1,63-$2,64-$2)/e; + + s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($SIZE_T==4); + + s/cmpb,\*/comb,/ if ($SIZE_T==4); + + s/\bbv\b/bve/ if ($SIZE_T==8); + + print $_,"\n"; +} + +close STDOUT; diff --git a/app/openssl/crypto/sha/asm/sha512-ppc.pl b/app/openssl/crypto/sha/asm/sha512-ppc.pl index 768a6a6f..6b44a68e 100755 --- a/app/openssl/crypto/sha/asm/sha512-ppc.pl +++ b/app/openssl/crypto/sha/asm/sha512-ppc.pl @@ -40,6 +40,7 @@ $output =shift; if ($flavour =~ /64/) { $SIZE_T=8; + $LRSAVE=2*$SIZE_T; $STU="stdu"; $UCMP="cmpld"; $SHL="sldi"; @@ -47,6 +48,7 @@ if ($flavour =~ /64/) { $PUSH="std"; } elsif ($flavour =~ /32/) { $SIZE_T=4; + $LRSAVE=$SIZE_T; $STU="stwu"; $UCMP="cmplw"; $SHL="slwi"; @@ -87,7 +89,8 @@ if ($output =~ /512/) { $SHR="srwi"; } -$FRAME=32*$SIZE_T; +$FRAME=32*$SIZE_T+16*$SZ; +$LOCALS=6*$SIZE_T; $sp ="r1"; $toc="r2"; @@ -179,13 +182,12 @@ $code=<<___; .globl $func .align 6 $func: + $STU $sp,-$FRAME($sp) mflr r0 - $STU $sp,`-($FRAME+16*$SZ)`($sp) $SHL $num,$num,`log(16*$SZ)/log(2)` $PUSH $ctx,`$FRAME-$SIZE_T*22`($sp) - $PUSH r0,`$FRAME-$SIZE_T*21`($sp) $PUSH $toc,`$FRAME-$SIZE_T*20`($sp) $PUSH r13,`$FRAME-$SIZE_T*19`($sp) $PUSH r14,`$FRAME-$SIZE_T*18`($sp) @@ -206,6 +208,7 @@ $func: $PUSH r29,`$FRAME-$SIZE_T*3`($sp) $PUSH r30,`$FRAME-$SIZE_T*2`($sp) $PUSH r31,`$FRAME-$SIZE_T*1`($sp) + $PUSH r0,`$FRAME+$LRSAVE`($sp) $LD $A,`0*$SZ`($ctx) mr $inp,r4 ; incarnate $inp @@ -217,7 +220,7 @@ $func: $LD $G,`6*$SZ`($ctx) $LD $H,`7*$SZ`($ctx) - b LPICmeup + bl LPICmeup LPICedup: andi. 
r0,$inp,3 bne Lunaligned @@ -226,40 +229,14 @@ Laligned: $PUSH $num,`$FRAME-$SIZE_T*24`($sp) ; end pointer $PUSH $inp,`$FRAME-$SIZE_T*23`($sp) ; inp pointer bl Lsha2_block_private -Ldone: - $POP r0,`$FRAME-$SIZE_T*21`($sp) - $POP $toc,`$FRAME-$SIZE_T*20`($sp) - $POP r13,`$FRAME-$SIZE_T*19`($sp) - $POP r14,`$FRAME-$SIZE_T*18`($sp) - $POP r15,`$FRAME-$SIZE_T*17`($sp) - $POP r16,`$FRAME-$SIZE_T*16`($sp) - $POP r17,`$FRAME-$SIZE_T*15`($sp) - $POP r18,`$FRAME-$SIZE_T*14`($sp) - $POP r19,`$FRAME-$SIZE_T*13`($sp) - $POP r20,`$FRAME-$SIZE_T*12`($sp) - $POP r21,`$FRAME-$SIZE_T*11`($sp) - $POP r22,`$FRAME-$SIZE_T*10`($sp) - $POP r23,`$FRAME-$SIZE_T*9`($sp) - $POP r24,`$FRAME-$SIZE_T*8`($sp) - $POP r25,`$FRAME-$SIZE_T*7`($sp) - $POP r26,`$FRAME-$SIZE_T*6`($sp) - $POP r27,`$FRAME-$SIZE_T*5`($sp) - $POP r28,`$FRAME-$SIZE_T*4`($sp) - $POP r29,`$FRAME-$SIZE_T*3`($sp) - $POP r30,`$FRAME-$SIZE_T*2`($sp) - $POP r31,`$FRAME-$SIZE_T*1`($sp) - mtlr r0 - addi $sp,$sp,`$FRAME+16*$SZ` - blr -___ + b Ldone -# PowerPC specification allows an implementation to be ill-behaved -# upon unaligned access which crosses page boundary. "Better safe -# than sorry" principle makes me treat it specially. But I don't -# look for particular offending word, but rather for the input -# block which crosses the boundary. Once found that block is aligned -# and hashed separately... -$code.=<<___; +; PowerPC specification allows an implementation to be ill-behaved +; upon unaligned access which crosses page boundary. "Better safe +; than sorry" principle makes me treat it specially. But I don't +; look for particular offending word, but rather for the input +; block which crosses the boundary. Once found that block is aligned +; and hashed separately... .align 4 Lunaligned: subfic $t1,$inp,4096 @@ -278,7 +255,7 @@ Lunaligned: Lcross_page: li $t1,`16*$SZ/4` mtctr $t1 - addi r20,$sp,$FRAME ; aligned spot below the frame + addi r20,$sp,$LOCALS ; aligned spot below the frame Lmemcpy: lbz r16,0($inp) lbz r17,1($inp) @@ -293,8 +270,8 @@ Lmemcpy: bdnz Lmemcpy $PUSH $inp,`$FRAME-$SIZE_T*26`($sp) ; save real inp - addi $t1,$sp,`$FRAME+16*$SZ` ; fictitious end pointer - addi $inp,$sp,$FRAME ; fictitious inp pointer + addi $t1,$sp,`$LOCALS+16*$SZ` ; fictitious end pointer + addi $inp,$sp,$LOCALS ; fictitious inp pointer $PUSH $num,`$FRAME-$SIZE_T*25`($sp) ; save real num $PUSH $t1,`$FRAME-$SIZE_T*24`($sp) ; end pointer $PUSH $inp,`$FRAME-$SIZE_T*23`($sp) ; inp pointer @@ -303,10 +280,36 @@ Lmemcpy: $POP $num,`$FRAME-$SIZE_T*25`($sp) ; restore real num addic. 
$num,$num,`-16*$SZ` ; num-- bne- Lunaligned - b Ldone -___ -$code.=<<___; +Ldone: + $POP r0,`$FRAME+$LRSAVE`($sp) + $POP $toc,`$FRAME-$SIZE_T*20`($sp) + $POP r13,`$FRAME-$SIZE_T*19`($sp) + $POP r14,`$FRAME-$SIZE_T*18`($sp) + $POP r15,`$FRAME-$SIZE_T*17`($sp) + $POP r16,`$FRAME-$SIZE_T*16`($sp) + $POP r17,`$FRAME-$SIZE_T*15`($sp) + $POP r18,`$FRAME-$SIZE_T*14`($sp) + $POP r19,`$FRAME-$SIZE_T*13`($sp) + $POP r20,`$FRAME-$SIZE_T*12`($sp) + $POP r21,`$FRAME-$SIZE_T*11`($sp) + $POP r22,`$FRAME-$SIZE_T*10`($sp) + $POP r23,`$FRAME-$SIZE_T*9`($sp) + $POP r24,`$FRAME-$SIZE_T*8`($sp) + $POP r25,`$FRAME-$SIZE_T*7`($sp) + $POP r26,`$FRAME-$SIZE_T*6`($sp) + $POP r27,`$FRAME-$SIZE_T*5`($sp) + $POP r28,`$FRAME-$SIZE_T*4`($sp) + $POP r29,`$FRAME-$SIZE_T*3`($sp) + $POP r30,`$FRAME-$SIZE_T*2`($sp) + $POP r31,`$FRAME-$SIZE_T*1`($sp) + mtlr r0 + addi $sp,$sp,$FRAME + blr + .long 0 + .byte 0,12,4,1,0x80,18,3,0 + .long 0 + .align 4 Lsha2_block_private: ___ @@ -372,6 +375,8 @@ $code.=<<___; $ST $H,`7*$SZ`($ctx) bne Lsha2_block_private blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 ___ # Ugly hack here, because PPC assembler syntax seem to vary too @@ -379,22 +384,15 @@ ___ $code.=<<___; .align 6 LPICmeup: - bl LPIC - addi $Tbl,$Tbl,`64-4` ; "distance" between . and last nop - b LPICedup - nop - nop - nop - nop - nop -LPIC: mflr $Tbl + mflr r0 + bcl 20,31,\$+4 + mflr $Tbl ; vvvvvv "distance" between . and 1st data entry + addi $Tbl,$Tbl,`64-8` + mtlr r0 blr - nop - nop - nop - nop - nop - nop + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + .space `64-9*4` ___ $code.=<<___ if ($SZ==8); .long 0x428a2f98,0xd728ae22,0x71374491,0x23ef65cd diff --git a/app/openssl/crypto/sha/asm/sha512-s390x.pl b/app/openssl/crypto/sha/asm/sha512-s390x.pl index e7ef2d5a..079a3fc7 100644 --- a/app/openssl/crypto/sha/asm/sha512-s390x.pl +++ b/app/openssl/crypto/sha/asm/sha512-s390x.pl @@ -26,6 +26,26 @@ # favour dual-issue z10 pipeline. Hardware SHA256/512 is ~4.7x faster # than software. +# November 2010. +# +# Adapt for -m31 build. If kernel supports what's called "highgprs" +# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit +# instructions and achieve "64-bit" performance even in 31-bit legacy +# application context. The feature is not specific to any particular +# processor, as long as it's "z-CPU". Latter implies that the code +# remains z/Architecture specific. On z900 SHA256 was measured to +# perform 2.4x and SHA512 - 13x better than code generated by gcc 4.3. 
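#
# Illustrative aside (standalone Perl, not part of the patch): a minimal sketch
# of how the $flavour switch added below is consumed.  $SIZE_T and $g mirror
# the variables in this hunk; the sample store instruction is hypothetical.
# With $g="" the string templates expand to 31-bit mnemonics (st, l, stm, ...),
# with $g="g" to their 64-bit forms (stg, lg, stmg, ...); "highgprs" only
# guarantees that the upper halves of the 64-bit GPRs survive in 31-bit mode.
my $flavour = shift || "linux64";              # e.g. "linux32" takes the -m31 path
my ($SIZE_T, $g) = ($flavour =~ /3[12]/) ? (4, "") : (8, "g");
print "st${g}\t%r2,".(2*$SIZE_T)."(%r15)\n";   # "st ..." under -m31, "stg ..." otherwise
#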
+ +$flavour = shift; + +if ($flavour =~ /3[12]/) { + $SIZE_T=4; + $g=""; +} else { + $SIZE_T=8; + $g="g"; +} + $t0="%r0"; $t1="%r1"; $ctx="%r2"; $t2="%r2"; @@ -44,7 +64,7 @@ $tbl="%r13"; $T1="%r14"; $sp="%r15"; -$output=shift; +while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} open STDOUT,">$output"; if ($output =~ /512/) { @@ -78,7 +98,8 @@ if ($output =~ /512/) { } $Func="sha${label}_block_data_order"; $Table="K${label}"; -$frame=160+16*$SZ; +$stdframe=16*$SIZE_T+4*8; +$frame=$stdframe+16*$SZ; sub BODY_00_15 { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_; @@ -93,9 +114,9 @@ $code.=<<___; xgr $t0,$t1 $ROT $t1,$t1,`$Sigma1[2]-$Sigma1[1]` xgr $t2,$g - $ST $T1,`160+$SZ*($i%16)`($sp) + $ST $T1,`$stdframe+$SZ*($i%16)`($sp) xgr $t0,$t1 # Sigma1(e) - la $T1,0($T1,$h) # T1+=h + algr $T1,$h # T1+=h ngr $t2,$e lgr $t1,$a algr $T1,$t0 # T1+=Sigma1(e) @@ -113,7 +134,7 @@ $code.=<<___; ngr $t2,$b algr $h,$T1 # h+=T1 ogr $t2,$t1 # Maj(a,b,c) - la $d,0($d,$T1) # d+=T1 + algr $d,$T1 # d+=T1 algr $h,$t2 # h+=Maj(a,b,c) ___ } @@ -122,19 +143,19 @@ sub BODY_16_XX { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_; $code.=<<___; - $LD $T1,`160+$SZ*(($i+1)%16)`($sp) ### $i - $LD $t1,`160+$SZ*(($i+14)%16)`($sp) + $LD $T1,`$stdframe+$SZ*(($i+1)%16)`($sp) ### $i + $LD $t1,`$stdframe+$SZ*(($i+14)%16)`($sp) $ROT $t0,$T1,$sigma0[0] $SHR $T1,$sigma0[2] $ROT $t2,$t0,`$sigma0[1]-$sigma0[0]` xgr $T1,$t0 $ROT $t0,$t1,$sigma1[0] - xgr $T1,$t2 # sigma0(X[i+1]) + xgr $T1,$t2 # sigma0(X[i+1]) $SHR $t1,$sigma1[2] - $ADD $T1,`160+$SZ*($i%16)`($sp) # +=X[i] + $ADD $T1,`$stdframe+$SZ*($i%16)`($sp) # +=X[i] xgr $t1,$t0 $ROT $t0,$t0,`$sigma1[1]-$sigma1[0]` - $ADD $T1,`160+$SZ*(($i+9)%16)`($sp) # +=X[i+9] + $ADD $T1,`$stdframe+$SZ*(($i+9)%16)`($sp) # +=X[i+9] xgr $t1,$t0 # sigma1(X[i+14]) algr $T1,$t1 # +=sigma1(X[i+14]) ___ @@ -212,6 +233,7 @@ $code.=<<___; .globl $Func .type $Func,\@function $Func: + sllg $len,$len,`log(16*$SZ)/log(2)` ___ $code.=<<___ if ($kimdfunc); larl %r1,OPENSSL_s390xcap_P @@ -219,15 +241,15 @@ $code.=<<___ if ($kimdfunc); tmhl %r0,0x4000 # check for message-security assist jz .Lsoftware lghi %r0,0 - la %r1,16($sp) + la %r1,`2*$SIZE_T`($sp) .long 0xb93e0002 # kimd %r0,%r2 - lg %r0,16($sp) + lg %r0,`2*$SIZE_T`($sp) tmhh %r0,`0x8000>>$kimdfunc` jz .Lsoftware lghi %r0,$kimdfunc lgr %r1,$ctx lgr %r2,$inp - sllg %r3,$len,`log(16*$SZ)/log(2)` + lgr %r3,$len .long 0xb93e0002 # kimd %r0,%r2 brc 1,.-4 # pay attention to "partial completion" br %r14 @@ -235,13 +257,12 @@ $code.=<<___ if ($kimdfunc); .Lsoftware: ___ $code.=<<___; - sllg $len,$len,`log(16*$SZ)/log(2)` lghi %r1,-$frame - agr $len,$inp - stmg $ctx,%r15,16($sp) + la $len,0($len,$inp) + stm${g} $ctx,%r15,`2*$SIZE_T`($sp) lgr %r0,$sp la $sp,0(%r1,$sp) - stg %r0,0($sp) + st${g} %r0,0($sp) larl $tbl,$Table $LD $A,`0*$SZ`($ctx) @@ -265,7 +286,7 @@ $code.=<<___; clgr $len,$t0 jne .Lrounds_16_xx - lg $ctx,`$frame+16`($sp) + l${g} $ctx,`$frame+2*$SIZE_T`($sp) la $inp,`16*$SZ`($inp) $ADD $A,`0*$SZ`($ctx) $ADD $B,`1*$SZ`($ctx) @@ -283,14 +304,14 @@ $code.=<<___; $ST $F,`5*$SZ`($ctx) $ST $G,`6*$SZ`($ctx) $ST $H,`7*$SZ`($ctx) - clg $inp,`$frame+32`($sp) + cl${g} $inp,`$frame+4*$SIZE_T`($sp) jne .Lloop - lmg %r6,%r15,`$frame+48`($sp) + lm${g} %r6,%r15,`$frame+6*$SIZE_T`($sp) br %r14 .size $Func,.-$Func .string "SHA${label} block transform for s390x, CRYPTOGAMS by " -.comm OPENSSL_s390xcap_P,8,8 +.comm OPENSSL_s390xcap_P,16,8 ___ $code =~ s/\`([^\`]*)\`/eval $1/gem; diff --git a/app/openssl/crypto/sha/asm/sha512-sparcv9.pl b/app/openssl/crypto/sha/asm/sha512-sparcv9.pl index 
ec5d7813..58574078 100644 --- a/app/openssl/crypto/sha/asm/sha512-sparcv9.pl +++ b/app/openssl/crypto/sha/asm/sha512-sparcv9.pl @@ -305,9 +305,9 @@ $code.=<<___; srlx @X[(($i+9)/2)%8],32,$tmp1 ! X[i+9] xor $tmp0,$tmp2,$tmp2 ! sigma1(X[i+14]) srl @X[($i/2)%8],0,$tmp0 + add $tmp2,$tmp1,$tmp1 add $xi,$T1,$T1 ! +=X[i] xor $tmp0,@X[($i/2)%8],@X[($i/2)%8] - add $tmp2,$T1,$T1 add $tmp1,$T1,$T1 srl $T1,0,$T1 @@ -318,9 +318,9 @@ ___ $code.=<<___; srlx @X[($i/2)%8],32,$tmp1 ! X[i] xor $tmp0,$tmp2,$tmp2 ! sigma1(X[i+14]) - srl @X[($i/2)%8],0,@X[($i/2)%8] add $xi,$T1,$T1 ! +=X[i+9] - add $tmp2,$T1,$T1 + add $tmp2,$tmp1,$tmp1 + srl @X[($i/2)%8],0,@X[($i/2)%8] add $tmp1,$T1,$T1 sllx $T1,32,$tmp0 diff --git a/app/openssl/crypto/sha/asm/sha512-x86_64.S b/app/openssl/crypto/sha/asm/sha512-x86_64.S new file mode 100644 index 00000000..2d3294e0 --- /dev/null +++ b/app/openssl/crypto/sha/asm/sha512-x86_64.S @@ -0,0 +1,1802 @@ +.text + +.globl sha512_block_data_order +.type sha512_block_data_order,@function +.align 16 +sha512_block_data_order: + pushq %rbx + pushq %rbp + pushq %r12 + pushq %r13 + pushq %r14 + pushq %r15 + movq %rsp,%r11 + shlq $4,%rdx + subq $128+32,%rsp + leaq (%rsi,%rdx,8),%rdx + andq $-64,%rsp + movq %rdi,128+0(%rsp) + movq %rsi,128+8(%rsp) + movq %rdx,128+16(%rsp) + movq %r11,128+24(%rsp) +.Lprologue: + + leaq K512(%rip),%rbp + + movq 0(%rdi),%rax + movq 8(%rdi),%rbx + movq 16(%rdi),%rcx + movq 24(%rdi),%rdx + movq 32(%rdi),%r8 + movq 40(%rdi),%r9 + movq 48(%rdi),%r10 + movq 56(%rdi),%r11 + jmp .Lloop + +.align 16 +.Lloop: + xorq %rdi,%rdi + movq 0(%rsi),%r12 + movq %r8,%r13 + movq %rax,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r9,%r15 + movq %r12,0(%rsp) + + rorq $5,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + rorq $4,%r13 + addq %r11,%r12 + xorq %rax,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r8,%r15 + movq %rbx,%r11 + + rorq $6,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + xorq %rcx,%r11 + xorq %rax,%r14 + addq %r15,%r12 + movq %rbx,%r15 + + rorq $14,%r13 + andq %rax,%r11 + andq %rcx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r11 + + addq %r12,%rdx + addq %r12,%r11 + leaq 1(%rdi),%rdi + addq %r14,%r11 + + movq 8(%rsi),%r12 + movq %rdx,%r13 + movq %r11,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r8,%r15 + movq %r12,8(%rsp) + + rorq $5,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + rorq $4,%r13 + addq %r10,%r12 + xorq %r11,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rdx,%r15 + movq %rax,%r10 + + rorq $6,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + xorq %rbx,%r10 + xorq %r11,%r14 + addq %r15,%r12 + movq %rax,%r15 + + rorq $14,%r13 + andq %r11,%r10 + andq %rbx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r10 + + addq %r12,%rcx + addq %r12,%r10 + leaq 1(%rdi),%rdi + addq %r14,%r10 + + movq 16(%rsi),%r12 + movq %rcx,%r13 + movq %r10,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rdx,%r15 + movq %r12,16(%rsp) + + rorq $5,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + rorq $4,%r13 + addq %r9,%r12 + xorq %r10,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rcx,%r15 + movq %r11,%r9 + + rorq $6,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + xorq %rax,%r9 + xorq %r10,%r14 + addq %r15,%r12 + movq %r11,%r15 + + rorq $14,%r13 + andq %r10,%r9 + andq %rax,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r9 + + addq %r12,%rbx + addq %r12,%r9 + leaq 1(%rdi),%rdi + addq %r14,%r9 + + movq 24(%rsi),%r12 + movq %rbx,%r13 + movq %r9,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rcx,%r15 + movq %r12,24(%rsp) + + rorq $5,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + rorq $4,%r13 + addq %r8,%r12 + xorq %r9,%r14 + + addq 
(%rbp,%rdi,8),%r12 + andq %rbx,%r15 + movq %r10,%r8 + + rorq $6,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + xorq %r11,%r8 + xorq %r9,%r14 + addq %r15,%r12 + movq %r10,%r15 + + rorq $14,%r13 + andq %r9,%r8 + andq %r11,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r8 + + addq %r12,%rax + addq %r12,%r8 + leaq 1(%rdi),%rdi + addq %r14,%r8 + + movq 32(%rsi),%r12 + movq %rax,%r13 + movq %r8,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rbx,%r15 + movq %r12,32(%rsp) + + rorq $5,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + rorq $4,%r13 + addq %rdx,%r12 + xorq %r8,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rax,%r15 + movq %r9,%rdx + + rorq $6,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + xorq %r10,%rdx + xorq %r8,%r14 + addq %r15,%r12 + movq %r9,%r15 + + rorq $14,%r13 + andq %r8,%rdx + andq %r10,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rdx + + addq %r12,%r11 + addq %r12,%rdx + leaq 1(%rdi),%rdi + addq %r14,%rdx + + movq 40(%rsi),%r12 + movq %r11,%r13 + movq %rdx,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rax,%r15 + movq %r12,40(%rsp) + + rorq $5,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + rorq $4,%r13 + addq %rcx,%r12 + xorq %rdx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r11,%r15 + movq %r8,%rcx + + rorq $6,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + xorq %r9,%rcx + xorq %rdx,%r14 + addq %r15,%r12 + movq %r8,%r15 + + rorq $14,%r13 + andq %rdx,%rcx + andq %r9,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rcx + + addq %r12,%r10 + addq %r12,%rcx + leaq 1(%rdi),%rdi + addq %r14,%rcx + + movq 48(%rsi),%r12 + movq %r10,%r13 + movq %rcx,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r11,%r15 + movq %r12,48(%rsp) + + rorq $5,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + rorq $4,%r13 + addq %rbx,%r12 + xorq %rcx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r10,%r15 + movq %rdx,%rbx + + rorq $6,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + xorq %r8,%rbx + xorq %rcx,%r14 + addq %r15,%r12 + movq %rdx,%r15 + + rorq $14,%r13 + andq %rcx,%rbx + andq %r8,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rbx + + addq %r12,%r9 + addq %r12,%rbx + leaq 1(%rdi),%rdi + addq %r14,%rbx + + movq 56(%rsi),%r12 + movq %r9,%r13 + movq %rbx,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r10,%r15 + movq %r12,56(%rsp) + + rorq $5,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + rorq $4,%r13 + addq %rax,%r12 + xorq %rbx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r9,%r15 + movq %rcx,%rax + + rorq $6,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + xorq %rdx,%rax + xorq %rbx,%r14 + addq %r15,%r12 + movq %rcx,%r15 + + rorq $14,%r13 + andq %rbx,%rax + andq %rdx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rax + + addq %r12,%r8 + addq %r12,%rax + leaq 1(%rdi),%rdi + addq %r14,%rax + + movq 64(%rsi),%r12 + movq %r8,%r13 + movq %rax,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r9,%r15 + movq %r12,64(%rsp) + + rorq $5,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + rorq $4,%r13 + addq %r11,%r12 + xorq %rax,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r8,%r15 + movq %rbx,%r11 + + rorq $6,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + xorq %rcx,%r11 + xorq %rax,%r14 + addq %r15,%r12 + movq %rbx,%r15 + + rorq $14,%r13 + andq %rax,%r11 + andq %rcx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r11 + + addq %r12,%rdx + addq %r12,%r11 + leaq 1(%rdi),%rdi + addq %r14,%r11 + + movq 72(%rsi),%r12 + movq %rdx,%r13 + movq %r11,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r8,%r15 + movq %r12,72(%rsp) + + rorq $5,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + rorq $4,%r13 + addq %r10,%r12 + xorq %r11,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rdx,%r15 + movq %rax,%r10 + + rorq 
$6,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + xorq %rbx,%r10 + xorq %r11,%r14 + addq %r15,%r12 + movq %rax,%r15 + + rorq $14,%r13 + andq %r11,%r10 + andq %rbx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r10 + + addq %r12,%rcx + addq %r12,%r10 + leaq 1(%rdi),%rdi + addq %r14,%r10 + + movq 80(%rsi),%r12 + movq %rcx,%r13 + movq %r10,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rdx,%r15 + movq %r12,80(%rsp) + + rorq $5,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + rorq $4,%r13 + addq %r9,%r12 + xorq %r10,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rcx,%r15 + movq %r11,%r9 + + rorq $6,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + xorq %rax,%r9 + xorq %r10,%r14 + addq %r15,%r12 + movq %r11,%r15 + + rorq $14,%r13 + andq %r10,%r9 + andq %rax,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r9 + + addq %r12,%rbx + addq %r12,%r9 + leaq 1(%rdi),%rdi + addq %r14,%r9 + + movq 88(%rsi),%r12 + movq %rbx,%r13 + movq %r9,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rcx,%r15 + movq %r12,88(%rsp) + + rorq $5,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + rorq $4,%r13 + addq %r8,%r12 + xorq %r9,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rbx,%r15 + movq %r10,%r8 + + rorq $6,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + xorq %r11,%r8 + xorq %r9,%r14 + addq %r15,%r12 + movq %r10,%r15 + + rorq $14,%r13 + andq %r9,%r8 + andq %r11,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r8 + + addq %r12,%rax + addq %r12,%r8 + leaq 1(%rdi),%rdi + addq %r14,%r8 + + movq 96(%rsi),%r12 + movq %rax,%r13 + movq %r8,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rbx,%r15 + movq %r12,96(%rsp) + + rorq $5,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + rorq $4,%r13 + addq %rdx,%r12 + xorq %r8,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rax,%r15 + movq %r9,%rdx + + rorq $6,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + xorq %r10,%rdx + xorq %r8,%r14 + addq %r15,%r12 + movq %r9,%r15 + + rorq $14,%r13 + andq %r8,%rdx + andq %r10,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rdx + + addq %r12,%r11 + addq %r12,%rdx + leaq 1(%rdi),%rdi + addq %r14,%rdx + + movq 104(%rsi),%r12 + movq %r11,%r13 + movq %rdx,%r14 + bswapq %r12 + rorq $23,%r13 + movq %rax,%r15 + movq %r12,104(%rsp) + + rorq $5,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + rorq $4,%r13 + addq %rcx,%r12 + xorq %rdx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r11,%r15 + movq %r8,%rcx + + rorq $6,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + xorq %r9,%rcx + xorq %rdx,%r14 + addq %r15,%r12 + movq %r8,%r15 + + rorq $14,%r13 + andq %rdx,%rcx + andq %r9,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rcx + + addq %r12,%r10 + addq %r12,%rcx + leaq 1(%rdi),%rdi + addq %r14,%rcx + + movq 112(%rsi),%r12 + movq %r10,%r13 + movq %rcx,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r11,%r15 + movq %r12,112(%rsp) + + rorq $5,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + rorq $4,%r13 + addq %rbx,%r12 + xorq %rcx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r10,%r15 + movq %rdx,%rbx + + rorq $6,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + xorq %r8,%rbx + xorq %rcx,%r14 + addq %r15,%r12 + movq %rdx,%r15 + + rorq $14,%r13 + andq %rcx,%rbx + andq %r8,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rbx + + addq %r12,%r9 + addq %r12,%rbx + leaq 1(%rdi),%rdi + addq %r14,%rbx + + movq 120(%rsi),%r12 + movq %r9,%r13 + movq %rbx,%r14 + bswapq %r12 + rorq $23,%r13 + movq %r10,%r15 + movq %r12,120(%rsp) + + rorq $5,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + rorq $4,%r13 + addq %rax,%r12 + xorq %rbx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r9,%r15 + movq %rcx,%rax + + rorq $6,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + xorq %rdx,%rax + 
xorq %rbx,%r14 + addq %r15,%r12 + movq %rcx,%r15 + + rorq $14,%r13 + andq %rbx,%rax + andq %rdx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rax + + addq %r12,%r8 + addq %r12,%rax + leaq 1(%rdi),%rdi + addq %r14,%rax + + jmp .Lrounds_16_xx +.align 16 +.Lrounds_16_xx: + movq 8(%rsp),%r13 + movq 112(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 72(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 0(%rsp),%r12 + movq %r8,%r13 + addq %r14,%r12 + movq %rax,%r14 + rorq $23,%r13 + movq %r9,%r15 + movq %r12,0(%rsp) + + rorq $5,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + rorq $4,%r13 + addq %r11,%r12 + xorq %rax,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r8,%r15 + movq %rbx,%r11 + + rorq $6,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + xorq %rcx,%r11 + xorq %rax,%r14 + addq %r15,%r12 + movq %rbx,%r15 + + rorq $14,%r13 + andq %rax,%r11 + andq %rcx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r11 + + addq %r12,%rdx + addq %r12,%r11 + leaq 1(%rdi),%rdi + addq %r14,%r11 + + movq 16(%rsp),%r13 + movq 120(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 80(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 8(%rsp),%r12 + movq %rdx,%r13 + addq %r14,%r12 + movq %r11,%r14 + rorq $23,%r13 + movq %r8,%r15 + movq %r12,8(%rsp) + + rorq $5,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + rorq $4,%r13 + addq %r10,%r12 + xorq %r11,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rdx,%r15 + movq %rax,%r10 + + rorq $6,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + xorq %rbx,%r10 + xorq %r11,%r14 + addq %r15,%r12 + movq %rax,%r15 + + rorq $14,%r13 + andq %r11,%r10 + andq %rbx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r10 + + addq %r12,%rcx + addq %r12,%r10 + leaq 1(%rdi),%rdi + addq %r14,%r10 + + movq 24(%rsp),%r13 + movq 0(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 88(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 16(%rsp),%r12 + movq %rcx,%r13 + addq %r14,%r12 + movq %r10,%r14 + rorq $23,%r13 + movq %rdx,%r15 + movq %r12,16(%rsp) + + rorq $5,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + rorq $4,%r13 + addq %r9,%r12 + xorq %r10,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rcx,%r15 + movq %r11,%r9 + + rorq $6,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + xorq %rax,%r9 + xorq %r10,%r14 + addq %r15,%r12 + movq %r11,%r15 + + rorq $14,%r13 + andq %r10,%r9 + andq %rax,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r9 + + addq %r12,%rbx + addq %r12,%r9 + leaq 1(%rdi),%rdi + addq %r14,%r9 + + movq 32(%rsp),%r13 + movq 8(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 96(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 24(%rsp),%r12 + movq %rbx,%r13 + addq %r14,%r12 + movq %r9,%r14 + rorq $23,%r13 + movq %rcx,%r15 + movq %r12,24(%rsp) + + rorq $5,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + rorq $4,%r13 + addq %r8,%r12 + xorq %r9,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rbx,%r15 + movq %r10,%r8 + + rorq $6,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + xorq %r11,%r8 + xorq %r9,%r14 + addq %r15,%r12 + movq 
%r10,%r15 + + rorq $14,%r13 + andq %r9,%r8 + andq %r11,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r8 + + addq %r12,%rax + addq %r12,%r8 + leaq 1(%rdi),%rdi + addq %r14,%r8 + + movq 40(%rsp),%r13 + movq 16(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 104(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 32(%rsp),%r12 + movq %rax,%r13 + addq %r14,%r12 + movq %r8,%r14 + rorq $23,%r13 + movq %rbx,%r15 + movq %r12,32(%rsp) + + rorq $5,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + rorq $4,%r13 + addq %rdx,%r12 + xorq %r8,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rax,%r15 + movq %r9,%rdx + + rorq $6,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + xorq %r10,%rdx + xorq %r8,%r14 + addq %r15,%r12 + movq %r9,%r15 + + rorq $14,%r13 + andq %r8,%rdx + andq %r10,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rdx + + addq %r12,%r11 + addq %r12,%rdx + leaq 1(%rdi),%rdi + addq %r14,%rdx + + movq 48(%rsp),%r13 + movq 24(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 112(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 40(%rsp),%r12 + movq %r11,%r13 + addq %r14,%r12 + movq %rdx,%r14 + rorq $23,%r13 + movq %rax,%r15 + movq %r12,40(%rsp) + + rorq $5,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + rorq $4,%r13 + addq %rcx,%r12 + xorq %rdx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r11,%r15 + movq %r8,%rcx + + rorq $6,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + xorq %r9,%rcx + xorq %rdx,%r14 + addq %r15,%r12 + movq %r8,%r15 + + rorq $14,%r13 + andq %rdx,%rcx + andq %r9,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rcx + + addq %r12,%r10 + addq %r12,%rcx + leaq 1(%rdi),%rdi + addq %r14,%rcx + + movq 56(%rsp),%r13 + movq 32(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 120(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 48(%rsp),%r12 + movq %r10,%r13 + addq %r14,%r12 + movq %rcx,%r14 + rorq $23,%r13 + movq %r11,%r15 + movq %r12,48(%rsp) + + rorq $5,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + rorq $4,%r13 + addq %rbx,%r12 + xorq %rcx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r10,%r15 + movq %rdx,%rbx + + rorq $6,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + xorq %r8,%rbx + xorq %rcx,%r14 + addq %r15,%r12 + movq %rdx,%r15 + + rorq $14,%r13 + andq %rcx,%rbx + andq %r8,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rbx + + addq %r12,%r9 + addq %r12,%rbx + leaq 1(%rdi),%rdi + addq %r14,%rbx + + movq 64(%rsp),%r13 + movq 40(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 0(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 56(%rsp),%r12 + movq %r9,%r13 + addq %r14,%r12 + movq %rbx,%r14 + rorq $23,%r13 + movq %r10,%r15 + movq %r12,56(%rsp) + + rorq $5,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + rorq $4,%r13 + addq %rax,%r12 + xorq %rbx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r9,%r15 + movq %rcx,%rax + + rorq $6,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + xorq %rdx,%rax + xorq %rbx,%r14 + addq %r15,%r12 + movq %rcx,%r15 + + rorq $14,%r13 + andq %rbx,%rax + andq %rdx,%r15 + + rorq 
$28,%r14 + addq %r13,%r12 + addq %r15,%rax + + addq %r12,%r8 + addq %r12,%rax + leaq 1(%rdi),%rdi + addq %r14,%rax + + movq 72(%rsp),%r13 + movq 48(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 8(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 64(%rsp),%r12 + movq %r8,%r13 + addq %r14,%r12 + movq %rax,%r14 + rorq $23,%r13 + movq %r9,%r15 + movq %r12,64(%rsp) + + rorq $5,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + rorq $4,%r13 + addq %r11,%r12 + xorq %rax,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r8,%r15 + movq %rbx,%r11 + + rorq $6,%r14 + xorq %r8,%r13 + xorq %r10,%r15 + + xorq %rcx,%r11 + xorq %rax,%r14 + addq %r15,%r12 + movq %rbx,%r15 + + rorq $14,%r13 + andq %rax,%r11 + andq %rcx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r11 + + addq %r12,%rdx + addq %r12,%r11 + leaq 1(%rdi),%rdi + addq %r14,%r11 + + movq 80(%rsp),%r13 + movq 56(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 16(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 72(%rsp),%r12 + movq %rdx,%r13 + addq %r14,%r12 + movq %r11,%r14 + rorq $23,%r13 + movq %r8,%r15 + movq %r12,72(%rsp) + + rorq $5,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + rorq $4,%r13 + addq %r10,%r12 + xorq %r11,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rdx,%r15 + movq %rax,%r10 + + rorq $6,%r14 + xorq %rdx,%r13 + xorq %r9,%r15 + + xorq %rbx,%r10 + xorq %r11,%r14 + addq %r15,%r12 + movq %rax,%r15 + + rorq $14,%r13 + andq %r11,%r10 + andq %rbx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r10 + + addq %r12,%rcx + addq %r12,%r10 + leaq 1(%rdi),%rdi + addq %r14,%r10 + + movq 88(%rsp),%r13 + movq 64(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 24(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 80(%rsp),%r12 + movq %rcx,%r13 + addq %r14,%r12 + movq %r10,%r14 + rorq $23,%r13 + movq %rdx,%r15 + movq %r12,80(%rsp) + + rorq $5,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + rorq $4,%r13 + addq %r9,%r12 + xorq %r10,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rcx,%r15 + movq %r11,%r9 + + rorq $6,%r14 + xorq %rcx,%r13 + xorq %r8,%r15 + + xorq %rax,%r9 + xorq %r10,%r14 + addq %r15,%r12 + movq %r11,%r15 + + rorq $14,%r13 + andq %r10,%r9 + andq %rax,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r9 + + addq %r12,%rbx + addq %r12,%r9 + leaq 1(%rdi),%rdi + addq %r14,%r9 + + movq 96(%rsp),%r13 + movq 72(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 32(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 88(%rsp),%r12 + movq %rbx,%r13 + addq %r14,%r12 + movq %r9,%r14 + rorq $23,%r13 + movq %rcx,%r15 + movq %r12,88(%rsp) + + rorq $5,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + rorq $4,%r13 + addq %r8,%r12 + xorq %r9,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rbx,%r15 + movq %r10,%r8 + + rorq $6,%r14 + xorq %rbx,%r13 + xorq %rdx,%r15 + + xorq %r11,%r8 + xorq %r9,%r14 + addq %r15,%r12 + movq %r10,%r15 + + rorq $14,%r13 + andq %r9,%r8 + andq %r11,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%r8 + + addq %r12,%rax + addq %r12,%r8 + leaq 
1(%rdi),%rdi + addq %r14,%r8 + + movq 104(%rsp),%r13 + movq 80(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 40(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 96(%rsp),%r12 + movq %rax,%r13 + addq %r14,%r12 + movq %r8,%r14 + rorq $23,%r13 + movq %rbx,%r15 + movq %r12,96(%rsp) + + rorq $5,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + rorq $4,%r13 + addq %rdx,%r12 + xorq %r8,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %rax,%r15 + movq %r9,%rdx + + rorq $6,%r14 + xorq %rax,%r13 + xorq %rcx,%r15 + + xorq %r10,%rdx + xorq %r8,%r14 + addq %r15,%r12 + movq %r9,%r15 + + rorq $14,%r13 + andq %r8,%rdx + andq %r10,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rdx + + addq %r12,%r11 + addq %r12,%rdx + leaq 1(%rdi),%rdi + addq %r14,%rdx + + movq 112(%rsp),%r13 + movq 88(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 48(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 104(%rsp),%r12 + movq %r11,%r13 + addq %r14,%r12 + movq %rdx,%r14 + rorq $23,%r13 + movq %rax,%r15 + movq %r12,104(%rsp) + + rorq $5,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + rorq $4,%r13 + addq %rcx,%r12 + xorq %rdx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r11,%r15 + movq %r8,%rcx + + rorq $6,%r14 + xorq %r11,%r13 + xorq %rbx,%r15 + + xorq %r9,%rcx + xorq %rdx,%r14 + addq %r15,%r12 + movq %r8,%r15 + + rorq $14,%r13 + andq %rdx,%rcx + andq %r9,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rcx + + addq %r12,%r10 + addq %r12,%rcx + leaq 1(%rdi),%rdi + addq %r14,%rcx + + movq 120(%rsp),%r13 + movq 96(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 56(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 112(%rsp),%r12 + movq %r10,%r13 + addq %r14,%r12 + movq %rcx,%r14 + rorq $23,%r13 + movq %r11,%r15 + movq %r12,112(%rsp) + + rorq $5,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + rorq $4,%r13 + addq %rbx,%r12 + xorq %rcx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r10,%r15 + movq %rdx,%rbx + + rorq $6,%r14 + xorq %r10,%r13 + xorq %rax,%r15 + + xorq %r8,%rbx + xorq %rcx,%r14 + addq %r15,%r12 + movq %rdx,%r15 + + rorq $14,%r13 + andq %rcx,%rbx + andq %r8,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rbx + + addq %r12,%r9 + addq %r12,%rbx + leaq 1(%rdi),%rdi + addq %r14,%rbx + + movq 0(%rsp),%r13 + movq 104(%rsp),%r14 + movq %r13,%r12 + movq %r14,%r15 + + rorq $7,%r12 + xorq %r13,%r12 + shrq $7,%r13 + + rorq $1,%r12 + xorq %r12,%r13 + movq 64(%rsp),%r12 + + rorq $42,%r15 + xorq %r14,%r15 + shrq $6,%r14 + + rorq $19,%r15 + addq %r13,%r12 + xorq %r15,%r14 + + addq 120(%rsp),%r12 + movq %r9,%r13 + addq %r14,%r12 + movq %rbx,%r14 + rorq $23,%r13 + movq %r10,%r15 + movq %r12,120(%rsp) + + rorq $5,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + rorq $4,%r13 + addq %rax,%r12 + xorq %rbx,%r14 + + addq (%rbp,%rdi,8),%r12 + andq %r9,%r15 + movq %rcx,%rax + + rorq $6,%r14 + xorq %r9,%r13 + xorq %r11,%r15 + + xorq %rdx,%rax + xorq %rbx,%r14 + addq %r15,%r12 + movq %rcx,%r15 + + rorq $14,%r13 + andq %rbx,%rax + andq %rdx,%r15 + + rorq $28,%r14 + addq %r13,%r12 + addq %r15,%rax + + addq %r12,%r8 + addq %r12,%rax + leaq 1(%rdi),%rdi + addq %r14,%rax + + cmpq $80,%rdi + jb .Lrounds_16_xx 
+ + movq 128+0(%rsp),%rdi + leaq 128(%rsi),%rsi + + addq 0(%rdi),%rax + addq 8(%rdi),%rbx + addq 16(%rdi),%rcx + addq 24(%rdi),%rdx + addq 32(%rdi),%r8 + addq 40(%rdi),%r9 + addq 48(%rdi),%r10 + addq 56(%rdi),%r11 + + cmpq 128+16(%rsp),%rsi + + movq %rax,0(%rdi) + movq %rbx,8(%rdi) + movq %rcx,16(%rdi) + movq %rdx,24(%rdi) + movq %r8,32(%rdi) + movq %r9,40(%rdi) + movq %r10,48(%rdi) + movq %r11,56(%rdi) + jb .Lloop + + movq 128+24(%rsp),%rsi + movq (%rsi),%r15 + movq 8(%rsi),%r14 + movq 16(%rsi),%r13 + movq 24(%rsi),%r12 + movq 32(%rsi),%rbp + movq 40(%rsi),%rbx + leaq 48(%rsi),%rsp +.Lepilogue: + .byte 0xf3,0xc3 +.size sha512_block_data_order,.-sha512_block_data_order +.align 64 +.type K512,@object +K512: +.quad 0x428a2f98d728ae22,0x7137449123ef65cd +.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc +.quad 0x3956c25bf348b538,0x59f111f1b605d019 +.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 +.quad 0xd807aa98a3030242,0x12835b0145706fbe +.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 +.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 +.quad 0x9bdc06a725c71235,0xc19bf174cf692694 +.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 +.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 +.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 +.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 +.quad 0x983e5152ee66dfab,0xa831c66d2db43210 +.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 +.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 +.quad 0x06ca6351e003826f,0x142929670a0e6e70 +.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 +.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df +.quad 0x650a73548baf63de,0x766a0abb3c77b2a8 +.quad 0x81c2c92e47edaee6,0x92722c851482353b +.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 +.quad 0xc24b8b70d0f89791,0xc76c51a30654be30 +.quad 0xd192e819d6ef5218,0xd69906245565a910 +.quad 0xf40e35855771202a,0x106aa07032bbd1b8 +.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 +.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 +.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb +.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 +.quad 0x748f82ee5defb2fc,0x78a5636f43172f60 +.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec +.quad 0x90befffa23631e28,0xa4506cebde82bde9 +.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b +.quad 0xca273eceea26619c,0xd186b8c721c0c207 +.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 +.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 +.quad 0x113f9804bef90dae,0x1b710b35131c471b +.quad 0x28db77f523047d84,0x32caab7b40c72493 +.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c +.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a +.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 diff --git a/app/openssl/crypto/sha/asm/sha512-x86_64.pl b/app/openssl/crypto/sha/asm/sha512-x86_64.pl index e6643f8c..8d516785 100755 --- a/app/openssl/crypto/sha/asm/sha512-x86_64.pl +++ b/app/openssl/crypto/sha/asm/sha512-x86_64.pl @@ -51,7 +51,8 @@ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or die "can't locate x86_64-xlate.pl"; -open STDOUT,"| $^X $xlate $flavour $output"; +open OUT,"| \"$^X\" $xlate $flavour $output"; +*STDOUT=*OUT; if ($output =~ /512/) { $func="sha512_block_data_order"; @@ -95,50 +96,44 @@ sub ROUND_00_15() { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_; $code.=<<___; - mov $e,$a0 - mov $e,$a1 + ror \$`$Sigma1[2]-$Sigma1[1]`,$a0 mov $f,$a2 + mov $T1,`$SZ*($i&0xf)`(%rsp) - ror \$$Sigma1[0],$a0 - ror \$$Sigma1[1],$a1 + ror \$`$Sigma0[2]-$Sigma0[1]`,$a1 + xor $e,$a0 xor $g,$a2 # f^g - xor $a1,$a0 - ror \$`$Sigma1[2]-$Sigma1[1]`,$a1 + ror \$`$Sigma1[1]-$Sigma1[0]`,$a0 + add $h,$T1 # T1+=h + xor $a,$a1 + + add ($Tbl,$round,$SZ),$T1 # T1+=K[round] and 
$e,$a2 # (f^g)&e - mov $T1,`$SZ*($i&0xf)`(%rsp) + mov $b,$h - xor $a1,$a0 # Sigma1(e) + ror \$`$Sigma0[1]-$Sigma0[0]`,$a1 + xor $e,$a0 xor $g,$a2 # Ch(e,f,g)=((f^g)&e)^g - add $h,$T1 # T1+=h - - mov $a,$h - add $a0,$T1 # T1+=Sigma1(e) + xor $c,$h # b^c + xor $a,$a1 add $a2,$T1 # T1+=Ch(e,f,g) - mov $a,$a0 - mov $a,$a1 + mov $b,$a2 - ror \$$Sigma0[0],$h - ror \$$Sigma0[1],$a0 - mov $a,$a2 - add ($Tbl,$round,$SZ),$T1 # T1+=K[round] + ror \$$Sigma1[0],$a0 # Sigma1(e) + and $a,$h # h=(b^c)&a + and $c,$a2 # b&c - xor $a0,$h - ror \$`$Sigma0[2]-$Sigma0[1]`,$a0 - or $c,$a1 # a|c + ror \$$Sigma0[0],$a1 # Sigma0(a) + add $a0,$T1 # T1+=Sigma1(e) + add $a2,$h # h+=b&c (completes +=Maj(a,b,c) - xor $a0,$h # h=Sigma0(a) - and $c,$a2 # a&c add $T1,$d # d+=T1 - - and $b,$a1 # (a|c)&b add $T1,$h # h+=T1 - - or $a2,$a1 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1($round),$round # round++ + add $a1,$h # h+=Sigma0(a) - add $a1,$h # h+=Maj(a,b,c) ___ } @@ -147,32 +142,30 @@ sub ROUND_16_XX() $code.=<<___; mov `$SZ*(($i+1)&0xf)`(%rsp),$a0 - mov `$SZ*(($i+14)&0xf)`(%rsp),$T1 - - mov $a0,$a2 + mov `$SZ*(($i+14)&0xf)`(%rsp),$a1 + mov $a0,$T1 + mov $a1,$a2 + ror \$`$sigma0[1]-$sigma0[0]`,$T1 + xor $a0,$T1 shr \$$sigma0[2],$a0 - ror \$$sigma0[0],$a2 - - xor $a2,$a0 - ror \$`$sigma0[1]-$sigma0[0]`,$a2 - xor $a2,$a0 # sigma0(X[(i+1)&0xf]) - mov $T1,$a1 + ror \$$sigma0[0],$T1 + xor $T1,$a0 # sigma0(X[(i+1)&0xf]) + mov `$SZ*(($i+9)&0xf)`(%rsp),$T1 - shr \$$sigma1[2],$T1 - ror \$$sigma1[0],$a1 - - xor $a1,$T1 - ror \$`$sigma1[1]-$sigma1[0]`,$a1 - - xor $a1,$T1 # sigma1(X[(i+14)&0xf]) + ror \$`$sigma1[1]-$sigma1[0]`,$a2 + xor $a1,$a2 + shr \$$sigma1[2],$a1 + ror \$$sigma1[0],$a2 add $a0,$T1 - - add `$SZ*(($i+9)&0xf)`(%rsp),$T1 + xor $a2,$a1 # sigma1(X[(i+14)&0xf]) add `$SZ*($i&0xf)`(%rsp),$T1 + mov $e,$a0 + add $a1,$T1 + mov $a,$a1 ___ &ROUND_00_15(@_); } @@ -219,6 +212,8 @@ $func: ___ for($i=0;$i<16;$i++) { $code.=" mov $SZ*$i($inp),$T1\n"; + $code.=" mov @ROT[4],$a0\n"; + $code.=" mov @ROT[0],$a1\n"; $code.=" bswap $T1\n"; &ROUND_00_15($i,@ROT); unshift(@ROT,pop(@ROT)); -- cgit v1.2.3
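For reference, the register comments in ROUND_00_15/ROUND_16_XX above (T1+=Sigma1(e), Ch(e,f,g), Maj(a,b,c), and the sigma0/sigma1 terms of the message schedule) track the standard FIPS 180-4 SHA-512 round quantities. A minimal standalone Perl sketch of those helpers, not taken from the patch and assuming a perl built with 64-bit integers:

sub ror64  { my ($x, $n) = @_; (($x >> $n) | ($x << (64 - $n))) & 0xFFFFFFFFFFFFFFFF }
sub Sigma1 { my $e = shift; ror64($e, 14) ^ ror64($e, 18) ^ ror64($e, 41) }  # what the rorq 23/4/14 chain composes
sub Sigma0 { my $a = shift; ror64($a, 28) ^ ror64($a, 34) ^ ror64($a, 39) }  # what the rorq 5/6/28 chain composes
sub Ch     { my ($e, $f, $g) = @_; (($f ^ $g) & $e) ^ $g }                   # same ((f^g)&e)^g form as the asm comments
sub Maj    { my ($a, $b, $c) = @_; ($a & $b) ^ ($a & $c) ^ ($b & $c) }
# one round: T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i];  d += T1;  h = T1 + Sigma0(a) + Maj(a,b,c)

Each round then rotates the working variables a..h, which is why the generated x86_64 code above repeats the same instruction pattern with the roles of %rax..%r11 permuted from one round to the next.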